/freebsd-13-stable/sys/dev/xen/blkback/

Lines Matching defs:xbb

138     printf("xbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
175 static void xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt,
177 static int xbb_shutdown(struct xbb_softc *xbb);
193 struct xbb_softc *xbb;
295 * request list free pool (xbb->reqlist_free_stailq) and pending
296 * requests waiting for execution (xbb->reqlist_pending_stailq).
499 * Only a single file based request is outstanding per-xbb instance,
540 typedef int (*xbb_dispatch_t)(struct xbb_softc *xbb,
722 * (e.g. xbb->media_size >> xbb->sector_size_shift).
817 * \param xbb Per-instance xbb configuration structure.
823 xbb_get_req(struct xbb_softc *xbb)
829 mtx_assert(&xbb->lock, MA_OWNED);
831 if ((req = STAILQ_FIRST(&xbb->request_free_stailq)) != NULL) {
832 STAILQ_REMOVE_HEAD(&xbb->request_free_stailq, links);
833 xbb->active_request_count++;
842 * \param xbb Per-instance xbb configuration structure.
846 xbb_release_req(struct xbb_softc *xbb, struct xbb_xen_req *req)
848 mtx_assert(&xbb->lock, MA_OWNED);
850 STAILQ_INSERT_HEAD(&xbb->request_free_stailq, req, links);
851 xbb->active_request_count--;
853 KASSERT(xbb->active_request_count >= 0,
860 * \param xbb Per-instance xbb configuration structure.
865 xbb_release_reqs(struct xbb_softc *xbb, struct xbb_xen_req_list *req_list,
868 mtx_assert(&xbb->lock, MA_OWNED);
870 STAILQ_CONCAT(&xbb->request_free_stailq, req_list);
871 xbb->active_request_count -= nreqs;
873 KASSERT(xbb->active_request_count >= 0,
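
The xbb_get_req()/xbb_release_req()/xbb_release_reqs() fragments above are a classic locked free-pool: a STAILQ of preallocated requests plus an active_request_count that the KASSERTs keep honest. A minimal userspace sketch of the same pattern (hypothetical names, not the driver's structures) might look like this:

#include <sys/queue.h>
#include <pthread.h>
#include <stddef.h>

struct req {
        STAILQ_ENTRY(req) links;        /* free-list linkage */
};

STAILQ_HEAD(req_list, req);

struct pool {
        pthread_mutex_t lock;
        struct req_list free_stailq;    /* STAILQ_INIT()ed and prefilled at setup */
        int active_count;               /* requests currently handed out */
};

/* Take one request from the free pool, or NULL if it is exhausted. */
static struct req *
pool_get(struct pool *p)
{
        struct req *r;

        pthread_mutex_lock(&p->lock);
        if ((r = STAILQ_FIRST(&p->free_stailq)) != NULL) {
                STAILQ_REMOVE_HEAD(&p->free_stailq, links);
                p->active_count++;
        }
        pthread_mutex_unlock(&p->lock);
        return (r);
}

/* Return one request to the pool. */
static void
pool_put(struct pool *p, struct req *r)
{
        pthread_mutex_lock(&p->lock);
        STAILQ_INSERT_HEAD(&p->free_stailq, r, links);
        p->active_count--;
        pthread_mutex_unlock(&p->lock);
}

The batched variant, xbb_release_reqs(), returns a whole list at once with STAILQ_CONCAT, which keeps lock traffic down when a coalesced request list completes.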
959 struct xbb_softc *xbb;
961 xbb = reqlist->xbb;
963 return ((uintptr_t)(xbb->gnt_base_addr +
964 (uintptr_t)(reqlist->kva - xbb->kva) +
971 * \param xbb Per-instance xbb configuration structure.
974 * \param have_lock If set, xbb lock is already held.
984 xbb_get_kva(struct xbb_softc *xbb, int nr_pages)
996 mtx_lock(&xbb->lock);
1001 bit_ffc(xbb->kva_free, xbb->reqlist_kva_pages, &first_clear);
1010 for (i = first_clear, num_clear = 0; i < xbb->reqlist_kva_pages; i++) {
1017 if (bit_test(xbb->kva_free, i)) {
1031 bit_nset(xbb->kva_free, first_clear,
1034 free_kva = xbb->kva +
1037 KASSERT(free_kva >= (uint8_t *)xbb->kva &&
1039 (uint8_t *)xbb->ring_config.va,
1042 nr_pages * PAGE_SIZE, (uintmax_t)xbb->kva,
1043 (uintmax_t)xbb->ring_config.va));
1051 xbb->flags |= XBBF_RESOURCE_SHORTAGE;
1052 xbb->kva_shortages++;
1055 mtx_unlock(&xbb->lock);
1063 * \param xbb Per-instance xbb configuration structure.
1068 xbb_free_kva(struct xbb_softc *xbb, uint8_t *kva_ptr, int nr_pages)
1072 mtx_assert(&xbb->lock, MA_OWNED);
1074 start_page = (intptr_t)(kva_ptr - xbb->kva) >> PAGE_SHIFT;
1075 bit_nclear(xbb->kva_free, start_page, start_page + nr_pages - 1);
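
xbb_get_kva()/xbb_free_kva() above implement a first-fit allocator over a per-page bitmap: find the first clear bit, confirm the following nr_pages bits are also clear, set the whole run, and clear it again on free. A portable sketch of that search, with trivial helpers standing in for the bitstring(3) macros the driver uses (bit_ffc(), bit_test(), bit_nset(), bit_nclear()):

#include <stdbool.h>
#include <stddef.h>

/* One byte per page keeps the sketch simple; the driver packs real bits. */
struct kva_map {
        unsigned char *used;    /* used[i] != 0 when page i is allocated */
        size_t npages;
};

/* Return the start of the first free run of nr_pages pages, or -1. */
static long
kva_find_run(const struct kva_map *m, size_t nr_pages)
{
        size_t i, run = 0, start = 0;

        for (i = 0; i < m->npages; i++) {
                if (m->used[i]) {
                        run = 0;        /* run broken; keep scanning */
                        continue;
                }
                if (run == 0)
                        start = i;
                if (++run == nr_pages)
                        return ((long)start);
        }
        return (-1);
}

/* Mark [start, start + nr_pages) allocated (used) or free. */
static void
kva_mark(struct kva_map *m, size_t start, size_t nr_pages, bool used)
{
        for (size_t i = 0; i < nr_pages; i++)
                m->used[start + i] = used;
}

On success the driver turns the page index into an address within xbb->kva; on failure it sets XBBF_RESOURCE_SHORTAGE and bumps kva_shortages so the allocation can be retried once xbb_free_kva() releases pages.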
1112 * \param xbb Per-instance xbb configuration structure.
1118 xbb_get_reqlist(struct xbb_softc *xbb)
1124 mtx_assert(&xbb->lock, MA_OWNED);
1126 if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) {
1127 STAILQ_REMOVE_HEAD(&xbb->reqlist_free_stailq, links);
1143 * \param xbb Per-instance xbb configuration structure.
1149 xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
1153 mtx_assert(&xbb->lock, MA_OWNED);
1156 wakeup = xbb->flags & XBBF_RESOURCE_SHORTAGE;
1157 xbb->flags &= ~XBBF_RESOURCE_SHORTAGE;
1161 xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments);
1163 xbb_release_reqs(xbb, &reqlist->contig_req_list, reqlist->num_children);
1165 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
1167 if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
1174 xbb_shutdown(xbb);
1178 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
1184 * \param xbb Per-instance xbb configuration structure.
1192 xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist,
1201 mtx_lock(&xbb->lock);
1207 if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
1208 mtx_unlock(&xbb->lock);
1216 nreqlist = xbb_get_reqlist(xbb);
1222 nreq = xbb_get_req(xbb);
1226 mtx_unlock(&xbb->lock);
1232 STAILQ_INSERT_TAIL(&xbb->reqlist_pending_stailq, nreqlist,
1241 if (xbb->abi != BLKIF_PROTOCOL_NATIVE) {
1249 devstat_start_transaction(xbb->xbb_stats_in, &nreq->ds_t0);
1263 xbb->flags |= XBBF_RESOURCE_SHORTAGE;
1264 xbb->request_shortages++;
1267 xbb_release_req(xbb, nreq);
1270 xbb_release_reqlist(xbb, nreqlist, /*wakeup*/ 0);
1272 mtx_unlock(&xbb->lock);
1280 * \param xbb Per-instance xbb configuration structure.
1286 xbb_queue_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status)
1301 mtx_assert(&xbb->lock, MA_OWNED);
1308 switch (xbb->abi) {
1310 resp = RING_GET_RESPONSE(&xbb->rings.native,
1311 xbb->rings.native.rsp_prod_pvt);
1315 RING_GET_RESPONSE(&xbb->rings.x86_32,
1316 xbb->rings.x86_32.rsp_prod_pvt);
1320 RING_GET_RESPONSE(&xbb->rings.x86_64,
1321 xbb->rings.x86_64.rsp_prod_pvt);
1332 xbb->reqs_completed_with_error++;
1334 xbb->rings.common.rsp_prod_pvt++;
1336 xbb->reqs_queued_for_completion++;
1343 * \param xbb Per-instance xbb configuration structure.
1351 xbb_push_responses(struct xbb_softc *xbb, int *run_taskqueue, int *notify)
1358 mtx_assert(&xbb->lock, MA_OWNED);
1362 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xbb->rings.common, *notify);
1364 if (xbb->rings.common.rsp_prod_pvt == xbb->rings.common.req_cons) {
1370 RING_FINAL_CHECK_FOR_REQUESTS(&xbb->rings.common, more_to_do);
1371 } else if (RING_HAS_UNCONSUMED_REQUESTS(&xbb->rings.common)) {
1375 xbb->reqs_completed += xbb->reqs_queued_for_completion;
1376 xbb->reqs_queued_for_completion = 0;
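
xbb_queue_response() above writes each response at the ring's private producer index (rsp_prod_pvt), and xbb_push_responses() later publishes the whole batch and works out whether the frontend needs an event. A deliberately simplified, single-threaded model of that private/public producer split (no memory barriers, not the real Xen ring.h macros):

#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 32                    /* must be a power of two */

struct resp {
        uint64_t id;
        int16_t status;
};

struct resp_ring {
        struct resp slots[RING_SIZE];
        uint32_t rsp_prod;              /* shared: last published response */
        uint32_t rsp_event;             /* shared: frontend asks for an event here */
        uint32_t rsp_prod_pvt;          /* backend-private: queued, unpublished */
};

/* Queue one response without making it visible to the frontend yet. */
static void
queue_response(struct resp_ring *r, uint64_t id, int16_t status)
{
        struct resp *slot = &r->slots[r->rsp_prod_pvt & (RING_SIZE - 1)];

        slot->id = id;
        slot->status = status;
        r->rsp_prod_pvt++;
}

/* Publish all queued responses; return true if the frontend must be notified. */
static bool
push_responses(struct resp_ring *r)
{
        uint32_t old_prod = r->rsp_prod;
        uint32_t new_prod = r->rsp_prod_pvt;

        r->rsp_prod = new_prod;         /* the real macro issues a write barrier first */
        /* Notify only if rsp_event falls inside the newly published window. */
        return ((uint32_t)(new_prod - r->rsp_event) < (uint32_t)(new_prod - old_prod));
}

After pushing, the real code also re-checks the ring for unconsumed requests so it knows whether to requeue the I/O task, which is why xbb_push_responses() reports run_taskqueue alongside notify.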
1384 * \param xbb Per-instance xbb configuration structure.
1388 xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1399 mtx_lock(&xbb->lock);
1414 xbb_queue_response(xbb, nreq, reqlist->status);
1424 devstat_end_transaction(xbb->xbb_stats_in,
1441 devstat_end_transaction(xbb->xbb_stats,
1448 xbb_release_reqlist(xbb, reqlist, /*wakeup*/ 1);
1450 xbb_push_responses(xbb, &run_taskqueue, &notify);
1452 mtx_unlock(&xbb->lock);
1455 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
1458 xen_intr_signal(xbb->xen_intr_handle);
1471 struct xbb_softc *xbb;
1475 xbb = reqlist->xbb;
1497 bio->bio_error, xbb->dev_name);
1501 && xenbus_get_state(xbb->dev) == XenbusStateConnected) {
1507 xenbus_set_state(xbb->dev, XenbusStateClosing);
1527 xbb_complete_reqlist(xbb, reqlist);
1536 * \param xbb Per-instance xbb configuration structure.
1547 xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1575 reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments);
1585 devstat_start_transaction(xbb->xbb_stats, &reqlist->ds_t0);
1595 if ((xbb->flags & XBBF_READ_ONLY) != 0) {
1597 xbb->dev_name);
1612 if (xbb->disable_flush != 0) {
1622 if (xbb->flush_interval != 0) {
1623 if (++(xbb->flush_count) < xbb->flush_interval) {
1626 xbb->flush_count = 0;
1641 reqlist->xbb = xbb;
1642 xbb_sg = xbb->xbb_sgs;
1643 map = xbb->maps;
1662 || __predict_false(nseg > xbb->max_request_segments)) {
1696 xbb->ring_config.gnt_addr,
1700 (uintmax_t)xbb->ring_config.gnt_addr));
1704 map->dom = xbb->otherend_id;
1716 nr_sects = (nr_sects << 9) >> xbb->sector_size_shift;
1720 ((xbb->sector_size >> 9) - 1)) != 0) {
1721 device_printf(xbb->dev, "%s: I/O size (%d) is not "
1725 xbb->sector_size);
1732 xbb->maps, reqlist->nr_segments);
1738 for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments;
1754 xbb->media_num_sectors) {
1760 xbb->dev_name);
1767 error = xbb->dispatch_io(xbb,
1781 xbb_complete_reqlist(xbb, reqlist);
1818 struct xbb_softc *xbb;
1825 xbb = (struct xbb_softc *)context;
1826 rings = &xbb->rings;
1847 reqlist = STAILQ_LAST(&xbb->reqlist_pending_stailq,
1882 switch (xbb->abi) {
1884 ring_req = RING_GET_REQUEST(&xbb->rings.native,
1892 &xbb->rings.x86_32, rings->common.req_cons);
1902 ring_req64 = RING_GET_REQUEST(&xbb->rings.x86_64,
1923 && ((xbb->no_coalesce_reqs != 0)
1924 || ((xbb->no_coalesce_reqs == 0)
1928 xbb->max_reqlist_segments))))) {
1938 retval = xbb_get_resources(xbb, &reqlist, ring_req,
1939 xbb->rings.common.req_cons);
1962 xbb->rings.common.req_cons++;
1963 xbb->reqs_received++;
1972 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
1985 STAILQ_REMOVE_HEAD(&xbb->reqlist_pending_stailq, links);
1987 retval = xbb_dispatch_io(xbb, reqlist);
1996 STAILQ_INSERT_HEAD(&xbb->reqlist_pending_stailq,
2011 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
2014 xbb->forced_dispatch++;
2016 xbb->normal_dispatch++;
2018 xbb->total_dispatch++;
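
The work-to-do loop above tries to fold consecutive ring requests into one request list: coalescing must be enabled (no_coalesce_reqs == 0), the new request has to line up with what the list already covers, and the combined segment count must stay within max_reqlist_segments. A small stand-alone illustration of that admission check (hypothetical types, not the driver's blkif structures):

#include <stdbool.h>
#include <stdint.h>

struct io_req {
        uint64_t sector;        /* first sector of the request */
        uint64_t nr_sectors;    /* length in sectors */
        int nr_segments;        /* scatter/gather segments used */
};

struct io_batch {
        uint64_t next_sector;   /* sector right after the batch so far */
        int nr_segments;        /* segments accumulated so far */
};

/* Decide whether req can be folded into the current batch. */
static bool
can_coalesce(const struct io_batch *b, const struct io_req *req,
    bool no_coalesce, int max_segments)
{
        if (no_coalesce)
                return (false);
        if (req->sector != b->next_sector)      /* not contiguous */
                return (false);
        return (b->nr_segments + req->nr_segments <= max_segments);
}

When the check fails the driver simply starts a new reqlist; the forced_dispatch/normal_dispatch/total_dispatch counters above record how each dispatch was triggered.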
2032 struct xbb_softc *xbb;
2035 xbb = (struct xbb_softc *)arg;
2036 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
2041 SDT_PROVIDER_DEFINE(xbb);
2042 SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_dev, flush, "int");
2043 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, read, "int", "uint64_t",
2045 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, write, "int",
2052 * \param xbb Per-instance xbb configuration structure.
2061 xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
2075 dev_data = &xbb->backend.dev;
2077 << xbb->sector_size_shift;
2101 SDT_PROBE1(xbb, kernel, xbb_dispatch_dev, flush,
2102 device_get_unit(xbb->dev));
2109 xbb_sg = xbb->xbb_sgs;
2120 if ((bio->bio_length & (xbb->sector_size - 1)) != 0) {
2124 __func__, xbb->otherend_id);
2136 if ((bio_offset & (xbb->sector_size - 1)) != 0) {
2139 xbb->otherend_id);
2157 bio->bio_pblkno = bio_offset >> xbb->sector_size_shift;
2165 if ((bio->bio_length & (xbb->sector_size - 1)) != 0) {
2169 __func__, xbb->otherend_id);
2197 SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, read,
2198 device_get_unit(xbb->dev),
2202 SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, write,
2203 device_get_unit(xbb->dev),
2219 SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_file, flush, "int");
2220 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, read, "int", "uint64_t",
2222 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, write, "int",
2228 * \param xbb Per-instance xbb configuration structure.
2237 xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
2252 file_data = &xbb->backend.file;
2266 SDT_PROBE1(xbb, kernel, xbb_dispatch_file, flush,
2267 device_get_unit(xbb->dev));
2269 (void) vn_start_write(xbb->vn, &mountpoint, V_WAIT);
2271 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2272 error = VOP_FSYNC(xbb->vn, MNT_WAIT, curthread);
2273 VOP_UNLOCK(xbb->vn);
2285 << xbb->sector_size_shift;
2289 xbb_sg = xbb->xbb_sgs;
2361 SDT_PROBE3(xbb, kernel, xbb_dispatch_file, read,
2362 device_get_unit(xbb->dev), xuio.uio_offset,
2365 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2387 error = VOP_READ(xbb->vn, &xuio, (flags & BIO_ORDERED) ?
2390 VOP_UNLOCK(xbb->vn);
2395 SDT_PROBE3(xbb, kernel, xbb_dispatch_file, write,
2396 device_get_unit(xbb->dev), xuio.uio_offset,
2399 (void)vn_start_write(xbb->vn, &mountpoint, V_WAIT);
2401 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2421 error = VOP_WRITE(xbb->vn, &xuio, (flags & BIO_ORDERED) ?
2423 VOP_UNLOCK(xbb->vn);
2458 xbb_complete_reqlist(xbb, reqlist);
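
xbb_dispatch_file() above builds a uio over the request's segments and hands it to VOP_READ()/VOP_WRITE() on the backing vnode, taking vn_start_write() and the vnode lock around writes and issuing a synchronous write for ordered requests. In userspace the same scatter/gather shape maps onto preadv(2)/pwritev(2); a hedged analogue that also retries short writes:

#include <sys/types.h>
#include <sys/uio.h>

#include <errno.h>
#include <unistd.h>

/*
 * Write a scatter/gather list at byte offset 'offset', retrying short writes.
 * 'ordered' loosely mirrors BIO_ORDERED: push the data to stable storage
 * before returning, much as the driver issues a synchronous VOP_WRITE().
 */
static int
file_write_segments(int fd, struct iovec *iov, int nseg, off_t offset,
    int ordered)
{
        while (nseg > 0) {
                ssize_t n = pwritev(fd, iov, nseg, offset);

                if (n < 0)
                        return (-1);
                if (n == 0) {
                        errno = EIO;    /* should not happen on regular files */
                        return (-1);
                }
                offset += n;
                /* Drop the iovec entries that were written completely. */
                while (nseg > 0 && (size_t)n >= iov->iov_len) {
                        n -= (ssize_t)iov->iov_len;
                        iov++;
                        nseg--;
                }
                if (nseg > 0) {         /* partially written entry */
                        iov->iov_base = (char *)iov->iov_base + n;
                        iov->iov_len -= (size_t)n;
                }
        }
        return (ordered ? fdatasync(fd) : 0);
}

Reads work the same way with preadv(2), matching the VOP_READ() branch above.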
2468 * \param xbb Per-instance xbb configuration structure.
2471 xbb_close_backend(struct xbb_softc *xbb)
2474 DPRINTF("closing dev=%s\n", xbb->dev_name);
2475 if (xbb->vn) {
2478 if ((xbb->flags & XBBF_READ_ONLY) == 0)
2481 switch (xbb->device_type) {
2483 if (xbb->backend.dev.csw) {
2484 dev_relthread(xbb->backend.dev.cdev,
2485 xbb->backend.dev.dev_ref);
2486 xbb->backend.dev.csw = NULL;
2487 xbb->backend.dev.cdev = NULL;
2498 (void)vn_close(xbb->vn, flags, NOCRED, curthread);
2499 xbb->vn = NULL;
2501 switch (xbb->device_type) {
2505 if (xbb->backend.file.cred != NULL) {
2506 crfree(xbb->backend.file.cred);
2507 xbb->backend.file.cred = NULL;
2522 * \param xbb Per-instance xbb configuration structure.
2527 xbb_open_dev(struct xbb_softc *xbb)
2534 xbb->device_type = XBB_TYPE_DISK;
2535 xbb->dispatch_io = xbb_dispatch_dev;
2536 xbb->backend.dev.cdev = xbb->vn->v_rdev;
2537 xbb->backend.dev.csw = dev_refthread(xbb->backend.dev.cdev,
2538 &xbb->backend.dev.dev_ref);
2539 if (xbb->backend.dev.csw == NULL)
2542 error = VOP_GETATTR(xbb->vn, &vattr, NOCRED);
2544 xenbus_dev_fatal(xbb->dev, error, "error getting "
2546 xbb->dev_name);
2550 dev = xbb->vn->v_rdev;
2553 xenbus_dev_fatal(xbb->dev, ENODEV, "no d_ioctl for "
2554 "device %s!", xbb->dev_name);
2559 (caddr_t)&xbb->sector_size, FREAD,
2562 xenbus_dev_fatal(xbb->dev, error,
2564 "for device %s", xbb->dev_name);
2569 (caddr_t)&xbb->media_size, FREAD,
2572 xenbus_dev_fatal(xbb->dev, error,
2574 "for device %s", xbb->dev_name);
2584 * \param xbb Per-instance xbb configuration structure.
2589 xbb_open_file(struct xbb_softc *xbb)
2595 file_data = &xbb->backend.file;
2596 xbb->device_type = XBB_TYPE_FILE;
2597 xbb->dispatch_io = xbb_dispatch_file;
2598 error = VOP_GETATTR(xbb->vn, &vattr, curthread->td_ucred);
2600 xenbus_dev_fatal(xbb->dev, error,
2602 "for file %s", xbb->dev_name);
2611 if (VOP_ISLOCKED(xbb->vn) != LK_EXCLUSIVE) {
2612 vn_lock(xbb->vn, LK_UPGRADE | LK_RETRY);
2613 if (VN_IS_DOOMED(xbb->vn)) {
2615 xenbus_dev_fatal(xbb->dev, error,
2617 xbb->dev_name);
2624 xbb->media_size = vattr.va_size;
2636 xbb->sector_size = vattr.va_blocksize;
2638 xbb->sector_size = 512;
2644 if (xbb->media_size < xbb->sector_size) {
2646 xenbus_dev_fatal(xbb->dev, error,
2648 xbb->dev_name,
2649 (uintmax_t)xbb->media_size,
2650 xbb->sector_size);
2658 * \param xbb Per-instance xbb configuration structure.
2663 xbb_open_backend(struct xbb_softc *xbb)
2672 DPRINTF("opening dev=%s\n", xbb->dev_name);
2675 xenbus_dev_fatal(xbb->dev, ENOENT,
2680 if ((xbb->flags & XBBF_READ_ONLY) == 0)
2686 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, xbb->dev_name, curthread);
2695 if (xbb->dev_name[0] != '/') {
2700 dev_name = malloc(strlen(xbb->dev_name)
2705 xbb->dev_name);
2706 free(xbb->dev_name, M_XENBLOCKBACK);
2707 xbb->dev_name = dev_name;
2711 xenbus_dev_fatal(xbb->dev, error, "error opening device %s",
2712 xbb->dev_name);
2718 xbb->vn = nd.ni_vp;
2721 if (vn_isdisk_error(xbb->vn, &error)) {
2722 error = xbb_open_dev(xbb);
2723 } else if (xbb->vn->v_type == VREG) {
2724 error = xbb_open_file(xbb);
2727 xenbus_dev_fatal(xbb->dev, error, "%s is not a disk "
2728 "or file", xbb->dev_name);
2730 VOP_UNLOCK(xbb->vn);
2733 xbb_close_backend(xbb);
2737 xbb->sector_size_shift = fls(xbb->sector_size) - 1;
2738 xbb->media_num_sectors = xbb->media_size >> xbb->sector_size_shift;
2741 (xbb->device_type == XBB_TYPE_DISK) ? "dev" : "file",
2742 xbb->dev_name, xbb->sector_size, xbb->media_size);
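
xbb_open_backend() above finishes by deriving sector_size_shift as fls(sector_size) - 1 and media_num_sectors as media_size >> sector_size_shift; for a power-of-two sector size this is just log2, e.g. fls(512) - 1 == 9. A tiny check using fls(3) from FreeBSD's <strings.h>:

#include <stdint.h>
#include <stdio.h>
#include <strings.h>            /* fls(3) */

int
main(void)
{
        uint32_t sector_size = 512;                       /* assumed power of two */
        uint64_t media_size = 8ULL * 1024 * 1024 * 1024;  /* 8 GiB example disk */
        int shift = fls((int)sector_size) - 1;            /* 9 for 512-byte sectors */

        printf("shift=%d sectors=%ju\n", shift,
            (uintmax_t)(media_size >> shift));            /* prints 16777216 */
        return (0);
}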
2751 * \param xbb Per-instance xbb configuration structure.
2754 xbb_free_communication_mem(struct xbb_softc *xbb)
2756 if (xbb->kva != 0) {
2757 if (xbb->pseudo_phys_res != NULL) {
2758 xenmem_free(xbb->dev, xbb->pseudo_phys_res_id,
2759 xbb->pseudo_phys_res);
2760 xbb->pseudo_phys_res = NULL;
2763 xbb->kva = 0;
2764 xbb->gnt_base_addr = 0;
2765 if (xbb->kva_free != NULL) {
2766 free(xbb->kva_free, M_XENBLOCKBACK);
2767 xbb->kva_free = NULL;
2774 * \param xbb Per-instance xbb configuration structure.
2777 xbb_disconnect(struct xbb_softc *xbb)
2786 if ((xbb->flags & XBBF_RING_CONNECTED) == 0)
2789 mtx_unlock(&xbb->lock);
2790 xen_intr_unbind(&xbb->xen_intr_handle);
2791 taskqueue_drain(xbb->io_taskqueue, &xbb->io_task);
2792 mtx_lock(&xbb->lock);
2798 if (xbb->active_request_count != 0)
2802 ring_idx < xbb->ring_config.ring_pages;
2804 op->host_addr = xbb->ring_config.gnt_addr
2806 op->dev_bus_addr = xbb->ring_config.bus_addr[ring_idx];
2807 op->handle = xbb->ring_config.handle[ring_idx];
2811 xbb->ring_config.ring_pages);
2815 xbb_free_communication_mem(xbb);
2817 if (xbb->requests != NULL) {
2818 free(xbb->requests, M_XENBLOCKBACK);
2819 xbb->requests = NULL;
2822 if (xbb->request_lists != NULL) {
2827 for (i = 0, reqlist = xbb->request_lists;
2828 i < xbb->max_requests; i++, reqlist++){
2840 free(xbb->request_lists, M_XENBLOCKBACK);
2841 xbb->request_lists = NULL;
2844 xbb->flags &= ~XBBF_RING_CONNECTED;
2853 * \param xbb Per-instance xbb configuration structure.
2856 xbb_connect_ring(struct xbb_softc *xbb)
2863 if ((xbb->flags & XBBF_RING_CONNECTED) != 0)
2870 xbb->ring_config.va = xbb->kva
2871 + (xbb->kva_size
2872 - (xbb->ring_config.ring_pages * PAGE_SIZE));
2873 xbb->ring_config.gnt_addr = xbb->gnt_base_addr
2874 + (xbb->kva_size
2875 - (xbb->ring_config.ring_pages * PAGE_SIZE));
2878 ring_idx < xbb->ring_config.ring_pages;
2880 gnt->host_addr = xbb->ring_config.gnt_addr
2883 gnt->ref = xbb->ring_config.ring_ref[ring_idx];
2884 gnt->dom = xbb->otherend_id;
2888 xbb->ring_config.ring_pages);
2893 ring_idx < xbb->ring_config.ring_pages;
2899 xbb->ring_config.va = 0;
2900 xenbus_dev_fatal(xbb->dev, EACCES,
2905 for (i = 0, j = 0; i < xbb->ring_config.ring_pages;
2923 xbb->ring_config.handle[ring_idx] = gnt->handle;
2924 xbb->ring_config.bus_addr[ring_idx] = gnt->dev_bus_addr;
2928 switch (xbb->abi) {
2932 sring = (blkif_sring_t *)xbb->ring_config.va;
2933 BACK_RING_INIT(&xbb->rings.native, sring,
2934 xbb->ring_config.ring_pages * PAGE_SIZE);
2940 sring_x86_32 = (blkif_x86_32_sring_t *)xbb->ring_config.va;
2941 BACK_RING_INIT(&xbb->rings.x86_32, sring_x86_32,
2942 xbb->ring_config.ring_pages * PAGE_SIZE);
2948 sring_x86_64 = (blkif_x86_64_sring_t *)xbb->ring_config.va;
2949 BACK_RING_INIT(&xbb->rings.x86_64, sring_x86_64,
2950 xbb->ring_config.ring_pages * PAGE_SIZE);
2957 xbb->flags |= XBBF_RING_CONNECTED;
2959 error = xen_intr_bind_remote_port(xbb->dev,
2960 xbb->otherend_id,
2961 xbb->ring_config.evtchn,
2964 /*arg*/xbb,
2966 &xbb->xen_intr_handle);
2968 (void)xbb_disconnect(xbb);
2969 xenbus_dev_fatal(xbb->dev, error, "binding event channel");
2983 * \param xbb Per-instance xbb configuration structure.
2989 xbb_alloc_communication_mem(struct xbb_softc *xbb)
2991 xbb->reqlist_kva_pages = xbb->max_requests * xbb->max_request_segments;
2992 xbb->reqlist_kva_size = xbb->reqlist_kva_pages * PAGE_SIZE;
2993 xbb->kva_size = xbb->reqlist_kva_size +
2994 (xbb->ring_config.ring_pages * PAGE_SIZE);
2996 xbb->kva_free = bit_alloc(xbb->reqlist_kva_pages, M_XENBLOCKBACK, M_NOWAIT);
2997 if (xbb->kva_free == NULL)
3001 device_get_nameunit(xbb->dev), xbb->kva_size,
3002 xbb->reqlist_kva_size);
3009 xbb->pseudo_phys_res_id = 0;
3010 xbb->pseudo_phys_res = xenmem_alloc(xbb->dev, &xbb->pseudo_phys_res_id,
3011 xbb->kva_size);
3012 if (xbb->pseudo_phys_res == NULL) {
3013 xbb->kva = 0;
3016 xbb->kva = (vm_offset_t)rman_get_virtual(xbb->pseudo_phys_res);
3017 xbb->gnt_base_addr = rman_get_start(xbb->pseudo_phys_res);
3020 device_get_nameunit(xbb->dev), (uintmax_t)xbb->kva,
3021 (uintmax_t)xbb->gnt_base_addr);
3028 * \param xbb Per-instance xbb configuration structure.
3031 xbb_collect_frontend_info(struct xbb_softc *xbb)
3040 otherend_path = xenbus_get_otherend_path(xbb->dev);
3045 xbb->ring_config.ring_pages = 1;
3046 xbb->max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
3047 xbb->max_request_size = xbb->max_request_segments * PAGE_SIZE;
3054 &xbb->ring_config.evtchn);
3056 xenbus_dev_fatal(xbb->dev, error,
3059 xenbus_get_otherend_path(xbb->dev));
3077 xbb->max_requests = 32;
3082 xbb->ring_config.ring_pages = 1 << ring_page_order;
3083 ring_size = PAGE_SIZE * xbb->ring_config.ring_pages;
3084 xbb->max_requests = BLKIF_MAX_RING_REQUESTS(ring_size);
3086 if (xbb->ring_config.ring_pages > XBB_MAX_RING_PAGES) {
3087 xenbus_dev_fatal(xbb->dev, EINVAL,
3091 xbb->ring_config.ring_pages,
3096 if (xbb->ring_config.ring_pages == 1) {
3099 &xbb->ring_config.ring_ref[0],
3102 xenbus_dev_fatal(xbb->dev, error,
3106 xenbus_get_otherend_path(xbb->dev));
3111 for (ring_idx = 0; ring_idx < xbb->ring_config.ring_pages;
3119 &xbb->ring_config.ring_ref[ring_idx]);
3121 xenbus_dev_fatal(xbb->dev, error,
3141 xbb->abi = BLKIF_PROTOCOL_NATIVE;
3143 xbb->abi = BLKIF_PROTOCOL_X86_32;
3145 xbb->abi = BLKIF_PROTOCOL_X86_64;
3147 xenbus_dev_fatal(xbb->dev, EINVAL,
3159 * \param xbb Per-instance xbb configuration structure.
3162 xbb_alloc_requests(struct xbb_softc *xbb)
3170 xbb->requests = malloc(xbb->max_requests * sizeof(*xbb->requests),
3172 if (xbb->requests == NULL) {
3173 xenbus_dev_fatal(xbb->dev, ENOMEM,
3178 req = xbb->requests;
3179 last_req = &xbb->requests[xbb->max_requests - 1];
3180 STAILQ_INIT(&xbb->request_free_stailq);
3182 STAILQ_INSERT_TAIL(&xbb->request_free_stailq, req, links);
3189 xbb_alloc_request_lists(struct xbb_softc *xbb)
3198 xbb->request_lists = malloc(xbb->max_requests *
3199 sizeof(*xbb->request_lists), M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
3200 if (xbb->request_lists == NULL) {
3201 xenbus_dev_fatal(xbb->dev, ENOMEM,
3206 STAILQ_INIT(&xbb->reqlist_free_stailq);
3207 STAILQ_INIT(&xbb->reqlist_pending_stailq);
3208 for (i = 0; i < xbb->max_requests; i++) {
3211 reqlist = &xbb->request_lists[i];
3213 reqlist->xbb = xbb;
3216 reqlist->bounce = malloc(xbb->max_reqlist_size,
3219 xenbus_dev_fatal(xbb->dev, ENOMEM,
3226 reqlist->gnt_handles = malloc(xbb->max_reqlist_segments *
3230 xenbus_dev_fatal(xbb->dev, ENOMEM,
3236 for (seg = 0; seg < xbb->max_reqlist_segments; seg++)
3239 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
3248 * \param xbb Per-instance xbb configuration structure.
3251 xbb_publish_backend_info(struct xbb_softc *xbb)
3258 our_path = xenbus_get_node(xbb->dev);
3262 xenbus_dev_fatal(xbb->dev, error,
3270 "%"PRIu64, xbb->media_num_sectors);
3277 xbb->flags & XBBF_READ_ONLY
3284 xbb->sector_size);
3292 xenbus_dev_fatal(xbb->dev, error, "ending transaction");
3297 xenbus_dev_fatal(xbb->dev, error, "writing %s/%s",
3307 * \param xbb Per-instance xbb configuration structure.
3310 xbb_connect(struct xbb_softc *xbb)
3314 if (!xbb->hotplug_done ||
3315 (xenbus_get_state(xbb->dev) != XenbusStateInitWait) ||
3316 (xbb_collect_frontend_info(xbb) != 0))
3319 xbb->flags &= ~XBBF_SHUTDOWN;
3326 xbb->max_reqlist_segments = MIN(xbb->max_request_segments *
3327 xbb->max_requests, XBB_MAX_SEGMENTS_PER_REQLIST);
3333 xbb->max_reqlist_size = xbb->max_reqlist_segments * PAGE_SIZE;
3336 error = xbb_alloc_communication_mem(xbb);
3338 xenbus_dev_fatal(xbb->dev, error,
3343 error = xbb_alloc_requests(xbb);
3349 error = xbb_alloc_request_lists(xbb);
3358 error = xbb_connect_ring(xbb);
3364 if (xbb_publish_backend_info(xbb) != 0) {
3370 (void)xbb_disconnect(xbb);
3375 xenbus_set_state(xbb->dev, XenbusStateConnected);
3382 * \param xbb Per-instance xbb configuration structure.
3390 xbb_shutdown(struct xbb_softc *xbb)
3404 if ((xbb->flags & XBBF_IN_SHUTDOWN) != 0)
3407 xbb->flags |= XBBF_IN_SHUTDOWN;
3408 mtx_unlock(&xbb->lock);
3410 if (xbb->hotplug_watch.node != NULL) {
3411 xs_unregister_watch(&xbb->hotplug_watch);
3412 free(xbb->hotplug_watch.node, M_XENBLOCKBACK);
3413 xbb->hotplug_watch.node = NULL;
3415 xbb->hotplug_done = false;
3417 if (xenbus_get_state(xbb->dev) < XenbusStateClosing)
3418 xenbus_set_state(xbb->dev, XenbusStateClosing);
3420 frontState = xenbus_get_otherend_state(xbb->dev);
3421 mtx_lock(&xbb->lock);
3422 xbb->flags &= ~XBBF_IN_SHUTDOWN;
3431 xbb->flags |= XBBF_SHUTDOWN;
3434 error = xbb_disconnect(xbb);
3450 wakeup(xbb);
3459 * \param xbb Per-instance xbb configuration structure.
3464 xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt, ...)
3471 xs_vprintf(XST_NIL, xenbus_get_node(xbb->dev),
3474 xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3477 xenbus_dev_vfatal(xbb->dev, err, fmt, ap);
3480 xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3482 mtx_lock(&xbb->lock);
3483 xbb_shutdown(xbb);
3484 mtx_unlock(&xbb->lock);
3511 * \param xbb Xen Block Back softc.
3515 xbb_setup_sysctl(struct xbb_softc *xbb)
3520 sysctl_ctx = device_get_sysctl_ctx(xbb->dev);
3524 sysctl_tree = device_get_sysctl_tree(xbb->dev);
3529 "disable_flush", CTLFLAG_RW, &xbb->disable_flush, 0,
3533 "flush_interval", CTLFLAG_RW, &xbb->flush_interval, 0,
3537 "no_coalesce_reqs", CTLFLAG_RW, &xbb->no_coalesce_reqs,0,
3541 "reqs_received", CTLFLAG_RW, &xbb->reqs_received,
3545 "reqs_completed", CTLFLAG_RW, &xbb->reqs_completed,
3550 &xbb->reqs_queued_for_completion,
3555 &xbb->reqs_completed_with_error,
3559 "forced_dispatch", CTLFLAG_RW, &xbb->forced_dispatch,
3563 "normal_dispatch", CTLFLAG_RW, &xbb->normal_dispatch,
3567 "total_dispatch", CTLFLAG_RW, &xbb->total_dispatch,
3571 "kva_shortages", CTLFLAG_RW, &xbb->kva_shortages,
3576 &xbb->request_shortages,
3580 "max_requests", CTLFLAG_RD, &xbb->max_requests, 0,
3585 &xbb->max_request_segments, 0,
3590 &xbb->max_request_size, 0,
3595 &xbb->ring_config.ring_pages, 0,
3603 struct xbb_softc *xbb;
3607 xbb = device_get_softc(dev);
3610 NULL, &xbb->dev_name, NULL);
3619 error = xs_gather(XST_NIL, xenbus_get_otherend_path(xbb->dev),
3620 "device-type", NULL, &xbb->dev_type,
3623 xbb->dev_type = NULL;
3626 "mode", NULL, &xbb->dev_mode,
3629 xbb_attach_failed(xbb, error, "reading backend fields at %s",
3635 if (strchr(xbb->dev_mode, 'w') == NULL)
3636 xbb->flags |= XBBF_READ_ONLY;
3642 error = xbb_open_backend(xbb);
3644 xbb_attach_failed(xbb, error, "Unable to open %s",
3645 xbb->dev_name);
3650 xbb->xbb_stats = devstat_new_entry("xbb", device_get_unit(xbb->dev),
3651 xbb->sector_size,
3657 xbb->xbb_stats_in = devstat_new_entry("xbbi", device_get_unit(xbb->dev),
3658 xbb->sector_size,
3666 xbb_setup_sysctl(xbb);
3672 xbb->io_taskqueue = taskqueue_create_fast(device_get_nameunit(dev),
3675 /*contxt*/&xbb->io_taskqueue);
3676 if (xbb->io_taskqueue == NULL) {
3677 xbb_attach_failed(xbb, error, "Unable to create taskqueue");
3681 taskqueue_start_threads(&xbb->io_taskqueue,
3688 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3691 xbb_attach_failed(xbb, error, "writing %s/hotplug-status",
3692 xenbus_get_node(xbb->dev));
3696 xbb->hotplug_done = true;
3699 if (xenbus_get_otherend_state(xbb->dev) == XenbusStateInitialised)
3700 xbb_connect(xbb);
3713 struct xbb_softc *xbb;
3725 xbb = device_get_softc(dev);
3726 xbb->dev = dev;
3727 xbb->otherend_id = xenbus_get_otherend_id(dev);
3728 TASK_INIT(&xbb->io_task, /*priority*/0, xbb_run_queue, xbb);
3729 mtx_init(&xbb->lock, device_get_nameunit(dev), NULL, MTX_DEF);
3735 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3738 xbb_attach_failed(xbb, error, "writing %s/feature-barrier",
3739 xenbus_get_node(xbb->dev));
3743 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3746 xbb_attach_failed(xbb, error, "writing %s/feature-flush-cache",
3747 xenbus_get_node(xbb->dev));
3752 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3755 xbb_attach_failed(xbb, error, "writing %s/max-ring-page-order",
3756 xenbus_get_node(xbb->dev));
3764 KASSERT(!xbb->hotplug_done, ("Hotplug scripts already executed"));
3765 watch_path = xs_join(xenbus_get_node(xbb->dev), "physical-device-path");
3766 xbb->hotplug_watch.callback_data = (uintptr_t)dev;
3767 xbb->hotplug_watch.callback = xbb_attach_disk;
3768 KASSERT(xbb->hotplug_watch.node == NULL, ("watch node already setup"));
3769 xbb->hotplug_watch.node = strdup(sbuf_data(watch_path), M_XENBLOCKBACK);
3775 xbb->hotplug_watch.max_pending = 1;
3777 error = xs_register_watch(&xbb->hotplug_watch);
3779 xbb_attach_failed(xbb, error, "failed to create watch on %s",
3780 xbb->hotplug_watch.node);
3781 free(xbb->hotplug_watch.node, M_XENBLOCKBACK);
3807 struct xbb_softc *xbb;
3811 xbb = device_get_softc(dev);
3812 mtx_lock(&xbb->lock);
3813 while (xbb_shutdown(xbb) == EAGAIN) {
3814 msleep(xbb, &xbb->lock, /*wakeup prio unchanged*/0,
3817 mtx_unlock(&xbb->lock);
3821 if (xbb->io_taskqueue != NULL)
3822 taskqueue_free(xbb->io_taskqueue);
3824 if (xbb->xbb_stats != NULL)
3825 devstat_remove_entry(xbb->xbb_stats);
3827 if (xbb->xbb_stats_in != NULL)
3828 devstat_remove_entry(xbb->xbb_stats_in);
3830 xbb_close_backend(xbb);
3832 if (xbb->dev_mode != NULL) {
3833 free(xbb->dev_mode, M_XENSTORE);
3834 xbb->dev_mode = NULL;
3837 if (xbb->dev_type != NULL) {
3838 free(xbb->dev_type, M_XENSTORE);
3839 xbb->dev_type = NULL;
3842 if (xbb->dev_name != NULL) {
3843 free(xbb->dev_name, M_XENSTORE);
3844 xbb->dev_name = NULL;
3847 mtx_destroy(&xbb->lock);
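
The detach path above cannot tear anything down while requests are still in flight, so it loops calling xbb_shutdown() and msleep()s on the softc until the shutdown no longer returns EAGAIN; xbb_shutdown() issues the matching wakeup() once the last resources are released. Outside the kernel the same drain-then-free idea is usually written with a condition variable; a hedged analogue:

#include <pthread.h>
#include <stdbool.h>

struct dev_state {
        pthread_mutex_t lock;
        pthread_cond_t drained;         /* signalled when active_requests hits 0 */
        int active_requests;
        bool shutting_down;
};

/* Teardown: block until all outstanding work has completed. */
static void
dev_detach(struct dev_state *d)
{
        pthread_mutex_lock(&d->lock);
        d->shutting_down = true;        /* stop accepting new requests */
        while (d->active_requests != 0)
                pthread_cond_wait(&d->drained, &d->lock);
        pthread_mutex_unlock(&d->lock);
        /* now safe to free queues, stats entries and the lock itself */
}

/* Completion path: called whenever a request finishes. */
static void
dev_request_done(struct dev_state *d)
{
        pthread_mutex_lock(&d->lock);
        if (--d->active_requests == 0 && d->shutting_down)
                pthread_cond_signal(&d->drained);
        pthread_mutex_unlock(&d->lock);
}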
3898 struct xbb_softc *xbb = device_get_softc(dev);
3902 xenbus_strstate(xenbus_get_state(xbb->dev)));
3909 xbb_connect(xbb);
3913 mtx_lock(&xbb->lock);
3914 xbb_shutdown(xbb);
3915 mtx_unlock(&xbb->lock);
3917 xenbus_set_state(xbb->dev, XenbusStateClosed);
3920 xenbus_dev_fatal(xbb->dev, EINVAL, "saw state %d at frontend",