Lines Matching refs:xbb in /freebsd-12-stable/sys/dev/xen/blkback/

138     printf("xbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
174 static void xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt,
176 static int xbb_shutdown(struct xbb_softc *xbb);
192 struct xbb_softc *xbb;
295 * request list free pool (xbb->reqlist_free_stailq) and pending
296 * requests waiting for execution (xbb->reqlist_pending_stailq).
499 * Only a single file based request is outstanding per-xbb instance,
540 typedef int (*xbb_dispatch_t)(struct xbb_softc *xbb,
723 * (e.g. xbb->media_size >> xbb->sector_size_shift).
818 * \param xbb Per-instance xbb configuration structure.
824 xbb_get_req(struct xbb_softc *xbb)
830 mtx_assert(&xbb->lock, MA_OWNED);
832 if ((req = STAILQ_FIRST(&xbb->request_free_stailq)) != NULL) {
833 STAILQ_REMOVE_HEAD(&xbb->request_free_stailq, links);
834 xbb->active_request_count++;
843 * \param xbb Per-instance xbb configuration structure.
847 xbb_release_req(struct xbb_softc *xbb, struct xbb_xen_req *req)
849 mtx_assert(&xbb->lock, MA_OWNED);
851 STAILQ_INSERT_HEAD(&xbb->request_free_stailq, req, links);
852 xbb->active_request_count--;
854 KASSERT(xbb->active_request_count >= 0,
861 * \param xbb Per-instance xbb configuration structure.
866 xbb_release_reqs(struct xbb_softc *xbb, struct xbb_xen_req_list *req_list,
869 mtx_assert(&xbb->lock, MA_OWNED);
871 STAILQ_CONCAT(&xbb->request_free_stailq, req_list);
872 xbb->active_request_count -= nreqs;
874 KASSERT(xbb->active_request_count >= 0,
960 struct xbb_softc *xbb;
962 xbb = reqlist->xbb;
964 return ((uintptr_t)(xbb->gnt_base_addr +
965 (uintptr_t)(reqlist->kva - xbb->kva) +
972 * \param xbb Per-instance xbb configuration structure.
975 * \param have_lock If set, xbb lock is already held.
985 xbb_get_kva(struct xbb_softc *xbb, int nr_pages)
997 mtx_lock(&xbb->lock);
1002 bit_ffc(xbb->kva_free, xbb->reqlist_kva_pages, &first_clear);
1011 for (i = first_clear, num_clear = 0; i < xbb->reqlist_kva_pages; i++) {
1018 if (bit_test(xbb->kva_free, i)) {
1033 bit_nset(xbb->kva_free, first_clear,
1036 free_kva = xbb->kva +
1039 KASSERT(free_kva >= (uint8_t *)xbb->kva &&
1041 (uint8_t *)xbb->ring_config.va,
1044 nr_pages * PAGE_SIZE, (uintmax_t)xbb->kva,
1045 (uintmax_t)xbb->ring_config.va));
1053 xbb->flags |= XBBF_RESOURCE_SHORTAGE;
1054 xbb->kva_shortages++;
1057 mtx_unlock(&xbb->lock);
1065 * \param xbb Per-instance xbb configuration structure.
1070 xbb_free_kva(struct xbb_softc *xbb, uint8_t *kva_ptr, int nr_pages)
1074 mtx_assert(&xbb->lock, MA_OWNED);
1076 start_page = (intptr_t)(kva_ptr - xbb->kva) >> PAGE_SHIFT;
1077 bit_nclear(xbb->kva_free, start_page, start_page + nr_pages - 1);
1115 * \param xbb Per-instance xbb configuration structure.
1121 xbb_get_reqlist(struct xbb_softc *xbb)
1127 mtx_assert(&xbb->lock, MA_OWNED);
1129 if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) {
1131 STAILQ_REMOVE_HEAD(&xbb->reqlist_free_stailq, links);
1147 * \param xbb Per-instance xbb configuration structure.
1153 xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
1157 mtx_assert(&xbb->lock, MA_OWNED);
1160 wakeup = xbb->flags & XBBF_RESOURCE_SHORTAGE;
1161 xbb->flags &= ~XBBF_RESOURCE_SHORTAGE;
1165 xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments);
1167 xbb_release_reqs(xbb, &reqlist->contig_req_list, reqlist->num_children);
1169 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
1171 if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
1178 xbb_shutdown(xbb);
1182 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
1188 * \param xbb Per-instance xbb configuration structure.
1196 xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist,
1205 mtx_lock(&xbb->lock);
1211 if ((xbb->flags & XBBF_SHUTDOWN) != 0) {
1212 mtx_unlock(&xbb->lock);
1220 nreqlist = xbb_get_reqlist(xbb);
1226 nreq = xbb_get_req(xbb);
1230 mtx_unlock(&xbb->lock);
1236 STAILQ_INSERT_TAIL(&xbb->reqlist_pending_stailq, nreqlist,
1245 if (xbb->abi != BLKIF_PROTOCOL_NATIVE) {
1253 devstat_start_transaction(xbb->xbb_stats_in, &nreq->ds_t0);
1267 xbb->flags |= XBBF_RESOURCE_SHORTAGE;
1268 xbb->request_shortages++;
1271 xbb_release_req(xbb, nreq);
1274 xbb_release_reqlist(xbb, nreqlist, /*wakeup*/ 0);
1276 mtx_unlock(&xbb->lock);
1284 * \param xbb Per-instance xbb configuration structure.
1290 xbb_queue_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status)
1305 mtx_assert(&xbb->lock, MA_OWNED);
1312 switch (xbb->abi) {
1314 resp = RING_GET_RESPONSE(&xbb->rings.native,
1315 xbb->rings.native.rsp_prod_pvt);
1319 RING_GET_RESPONSE(&xbb->rings.x86_32,
1320 xbb->rings.x86_32.rsp_prod_pvt);
1324 RING_GET_RESPONSE(&xbb->rings.x86_64,
1325 xbb->rings.x86_64.rsp_prod_pvt);
1336 xbb->reqs_completed_with_error++;
1338 xbb->rings.common.rsp_prod_pvt++;
1340 xbb->reqs_queued_for_completion++;
1347 * \param xbb Per-instance xbb configuration structure.
1355 xbb_push_responses(struct xbb_softc *xbb, int *run_taskqueue, int *notify)
1362 mtx_assert(&xbb->lock, MA_OWNED);
1366 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xbb->rings.common, *notify);
1368 if (xbb->rings.common.rsp_prod_pvt == xbb->rings.common.req_cons) {
1375 RING_FINAL_CHECK_FOR_REQUESTS(&xbb->rings.common, more_to_do);
1376 } else if (RING_HAS_UNCONSUMED_REQUESTS(&xbb->rings.common)) {
1381 xbb->reqs_completed += xbb->reqs_queued_for_completion;
1382 xbb->reqs_queued_for_completion = 0;
1390 * \param xbb Per-instance xbb configuration structure.
1394 xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1405 mtx_lock(&xbb->lock);
1420 xbb_queue_response(xbb, nreq, reqlist->status);
1430 devstat_end_transaction(xbb->xbb_stats_in,
1447 devstat_end_transaction(xbb->xbb_stats,
1454 xbb_release_reqlist(xbb, reqlist, /*wakeup*/ 1);
1456 xbb_push_responses(xbb, &run_taskqueue, &notify);
1458 mtx_unlock(&xbb->lock);
1461 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
1464 xen_intr_signal(xbb->xen_intr_handle);
1477 struct xbb_softc *xbb;
1481 xbb = reqlist->xbb;
1503 bio->bio_error, xbb->dev_name);
1507 && xenbus_get_state(xbb->dev) == XenbusStateConnected) {
1514 xenbus_set_state(xbb->dev, XenbusStateClosing);
1534 xbb_complete_reqlist(xbb, reqlist);
1543 * \param xbb Per-instance xbb configuration structure.
1554 xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1582 reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments);
1592 devstat_start_transaction(xbb->xbb_stats, &reqlist->ds_t0);
1602 if ((xbb->flags & XBBF_READ_ONLY) != 0) {
1604 xbb->dev_name);
1619 if (xbb->disable_flush != 0) {
1629 if (xbb->flush_interval != 0) {
1630 if (++(xbb->flush_count) < xbb->flush_interval) {
1633 xbb->flush_count = 0;
1648 reqlist->xbb = xbb;
1649 xbb_sg = xbb->xbb_sgs;
1650 map = xbb->maps;
1669 || __predict_false(nseg > xbb->max_request_segments)) {
1703 xbb->ring_config.gnt_addr,
1707 (uintmax_t)xbb->ring_config.gnt_addr));
1711 map->dom = xbb->otherend_id;
1723 nr_sects = (nr_sects << 9) >> xbb->sector_size_shift;
1727 ((xbb->sector_size >> 9) - 1)) != 0) {
1728 device_printf(xbb->dev, "%s: I/O size (%d) is not "
1732 xbb->sector_size);
1739 xbb->maps, reqlist->nr_segments);
1745 for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments;
1762 xbb->media_num_sectors) {
1769 xbb->dev_name);
1776 error = xbb->dispatch_io(xbb,
1790 xbb_complete_reqlist(xbb, reqlist);
1827 struct xbb_softc *xbb;
1835 xbb = (struct xbb_softc *)context;
1836 rings = &xbb->rings;
1857 reqlist = STAILQ_LAST(&xbb->reqlist_pending_stailq,
1892 switch (xbb->abi) {
1894 ring_req = RING_GET_REQUEST(&xbb->rings.native,
1902 &xbb->rings.x86_32, rings->common.req_cons);
1912 ring_req64 =RING_GET_REQUEST(&xbb->rings.x86_64,
1933 && ((xbb->no_coalesce_reqs != 0)
1934 || ((xbb->no_coalesce_reqs == 0)
1938 xbb->max_reqlist_segments))))) {
1948 retval = xbb_get_resources(xbb, &reqlist, ring_req,
1949 xbb->rings.common.req_cons);
1972 xbb->rings.common.req_cons++;
1973 xbb->reqs_received++;
1982 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
1995 STAILQ_REMOVE_HEAD(&xbb->reqlist_pending_stailq, links);
1997 retval = xbb_dispatch_io(xbb, reqlist);
2006 STAILQ_INSERT_HEAD(&xbb->reqlist_pending_stailq,
2021 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
2024 xbb->forced_dispatch++;
2026 xbb->normal_dispatch++;
2028 xbb->total_dispatch++;
2042 struct xbb_softc *xbb;
2045 xbb = (struct xbb_softc *)arg;
2046 taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task);
2051 SDT_PROVIDER_DEFINE(xbb);
2052 SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_dev, flush, "int");
2053 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, read, "int", "uint64_t",
2055 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, write, "int",
2062 * \param xbb Per-instance xbb configuration structure.
2071 xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
2085 dev_data = &xbb->backend.dev;
2087 << xbb->sector_size_shift;
2111 SDT_PROBE1(xbb, kernel, xbb_dispatch_dev, flush,
2112 device_get_unit(xbb->dev));
2119 xbb_sg = xbb->xbb_sgs;
2131 if ((bio->bio_length & (xbb->sector_size - 1)) != 0) {
2135 __func__, xbb->otherend_id);
2147 if ((bio_offset & (xbb->sector_size - 1)) != 0){
2150 xbb->otherend_id);
2168 bio->bio_pblkno = bio_offset >> xbb->sector_size_shift;
2177 if ((bio->bio_length & (xbb->sector_size - 1)) != 0) {
2181 __func__, xbb->otherend_id);
2209 SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, read,
2210 device_get_unit(xbb->dev),
2214 SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, write,
2215 device_get_unit(xbb->dev),
2231 SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_file, flush, "int");
2232 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, read, "int", "uint64_t",
2234 SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, write, "int",
2240 * \param xbb Per-instance xbb configuration structure.
2249 xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
2264 file_data = &xbb->backend.file;
2278 SDT_PROBE1(xbb, kernel, xbb_dispatch_file, flush,
2279 device_get_unit(xbb->dev));
2281 (void) vn_start_write(xbb->vn, &mountpoint, V_WAIT);
2283 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2284 error = VOP_FSYNC(xbb->vn, MNT_WAIT, curthread);
2285 VOP_UNLOCK(xbb->vn, 0);
2297 << xbb->sector_size_shift;
2301 xbb_sg = xbb->xbb_sgs;
2375 SDT_PROBE3(xbb, kernel, xbb_dispatch_file, read,
2376 device_get_unit(xbb->dev), xuio.uio_offset,
2379 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2401 error = VOP_READ(xbb->vn, &xuio, (flags & BIO_ORDERED) ?
2404 VOP_UNLOCK(xbb->vn, 0);
2409 SDT_PROBE3(xbb, kernel, xbb_dispatch_file, write,
2410 device_get_unit(xbb->dev), xuio.uio_offset,
2413 (void)vn_start_write(xbb->vn, &mountpoint, V_WAIT);
2415 vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY);
2435 error = VOP_WRITE(xbb->vn, &xuio, (flags & BIO_ORDERED) ?
2437 VOP_UNLOCK(xbb->vn, 0);
2474 xbb_complete_reqlist(xbb, reqlist);
2484 * \param xbb Per-instance xbb configuration structure.
2487 xbb_close_backend(struct xbb_softc *xbb)
2490 DPRINTF("closing dev=%s\n", xbb->dev_name);
2491 if (xbb->vn) {
2494 if ((xbb->flags & XBBF_READ_ONLY) == 0)
2497 switch (xbb->device_type) {
2499 if (xbb->backend.dev.csw) {
2500 dev_relthread(xbb->backend.dev.cdev,
2501 xbb->backend.dev.dev_ref);
2502 xbb->backend.dev.csw = NULL;
2503 xbb->backend.dev.cdev = NULL;
2514 (void)vn_close(xbb->vn, flags, NOCRED, curthread);
2515 xbb->vn = NULL;
2517 switch (xbb->device_type) {
2521 if (xbb->backend.file.cred != NULL) {
2522 crfree(xbb->backend.file.cred);
2523 xbb->backend.file.cred = NULL;
2538 * \param xbb Per-instance xbb configuration structure.
2543 xbb_open_dev(struct xbb_softc *xbb)
2550 xbb->device_type = XBB_TYPE_DISK;
2551 xbb->dispatch_io = xbb_dispatch_dev;
2552 xbb->backend.dev.cdev = xbb->vn->v_rdev;
2553 xbb->backend.dev.csw = dev_refthread(xbb->backend.dev.cdev,
2554 &xbb->backend.dev.dev_ref);
2555 if (xbb->backend.dev.csw == NULL)
2558 error = VOP_GETATTR(xbb->vn, &vattr, NOCRED);
2560 xenbus_dev_fatal(xbb->dev, error, "error getting "
2562 xbb->dev_name);
2567 dev = xbb->vn->v_rdev;
2570 xenbus_dev_fatal(xbb->dev, ENODEV, "no d_ioctl for "
2571 "device %s!", xbb->dev_name);
2576 (caddr_t)&xbb->sector_size, FREAD,
2579 xenbus_dev_fatal(xbb->dev, error,
2581 "for device %s", xbb->dev_name);
2586 (caddr_t)&xbb->media_size, FREAD,
2589 xenbus_dev_fatal(xbb->dev, error,
2591 "for device %s", xbb->dev_name);
2601 * \param xbb Per-instance xbb configuration structure.
2606 xbb_open_file(struct xbb_softc *xbb)
2612 file_data = &xbb->backend.file;
2613 xbb->device_type = XBB_TYPE_FILE;
2614 xbb->dispatch_io = xbb_dispatch_file;
2615 error = VOP_GETATTR(xbb->vn, &vattr, curthread->td_ucred);
2617 xenbus_dev_fatal(xbb->dev, error,
2619 "for file %s", xbb->dev_name);
2628 if (VOP_ISLOCKED(xbb->vn) != LK_EXCLUSIVE) {
2629 vn_lock(xbb->vn, LK_UPGRADE | LK_RETRY);
2630 if (xbb->vn->v_iflag & VI_DOOMED) {
2632 xenbus_dev_fatal(xbb->dev, error,
2634 xbb->dev_name);
2641 xbb->media_size = vattr.va_size;
2653 xbb->sector_size = vattr.va_blocksize;
2655 xbb->sector_size = 512;
2661 if (xbb->media_size < xbb->sector_size) {
2663 xenbus_dev_fatal(xbb->dev, error,
2665 xbb->dev_name,
2666 (uintmax_t)xbb->media_size,
2667 xbb->sector_size);
2675 * \param xbb Per-instance xbb configuration structure.
2680 xbb_open_backend(struct xbb_softc *xbb)
2689 DPRINTF("opening dev=%s\n", xbb->dev_name);
2692 xenbus_dev_fatal(xbb->dev, ENOENT,
2697 if ((xbb->flags & XBBF_READ_ONLY) == 0)
2703 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, xbb->dev_name, curthread);
2712 if (xbb->dev_name[0] != '/') {
2717 dev_name = malloc(strlen(xbb->dev_name)
2722 xbb->dev_name);
2723 free(xbb->dev_name, M_XENBLOCKBACK);
2724 xbb->dev_name = dev_name;
2728 xenbus_dev_fatal(xbb->dev, error, "error opening device %s",
2729 xbb->dev_name);
2735 xbb->vn = nd.ni_vp;
2738 if (vn_isdisk(xbb->vn, &error)) {
2739 error = xbb_open_dev(xbb);
2740 } else if (xbb->vn->v_type == VREG) {
2741 error = xbb_open_file(xbb);
2744 xenbus_dev_fatal(xbb->dev, error, "%s is not a disk "
2745 "or file", xbb->dev_name);
2747 VOP_UNLOCK(xbb->vn, 0);
2750 xbb_close_backend(xbb);
2754 xbb->sector_size_shift = fls(xbb->sector_size) - 1;
2755 xbb->media_num_sectors = xbb->media_size >> xbb->sector_size_shift;
2758 (xbb->device_type == XBB_TYPE_DISK) ? "dev" : "file",
2759 xbb->dev_name, xbb->sector_size, xbb->media_size);
2768 * \param xbb Per-instance xbb configuration structure.
2771 xbb_free_communication_mem(struct xbb_softc *xbb)
2773 if (xbb->kva != 0) {
2774 if (xbb->pseudo_phys_res != NULL) {
2775 xenmem_free(xbb->dev, xbb->pseudo_phys_res_id,
2776 xbb->pseudo_phys_res);
2777 xbb->pseudo_phys_res = NULL;
2780 xbb->kva = 0;
2781 xbb->gnt_base_addr = 0;
2782 if (xbb->kva_free != NULL) {
2783 free(xbb->kva_free, M_XENBLOCKBACK);
2784 xbb->kva_free = NULL;
2791 * \param xbb Per-instance xbb configuration structure.
2794 xbb_disconnect(struct xbb_softc *xbb)
2803 if ((xbb->flags & XBBF_RING_CONNECTED) == 0)
2806 mtx_unlock(&xbb->lock);
2807 xen_intr_unbind(&xbb->xen_intr_handle);
2808 taskqueue_drain(xbb->io_taskqueue, &xbb->io_task);
2809 mtx_lock(&xbb->lock);
2815 if (xbb->active_request_count != 0)
2819 ring_idx < xbb->ring_config.ring_pages;
2822 op->host_addr = xbb->ring_config.gnt_addr
2824 op->dev_bus_addr = xbb->ring_config.bus_addr[ring_idx];
2825 op->handle = xbb->ring_config.handle[ring_idx];
2829 xbb->ring_config.ring_pages);
2833 xbb_free_communication_mem(xbb);
2835 if (xbb->requests != NULL) {
2836 free(xbb->requests, M_XENBLOCKBACK);
2837 xbb->requests = NULL;
2840 if (xbb->request_lists != NULL) {
2845 for (i = 0, reqlist = xbb->request_lists;
2846 i < xbb->max_requests; i++, reqlist++){
2858 free(xbb->request_lists, M_XENBLOCKBACK);
2859 xbb->request_lists = NULL;
2862 xbb->flags &= ~XBBF_RING_CONNECTED;
2871 * \param xbb Per-instance xbb configuration structure.
2874 xbb_connect_ring(struct xbb_softc *xbb)
2881 if ((xbb->flags & XBBF_RING_CONNECTED) != 0)
2888 xbb->ring_config.va = xbb->kva
2889 + (xbb->kva_size
2890 - (xbb->ring_config.ring_pages * PAGE_SIZE));
2891 xbb->ring_config.gnt_addr = xbb->gnt_base_addr
2892 + (xbb->kva_size
2893 - (xbb->ring_config.ring_pages * PAGE_SIZE));
2896 ring_idx < xbb->ring_config.ring_pages;
2899 gnt->host_addr = xbb->ring_config.gnt_addr
2902 gnt->ref = xbb->ring_config.ring_ref[ring_idx];
2903 gnt->dom = xbb->otherend_id;
2907 xbb->ring_config.ring_pages);
2912 ring_idx < xbb->ring_config.ring_pages;
2918 xbb->ring_config.va = 0;
2919 xenbus_dev_fatal(xbb->dev, EACCES,
2924 for (i = 0, j = 0; i < xbb->ring_config.ring_pages;
2942 xbb->ring_config.handle[ring_idx] = gnt->handle;
2943 xbb->ring_config.bus_addr[ring_idx] = gnt->dev_bus_addr;
2947 switch (xbb->abi) {
2951 sring = (blkif_sring_t *)xbb->ring_config.va;
2952 BACK_RING_INIT(&xbb->rings.native, sring,
2953 xbb->ring_config.ring_pages * PAGE_SIZE);
2959 sring_x86_32 = (blkif_x86_32_sring_t *)xbb->ring_config.va;
2960 BACK_RING_INIT(&xbb->rings.x86_32, sring_x86_32,
2961 xbb->ring_config.ring_pages * PAGE_SIZE);
2967 sring_x86_64 = (blkif_x86_64_sring_t *)xbb->ring_config.va;
2968 BACK_RING_INIT(&xbb->rings.x86_64, sring_x86_64,
2969 xbb->ring_config.ring_pages * PAGE_SIZE);
2976 xbb->flags |= XBBF_RING_CONNECTED;
2978 error = xen_intr_bind_remote_port(xbb->dev,
2979 xbb->otherend_id,
2980 xbb->ring_config.evtchn,
2983 /*arg*/xbb,
2985 &xbb->xen_intr_handle);
2987 (void)xbb_disconnect(xbb);
2988 xenbus_dev_fatal(xbb->dev, error, "binding event channel");
3002 * \param xbb Per-instance xbb configuration structure.
3008 xbb_alloc_communication_mem(struct xbb_softc *xbb)
3010 xbb->reqlist_kva_pages = xbb->max_requests * xbb->max_request_segments;
3011 xbb->reqlist_kva_size = xbb->reqlist_kva_pages * PAGE_SIZE;
3012 xbb->kva_size = xbb->reqlist_kva_size +
3013 (xbb->ring_config.ring_pages * PAGE_SIZE);
3015 xbb->kva_free = bit_alloc(xbb->reqlist_kva_pages, M_XENBLOCKBACK, M_NOWAIT);
3016 if (xbb->kva_free == NULL)
3020 device_get_nameunit(xbb->dev), xbb->kva_size,
3021 xbb->reqlist_kva_size);
3028 xbb->pseudo_phys_res_id = 0;
3029 xbb->pseudo_phys_res = xenmem_alloc(xbb->dev, &xbb->pseudo_phys_res_id,
3030 xbb->kva_size);
3031 if (xbb->pseudo_phys_res == NULL) {
3032 xbb->kva = 0;
3035 xbb->kva = (vm_offset_t)rman_get_virtual(xbb->pseudo_phys_res);
3036 xbb->gnt_base_addr = rman_get_start(xbb->pseudo_phys_res);
3039 device_get_nameunit(xbb->dev), (uintmax_t)xbb->kva,
3040 (uintmax_t)xbb->gnt_base_addr);
3047 * \param xbb Per-instance xbb configuration structure.
3050 xbb_collect_frontend_info(struct xbb_softc *xbb)
3059 otherend_path = xenbus_get_otherend_path(xbb->dev);
3064 xbb->ring_config.ring_pages = 1;
3065 xbb->max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
3066 xbb->max_request_size = xbb->max_request_segments * PAGE_SIZE;
3073 &xbb->ring_config.evtchn);
3075 xenbus_dev_fatal(xbb->dev, error,
3078 xenbus_get_otherend_path(xbb->dev));
3096 xbb->max_requests = 32;
3101 xbb->ring_config.ring_pages = 1 << ring_page_order;
3102 ring_size = PAGE_SIZE * xbb->ring_config.ring_pages;
3103 xbb->max_requests = BLKIF_MAX_RING_REQUESTS(ring_size);
3105 if (xbb->ring_config.ring_pages > XBB_MAX_RING_PAGES) {
3106 xenbus_dev_fatal(xbb->dev, EINVAL,
3110 xbb->ring_config.ring_pages,
3115 if (xbb->ring_config.ring_pages == 1) {
3118 &xbb->ring_config.ring_ref[0],
3121 xenbus_dev_fatal(xbb->dev, error,
3125 xenbus_get_otherend_path(xbb->dev));
3130 for (ring_idx = 0; ring_idx < xbb->ring_config.ring_pages;
3138 &xbb->ring_config.ring_ref[ring_idx]);
3140 xenbus_dev_fatal(xbb->dev, error,
3160 xbb->abi = BLKIF_PROTOCOL_NATIVE;
3163 xbb->abi = BLKIF_PROTOCOL_X86_32;
3166 xbb->abi = BLKIF_PROTOCOL_X86_64;
3169 xenbus_dev_fatal(xbb->dev, EINVAL,
3181 * \param xbb Per-instance xbb configuration structure.
3184 xbb_alloc_requests(struct xbb_softc *xbb)
3192 xbb->requests = malloc(xbb->max_requests * sizeof(*xbb->requests),
3194 if (xbb->requests == NULL) {
3195 xenbus_dev_fatal(xbb->dev, ENOMEM,
3200 req = xbb->requests;
3201 last_req = &xbb->requests[xbb->max_requests - 1];
3202 STAILQ_INIT(&xbb->request_free_stailq);
3204 STAILQ_INSERT_TAIL(&xbb->request_free_stailq, req, links);
3211 xbb_alloc_request_lists(struct xbb_softc *xbb)
3220 xbb->request_lists = malloc(xbb->max_requests *
3221 sizeof(*xbb->request_lists), M_XENBLOCKBACK, M_NOWAIT|M_ZERO);
3222 if (xbb->request_lists == NULL) {
3223 xenbus_dev_fatal(xbb->dev, ENOMEM,
3228 STAILQ_INIT(&xbb->reqlist_free_stailq);
3229 STAILQ_INIT(&xbb->reqlist_pending_stailq);
3230 for (i = 0; i < xbb->max_requests; i++) {
3233 reqlist = &xbb->request_lists[i];
3235 reqlist->xbb = xbb;
3238 reqlist->bounce = malloc(xbb->max_reqlist_size,
3241 xenbus_dev_fatal(xbb->dev, ENOMEM,
3248 reqlist->gnt_handles = malloc(xbb->max_reqlist_segments *
3252 xenbus_dev_fatal(xbb->dev, ENOMEM,
3258 for (seg = 0; seg < xbb->max_reqlist_segments; seg++)
3261 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
3270 * \param xbb Per-instance xbb configuration structure.
3273 xbb_publish_backend_info(struct xbb_softc *xbb)
3280 our_path = xenbus_get_node(xbb->dev);
3284 xenbus_dev_fatal(xbb->dev, error,
3292 "%"PRIu64, xbb->media_num_sectors);
3299 xbb->flags & XBBF_READ_ONLY
3306 xbb->sector_size);
3314 xenbus_dev_fatal(xbb->dev, error, "ending transaction");
3319 xenbus_dev_fatal(xbb->dev, error, "writing %s/%s",
3329 * \param xbb Per-instance xbb configuration structure.
3332 xbb_connect(struct xbb_softc *xbb)
3336 if (!xbb->hotplug_done ||
3337 (xenbus_get_state(xbb->dev) != XenbusStateInitWait) ||
3338 (xbb_collect_frontend_info(xbb) != 0))
3341 xbb->flags &= ~XBBF_SHUTDOWN;
3348 xbb->max_reqlist_segments = MIN(xbb->max_request_segments *
3349 xbb->max_requests, XBB_MAX_SEGMENTS_PER_REQLIST);
3355 xbb->max_reqlist_size = xbb->max_reqlist_segments * PAGE_SIZE;
3358 error = xbb_alloc_communication_mem(xbb);
3360 xenbus_dev_fatal(xbb->dev, error,
3365 error = xbb_alloc_requests(xbb);
3371 error = xbb_alloc_request_lists(xbb);
3380 error = xbb_connect_ring(xbb);
3386 if (xbb_publish_backend_info(xbb) != 0) {
3392 (void)xbb_disconnect(xbb);
3397 xenbus_set_state(xbb->dev, XenbusStateConnected);
3404 * \param xbb Per-instance xbb configuration structure.
3412 xbb_shutdown(struct xbb_softc *xbb)
3426 if ((xbb->flags & XBBF_IN_SHUTDOWN) != 0)
3429 xbb->flags |= XBBF_IN_SHUTDOWN;
3430 mtx_unlock(&xbb->lock);
3432 if (xbb->hotplug_watch.node != NULL) {
3433 xs_unregister_watch(&xbb->hotplug_watch);
3434 free(xbb->hotplug_watch.node, M_XENBLOCKBACK);
3435 xbb->hotplug_watch.node = NULL;
3437 xbb->hotplug_done = false;
3439 if (xenbus_get_state(xbb->dev) < XenbusStateClosing)
3440 xenbus_set_state(xbb->dev, XenbusStateClosing);
3442 frontState = xenbus_get_otherend_state(xbb->dev);
3443 mtx_lock(&xbb->lock);
3444 xbb->flags &= ~XBBF_IN_SHUTDOWN;
3453 xbb->flags |= XBBF_SHUTDOWN;
3456 error = xbb_disconnect(xbb);
3472 wakeup(xbb);
3481 * \param xbb Per-instance xbb configuration structure.
3486 xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt, ...)
3493 xs_vprintf(XST_NIL, xenbus_get_node(xbb->dev),
3496 xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3499 xenbus_dev_vfatal(xbb->dev, err, fmt, ap);
3502 xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3504 mtx_lock(&xbb->lock);
3505 xbb_shutdown(xbb);
3506 mtx_unlock(&xbb->lock);
3533 * \param xbb Xen Block Back softc.
3537 xbb_setup_sysctl(struct xbb_softc *xbb)
3542 sysctl_ctx = device_get_sysctl_ctx(xbb->dev);
3546 sysctl_tree = device_get_sysctl_tree(xbb->dev);
3551 "disable_flush", CTLFLAG_RW, &xbb->disable_flush, 0,
3555 "flush_interval", CTLFLAG_RW, &xbb->flush_interval, 0,
3559 "no_coalesce_reqs", CTLFLAG_RW, &xbb->no_coalesce_reqs,0,
3563 "reqs_received", CTLFLAG_RW, &xbb->reqs_received,
3567 "reqs_completed", CTLFLAG_RW, &xbb->reqs_completed,
3572 &xbb->reqs_queued_for_completion,
3577 &xbb->reqs_completed_with_error,
3581 "forced_dispatch", CTLFLAG_RW, &xbb->forced_dispatch,
3585 "normal_dispatch", CTLFLAG_RW, &xbb->normal_dispatch,
3589 "total_dispatch", CTLFLAG_RW, &xbb->total_dispatch,
3593 "kva_shortages", CTLFLAG_RW, &xbb->kva_shortages,
3598 &xbb->request_shortages,
3602 "max_requests", CTLFLAG_RD, &xbb->max_requests, 0,
3607 &xbb->max_request_segments, 0,
3612 &xbb->max_request_size, 0,
3617 &xbb->ring_config.ring_pages, 0,
3625 struct xbb_softc *xbb;
3629 xbb = device_get_softc(dev);
3632 NULL, &xbb->dev_name, NULL);
3641 error = xs_gather(XST_NIL, xenbus_get_otherend_path(xbb->dev),
3642 "device-type", NULL, &xbb->dev_type,
3645 xbb->dev_type = NULL;
3648 "mode", NULL, &xbb->dev_mode,
3651 xbb_attach_failed(xbb, error, "reading backend fields at %s",
3657 if (strchr(xbb->dev_mode, 'w') == NULL)
3658 xbb->flags |= XBBF_READ_ONLY;
3664 error = xbb_open_backend(xbb);
3666 xbb_attach_failed(xbb, error, "Unable to open %s",
3667 xbb->dev_name);
3672 xbb->xbb_stats = devstat_new_entry("xbb", device_get_unit(xbb->dev),
3673 xbb->sector_size,
3679 xbb->xbb_stats_in = devstat_new_entry("xbbi", device_get_unit(xbb->dev),
3680 xbb->sector_size,
3688 xbb_setup_sysctl(xbb);
3694 xbb->io_taskqueue = taskqueue_create_fast(device_get_nameunit(dev),
3697 /*contxt*/&xbb->io_taskqueue);
3698 if (xbb->io_taskqueue == NULL) {
3699 xbb_attach_failed(xbb, error, "Unable to create taskqueue");
3703 taskqueue_start_threads(&xbb->io_taskqueue,
3710 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3713 xbb_attach_failed(xbb, error, "writing %s/hotplug-status",
3714 xenbus_get_node(xbb->dev));
3718 xbb->hotplug_done = true;
3721 if (xenbus_get_otherend_state(xbb->dev) == XenbusStateInitialised)
3722 xbb_connect(xbb);
3735 struct xbb_softc *xbb;
3747 xbb = device_get_softc(dev);
3748 xbb->dev = dev;
3749 xbb->otherend_id = xenbus_get_otherend_id(dev);
3750 TASK_INIT(&xbb->io_task, /*priority*/0, xbb_run_queue, xbb);
3751 mtx_init(&xbb->lock, device_get_nameunit(dev), NULL, MTX_DEF);
3757 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3760 xbb_attach_failed(xbb, error, "writing %s/feature-barrier",
3761 xenbus_get_node(xbb->dev));
3765 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3768 xbb_attach_failed(xbb, error, "writing %s/feature-flush-cache",
3769 xenbus_get_node(xbb->dev));
3774 error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev),
3777 xbb_attach_failed(xbb, error, "writing %s/max-ring-page-order",
3778 xenbus_get_node(xbb->dev));
3786 KASSERT(!xbb->hotplug_done, ("Hotplug scripts already executed"));
3787 watch_path = xs_join(xenbus_get_node(xbb->dev), "physical-device-path");
3788 xbb->hotplug_watch.callback_data = (uintptr_t)dev;
3789 xbb->hotplug_watch.callback = xbb_attach_disk;
3790 KASSERT(xbb->hotplug_watch.node == NULL, ("watch node already setup"));
3791 xbb->hotplug_watch.node = strdup(sbuf_data(watch_path), M_XENBLOCKBACK);
3797 xbb->hotplug_watch.max_pending = 1;
3799 error = xs_register_watch(&xbb->hotplug_watch);
3801 xbb_attach_failed(xbb, error, "failed to create watch on %s",
3802 xbb->hotplug_watch.node);
3803 free(xbb->hotplug_watch.node, M_XENBLOCKBACK);
3829 struct xbb_softc *xbb;
3833 xbb = device_get_softc(dev);
3834 mtx_lock(&xbb->lock);
3835 while (xbb_shutdown(xbb) == EAGAIN) {
3836 msleep(xbb, &xbb->lock, /*wakeup prio unchanged*/0,
3839 mtx_unlock(&xbb->lock);
3843 if (xbb->io_taskqueue != NULL)
3844 taskqueue_free(xbb->io_taskqueue);
3846 if (xbb->xbb_stats != NULL)
3847 devstat_remove_entry(xbb->xbb_stats);
3849 if (xbb->xbb_stats_in != NULL)
3850 devstat_remove_entry(xbb->xbb_stats_in);
3852 xbb_close_backend(xbb);
3854 if (xbb->dev_mode != NULL) {
3855 free(xbb->dev_mode, M_XENSTORE);
3856 xbb->dev_mode = NULL;
3859 if (xbb->dev_type != NULL) {
3860 free(xbb->dev_type, M_XENSTORE);
3861 xbb->dev_type = NULL;
3864 if (xbb->dev_name != NULL) {
3865 free(xbb->dev_name, M_XENSTORE);
3866 xbb->dev_name = NULL;
3869 mtx_destroy(&xbb->lock);
3920 struct xbb_softc *xbb = device_get_softc(dev);
3924 xenbus_strstate(xenbus_get_state(xbb->dev)));
3931 xbb_connect(xbb);
3935 mtx_lock(&xbb->lock);
3936 xbb_shutdown(xbb);
3937 mtx_unlock(&xbb->lock);
3939 xenbus_set_state(xbb->dev, XenbusStateClosed);
3942 xenbus_dev_fatal(xbb->dev, EINVAL, "saw state %d at frontend",
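
The matches around source lines 818-876 and 1196-1274 show the driver's per-instance resource pools: each xbb_softc keeps mutex-protected STAILQ free lists of requests and request lists, hands entries out with "get" helpers, returns them with "release" helpers, and records a resource shortage when a pool runs dry. The fragment below is a minimal, self-contained userland sketch of that same lock-then-dequeue-or-flag-shortage pattern, not code taken from the driver; every name in it (demo_softc, demo_req, DEMO_RESOURCE_SHORTAGE) is invented for illustration, and a pthread mutex stands in for the kernel mtx(9) lock.

	/* Sketch of a mutex-protected STAILQ free pool; names are invented. */
	#include <sys/queue.h>
	#include <pthread.h>
	#include <stdio.h>

	#define DEMO_RESOURCE_SHORTAGE 0x01

	struct demo_req {
		STAILQ_ENTRY(demo_req) links;	/* free-list linkage */
		int id;
	};

	struct demo_softc {
		pthread_mutex_t lock;		/* protects the fields below */
		STAILQ_HEAD(, demo_req) request_free_stailq;
		int active_request_count;
		int flags;
	};

	/* Pull one request off the free pool, or record a shortage. */
	static struct demo_req *
	demo_get_req(struct demo_softc *sc)
	{
		struct demo_req *req;

		pthread_mutex_lock(&sc->lock);
		if ((req = STAILQ_FIRST(&sc->request_free_stailq)) != NULL) {
			STAILQ_REMOVE_HEAD(&sc->request_free_stailq, links);
			sc->active_request_count++;
		} else {
			sc->flags |= DEMO_RESOURCE_SHORTAGE;
		}
		pthread_mutex_unlock(&sc->lock);
		return (req);
	}

	/* Return a request to the pool and clear any shortage flag. */
	static void
	demo_release_req(struct demo_softc *sc, struct demo_req *req)
	{
		pthread_mutex_lock(&sc->lock);
		STAILQ_INSERT_HEAD(&sc->request_free_stailq, req, links);
		sc->active_request_count--;
		sc->flags &= ~DEMO_RESOURCE_SHORTAGE;
		pthread_mutex_unlock(&sc->lock);
	}

	int
	main(void)
	{
		struct demo_softc sc = { .flags = 0 };
		struct demo_req pool[4];
		struct demo_req *req;
		int i;

		pthread_mutex_init(&sc.lock, NULL);
		STAILQ_INIT(&sc.request_free_stailq);
		for (i = 0; i < 4; i++) {
			pool[i].id = i;
			STAILQ_INSERT_TAIL(&sc.request_free_stailq, &pool[i],
			    links);
		}

		if ((req = demo_get_req(&sc)) != NULL) {
			printf("got req %d, active=%d\n", req->id,
			    sc.active_request_count);
			demo_release_req(&sc, req);
		}
		printf("released, active=%d\n", sc.active_request_count);
		return (0);
	}

In the driver itself the same shape recurs for the request-list pool and for the KVA bitmap allocator visible in the matches above; the only additions there are the XBBF_RESOURCE_SHORTAGE wakeup on release and the shortage counters exported through sysctl.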