Lines Matching defs:reqlist

306 	 * Linked list links used to aggregate requests into a reqlist
354 struct xbb_xen_reqlist *reqlist;
539 struct xbb_xen_reqlist *reqlist, int operation,
877 * \param reqlist The request structure whose kva region will be accessed.
885 xbb_reqlist_vaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
887 return (reqlist->kva + (PAGE_SIZE * pagenr) + (sector << 9));
895 * \param reqlist The request structure whose bounce region will be accessed.
903 xbb_reqlist_bounce_addr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
905 return (reqlist->bounce + (PAGE_SIZE * pagenr) + (sector << 9));
914 * \param reqlist The request structure whose I/O region will be accessed.
926 xbb_reqlist_ioaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
929 return (xbb_reqlist_bounce_addr(reqlist, pagenr, sector));
931 return (xbb_reqlist_vaddr(reqlist, pagenr, sector));
940 * \param reqlist The request list structure whose pseudo-physical region
953 xbb_get_gntaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
957 xbb = reqlist->xbb;
960 (uintptr_t)(reqlist->kva - xbb->kva) +
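
The helpers at lines 877-960 all locate a byte inside a request list's contiguous region from a page index and a 512-byte sector offset, either in the granted KVA mapping, in the bounce buffer, or in the pseudo-physical grant range. Below is a minimal userland sketch of the same arithmetic and of the bounce-vs-direct selection made by xbb_reqlist_ioaddr(); PAGE_SIZE, the flag name, and the sketch_* identifiers are assumptions for illustration only.

#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE     4096      /* assumed page size for this sketch */
#define SECTOR_SHIFT  9         /* 512-byte sectors, hence "sector << 9" above */

struct sketch_reqlist {
    uint8_t *kva;               /* start of the granted KVA region */
    uint8_t *bounce;            /* start of the bounce buffer, if one is in use */
    int      flags;
#define SKETCH_USES_BOUNCE 0x1  /* hypothetical stand-in for the driver's flag */
};

/* Same offset rule as xbb_reqlist_vaddr() and xbb_reqlist_bounce_addr(). */
static uint8_t *
sketch_region_addr(uint8_t *base, int pagenr, int sector)
{
    return (base + (size_t)PAGE_SIZE * pagenr +
        ((size_t)sector << SECTOR_SHIFT));
}

/* Mirrors the bounce-vs-direct choice made by xbb_reqlist_ioaddr(). */
static uint8_t *
sketch_ioaddr(struct sketch_reqlist *rl, int pagenr, int sector)
{
    if (rl->flags & SKETCH_USES_BOUNCE)
        return (sketch_region_addr(rl->bounce, pagenr, sector));
    return (sketch_region_addr(rl->kva, pagenr, sector));
}

Under these assumptions, page 2 / sector 3 resolves to 2 * 4096 + 3 * 512 = 9728 bytes past the start of the region.
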
1082 xbb_unmap_reqlist(struct xbb_xen_reqlist *reqlist)
1090 for (i = 0; i < reqlist->nr_segments; i++) {
1092 if (reqlist->gnt_handles[i] == GRANT_REF_INVALID)
1095 unmap[invcount].host_addr = xbb_get_gntaddr(reqlist, i, 0);
1097 unmap[invcount].handle = reqlist->gnt_handles[i];
1098 reqlist->gnt_handles[i] = GRANT_REF_INVALID;
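
xbb_unmap_reqlist() (around line 1082) walks every segment, skips entries whose grant handle was never mapped, and packs the remainder into one batched unmap array. Below is a reduced userland sketch of that compaction pattern; the actual grant-table operation is replaced by a stub (fake_batch_unmap()), and GRANT_REF_INVALID is modelled as -1.

#include <stdint.h>

#define SKETCH_MAX_SEGMENTS    32
#define SKETCH_HANDLE_INVALID  (-1)    /* stands in for GRANT_REF_INVALID */
#define SKETCH_PAGE_SIZE       4096

struct sketch_unmap {
    uint64_t host_addr;
    int32_t  handle;
};

/* Stub standing in for the batched grant-table unmap issued by the driver. */
static int
fake_batch_unmap(struct sketch_unmap *ops, unsigned int count)
{
    (void)ops;
    (void)count;
    return (0);
}

/* nr_segments must not exceed SKETCH_MAX_SEGMENTS in this sketch. */
static int
sketch_unmap_reqlist(uint64_t region_base, int32_t *handles, int nr_segments)
{
    struct sketch_unmap unmap[SKETCH_MAX_SEGMENTS];
    unsigned int invcount = 0;
    int i;

    for (i = 0; i < nr_segments; i++) {
        if (handles[i] == SKETCH_HANDLE_INVALID)
            continue;                        /* never mapped, nothing to undo */
        unmap[invcount].host_addr = region_base +
            (uint64_t)i * SKETCH_PAGE_SIZE;
        unmap[invcount].handle = handles[i];
        handles[i] = SKETCH_HANDLE_INVALID;  /* mark the slot unmapped */
        invcount++;
    }
    return (fake_batch_unmap(unmap, invcount));
}
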
1118 struct xbb_xen_reqlist *reqlist;
1120 reqlist = NULL;
1124 if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) {
1127 reqlist->flags = XBB_REQLIST_NONE;
1128 reqlist->kva = NULL;
1129 reqlist->status = BLKIF_RSP_OKAY;
1130 reqlist->residual_512b_sectors = 0;
1131 reqlist->num_children = 0;
1132 reqlist->nr_segments = 0;
1133 STAILQ_INIT(&reqlist->contig_req_list);
1136 return (reqlist);
1144 * \param wakeup If set, wakeup the work thread if freeing this reqlist
1148 xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
1159 if (reqlist->kva != NULL)
1160 xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments);
1162 xbb_release_reqs(xbb, &reqlist->contig_req_list, reqlist->num_children);
1164 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
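
xbb_get_reqlist() and xbb_release_reqlist() recycle request list structures through a free STAILQ, resetting the per-I/O fields on allocation and returning the structure to the free queue on release. A stripped-down sketch of that lifecycle using the <sys/queue.h> macros follows; locking, the KVA/child bookkeeping, and the work-thread wakeup are omitted, and the sketch_* names are hypothetical.

#include <sys/queue.h>
#include <stddef.h>

struct sketch_reqlist {
    STAILQ_ENTRY(sketch_reqlist) links;
    int   status;
    int   nr_segments;
    int   num_children;
    void *kva;
};

STAILQ_HEAD(sketch_reqlist_head, sketch_reqlist);

/* Pop a recycled reqlist and reset its per-I/O state, as xbb_get_reqlist() does. */
static struct sketch_reqlist *
sketch_get_reqlist(struct sketch_reqlist_head *freeq)
{
    struct sketch_reqlist *rl;

    rl = STAILQ_FIRST(freeq);
    if (rl != NULL) {
        STAILQ_REMOVE_HEAD(freeq, links);
        rl->status = 0;
        rl->nr_segments = 0;
        rl->num_children = 0;
        rl->kva = NULL;
    }
    return (rl);
}

/* Hand the structure back once the I/O has completed. */
static void
sketch_release_reqlist(struct sketch_reqlist_head *freeq,
    struct sketch_reqlist *rl)
{
    STAILQ_INSERT_TAIL(freeq, rl, links);
}
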
1184 * \param reqlist Pointer to reqlist pointer.
1191 xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist,
1212 * Allocate a reqlist if the caller doesn't have one already.
1214 if (*reqlist == NULL) {
1227 if (*reqlist == NULL) {
1228 *reqlist = nreqlist;
1235 nreq->reqlist = *reqlist;
1249 STAILQ_INSERT_TAIL(&(*reqlist)->contig_req_list, nreq, links);
1250 (*reqlist)->num_children++;
1251 (*reqlist)->nr_segments += ring_req->nr_segments;
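
xbb_get_resources() (around line 1191) attaches each incoming ring request to the current request list as a child: the request is queued on contig_req_list, num_children is bumped, and nr_segments accumulates the child's segment count. Below is a hypothetical reduced sketch of that aggregation step, ignoring allocation failures and the ring bookkeeping.

#include <sys/queue.h>

struct sketch_req {
    STAILQ_ENTRY(sketch_req) links;
    int nr_segments;    /* segments carried by this front-end request */
};

STAILQ_HEAD(sketch_req_head, sketch_req);

struct sketch_batch {
    struct sketch_req_head contig_req_list; /* children contiguous on disk */
    int num_children;
    int nr_segments;                        /* running total across children */
};

/* Fold one more front-end request into the batch. */
static void
sketch_add_request(struct sketch_batch *b, struct sketch_req *req)
{
    STAILQ_INSERT_TAIL(&b->contig_req_list, req, links);
    b->num_children++;
    b->nr_segments += req->nr_segments;
}
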
1386 * \param reqlist Allocated internal request list structure.
1389 xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1397 if (reqlist->flags & XBB_REQLIST_MAPPED)
1398 xbb_unmap_reqlist(reqlist);
1406 * reqlist right now. However, in order to make sure that no one
1411 STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
1415 xbb_queue_response(xbb, nreq, reqlist->status);
1418 if (reqlist->status == BLKIF_RSP_OKAY)
1427 reqlist->ds_tag_type,
1428 reqlist->ds_trans_type,
1438 sectors_sent -= reqlist->residual_512b_sectors;
1444 reqlist->ds_tag_type,
1445 reqlist->ds_trans_type,
1447 /*then*/&reqlist->ds_t0);
1449 xbb_release_reqlist(xbb, reqlist, /*wakeup*/ 1);
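
xbb_complete_reqlist() unmaps the grants if needed, answers every child request with the request list's aggregate status, accounts the transfer (minus any residual sectors) to devstat, and releases the structure. The sketch below shows only the per-child response fan-out; sketch_queue_response() is a hypothetical stand-in for the driver's response queuing.

#include <sys/queue.h>

struct sketch_req {
    STAILQ_ENTRY(sketch_req) links;
    int id;             /* identifies the front-end request being answered */
};

STAILQ_HEAD(sketch_req_head, sketch_req);

/* Hypothetical hook standing in for xbb_queue_response(). */
static void
sketch_queue_response(int id, int status)
{
    (void)id;
    (void)status;
}

/* A single aggregate status answers every child in the batch. */
static void
sketch_complete_batch(struct sketch_req_head *children, int status)
{
    struct sketch_req *req;

    STAILQ_FOREACH(req, children, links)
        sketch_queue_response(req->id, status);
}
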
1473 struct xbb_xen_reqlist *reqlist;
1475 reqlist = bio->bio_caller1;
1476 xbb = reqlist->xbb;
1478 reqlist->residual_512b_sectors += bio->bio_resid >> 9;
1499 reqlist->status = BLKIF_RSP_ERROR;
1518 - (vm_offset_t)reqlist->bounce;
1519 memcpy((uint8_t *)reqlist->kva + kva_offset,
1528 if (atomic_fetchadd_int(&reqlist->pendcnt, -1) == 1)
1529 xbb_complete_reqlist(xbb, reqlist);
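
The bio completion path above decrements the request list's pending bio count and finishes the whole list only when the count reaches zero; the driver does this with FreeBSD's atomic_fetchadd_int() and -1. A tiny equivalent using C11 atomics, purely for illustration:

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Returns true for exactly one caller: the completion that drops the pending
 * count from 1 to 0, mirroring "atomic_fetchadd_int(&reqlist->pendcnt, -1) == 1".
 */
static bool
sketch_last_bio_done(atomic_int *pendcnt)
{
    return (atomic_fetch_sub(pendcnt, 1) == 1);
}
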
1539 * \param reqlist Allocated internal request list structure.
1549 xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1565 reqlist->ds_tag_type = DEVSTAT_TAG_SIMPLE;
1575 reqlist->kva = NULL;
1576 if (reqlist->nr_segments != 0) {
1577 reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments);
1578 if (reqlist->kva == NULL) {
1586 binuptime(&reqlist->ds_t0);
1587 devstat_start_transaction(xbb->xbb_stats, &reqlist->ds_t0);
1589 switch (reqlist->operation) {
1592 reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
1596 reqlist->ds_trans_type = DEVSTAT_WRITE;
1600 reqlist->status = BLKIF_RSP_ERROR;
1606 reqlist->ds_trans_type = DEVSTAT_READ;
1632 reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
1633 reqlist->ds_trans_type = DEVSTAT_NO_DATA;
1638 reqlist->operation);
1639 reqlist->status = BLKIF_RSP_ERROR;
1643 reqlist->xbb = xbb;
1648 STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
1667 reqlist->status = BLKIF_RSP_ERROR;
1690 reqlist->status = BLKIF_RSP_ERROR;
1695 map->host_addr = xbb_get_gntaddr(reqlist,
1728 reqlist->status = BLKIF_RSP_ERROR;
1734 xbb->maps, reqlist->nr_segments);
1738 reqlist->flags |= XBB_REQLIST_MAPPED;
1740 for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments;
1750 reqlist->status = BLKIF_RSP_ERROR;
1754 reqlist->gnt_handles[seg_idx] = map->handle;
1756 if (reqlist->starting_sector_number + total_sects >
1762 reqlist->starting_sector_number,
1763 reqlist->starting_sector_number + total_sects,
1765 reqlist->status = BLKIF_RSP_ERROR;
1772 reqlist,
1777 reqlist->status = BLKIF_RSP_ERROR;
1785 xbb_complete_reqlist(xbb, reqlist);
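
xbb_dispatch_io() sizes and maps the KVA region, classifies the operation (read, write, or flush), builds one grant-map entry per segment, and rejects requests that run past the end of the media before handing off to the device or file backend. Below is a compressed sketch of the classification and the extent check; the SKETCH_* names are hypothetical stand-ins for the BLKIF/devstat constants.

#include <stdint.h>

enum sketch_op     { SKETCH_READ, SKETCH_WRITE, SKETCH_FLUSH, SKETCH_UNKNOWN };
enum sketch_result { SKETCH_OK, SKETCH_ERROR };
enum sketch_kind   { SKETCH_XFER_READ, SKETCH_XFER_WRITE, SKETCH_XFER_NO_DATA,
                     SKETCH_XFER_INVALID };

/* Reads and writes move data, a flush carries none, anything else is an error. */
static enum sketch_kind
sketch_classify(enum sketch_op op)
{
    switch (op) {
    case SKETCH_WRITE:
        return (SKETCH_XFER_WRITE);
    case SKETCH_READ:
        return (SKETCH_XFER_READ);
    case SKETCH_FLUSH:
        return (SKETCH_XFER_NO_DATA);
    default:
        return (SKETCH_XFER_INVALID);
    }
}

/* Equivalent of the "starting_sector_number + total_sects > media size" test. */
static enum sketch_result
sketch_check_extent(uint64_t start_sector, uint64_t total_sects,
    uint64_t media_num_sectors)
{
    if (start_sector + total_sects > media_num_sectors)
        return (SKETCH_ERROR);
    return (SKETCH_OK);
}
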
1827 struct xbb_xen_reqlist *reqlist;
1848 * Initialize reqlist to the last element in the pending
1852 reqlist = STAILQ_LAST(&xbb->reqlist_pending_stailq,
1854 if (reqlist != NULL) {
1855 cur_sector = reqlist->next_contig_sector;
1856 cur_operation = reqlist->operation;
1927 if ((reqlist != NULL)
1932 || ((ring_req->nr_segments + reqlist->nr_segments) >
1934 reqlist = NULL;
1943 retval = xbb_get_resources(xbb, &reqlist, ring_req,
1972 reqlist->next_contig_sector = cur_sector;
1977 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
1978 if (reqlist == NULL) {
1992 retval = xbb_dispatch_io(xbb, reqlist);
2002 reqlist, links);
2016 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
2018 if (reqlist != NULL)
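
The work loop above coalesces: it peeks at the tail of the pending queue and lets a new ring request join the existing request list only if it is the same operation, starts exactly where the batch left off (next_contig_sector), and keeps the combined segment count within the per-reqlist limit; otherwise reqlist is reset to NULL and a new one is started. A hypothetical condensed form of that decision:

#include <stdint.h>
#include <stdbool.h>

static bool
sketch_can_coalesce(int batch_op, uint64_t next_contig_sector, int batch_segs,
    int req_op, uint64_t req_start_sector, int req_segs, int max_segs)
{
    return (req_op == batch_op &&
        req_start_sector == next_contig_sector &&
        batch_segs + req_segs <= max_segs);
}
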
2058 * \param reqlist Allocated internal request list structure.
2066 xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
2081 bio_offset = (off_t)reqlist->starting_sector_number
2101 bio->bio_caller1 = reqlist;
2104 reqlist->pendcnt = 1;
2116 nseg = reqlist->nr_segments;
2159 bio->bio_data = xbb_reqlist_ioaddr(reqlist, seg_idx,
2162 bio->bio_caller1 = reqlist;
2188 reqlist->pendcnt = nbio;
2196 - (vm_offset_t)reqlist->bounce;
2199 (uint8_t *)reqlist->kva + kva_offset,
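
xbb_dispatch_dev() turns the request list into one or more bios whose bio_data points into the list's I/O region, sets pendcnt to the number of bios issued, and, when a bounce buffer is in use, stages write data from the granted (kva) region into the bounce buffer before each bio goes out (the memcpy whose arguments appear at lines 2196-2199). A reduced sketch of that staging copy; all names are hypothetical.

#include <string.h>
#include <stdint.h>
#include <stddef.h>

/*
 * bio_data points into the bounce buffer; the payload is pulled from the
 * granted region at the matching offset before the write is issued.
 */
static void
sketch_stage_bounced_write(uint8_t *bio_data, const uint8_t *bounce_base,
    const uint8_t *kva, size_t bcount)
{
    size_t kva_offset = (size_t)(bio_data - bounce_base);

    memcpy(bio_data, kva + kva_offset, bcount);
}
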
2236 * \param reqlist Allocated internal request list.
2244 xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
2291 xuio.uio_offset = (vm_offset_t)reqlist->starting_sector_number
2297 nseg = reqlist->nr_segments;
2311 xiovec->iov_base = xbb_reqlist_ioaddr(reqlist,
2322 *p_vaddr = xbb_reqlist_vaddr(reqlist, seg_idx,
2467 reqlist->status = BLKIF_RSP_ERROR;
2469 xbb_complete_reqlist(xbb, reqlist);
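
xbb_dispatch_file() builds a uio whose offset is the starting sector times 512 and whose iovecs point into the request list's I/O region, then performs the vnode read or write. The simplified userland sketch below emits one iovec per segment and assumes a densely packed region; the real code merges adjacent segments and honours per-page first/last sectors, so the sketch_* names and layout are assumptions.

#include <sys/types.h>
#include <sys/uio.h>
#include <stdint.h>
#include <stddef.h>

static int
sketch_build_iov(struct iovec *iov, int max_iov, uint8_t *region,
    const int *seg_sects, int nseg, off_t start_sector, off_t *offset_out)
{
    int i, niov = 0;
    size_t pos = 0;

    *offset_out = (off_t)start_sector << 9;   /* byte offset into the file */
    for (i = 0; i < nseg && niov < max_iov; i++) {
        iov[niov].iov_base = region + pos;
        iov[niov].iov_len = (size_t)seg_sects[i] << 9;
        pos += iov[niov].iov_len;
        niov++;
    }
    return (niov);
}
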
2837 struct xbb_xen_reqlist *reqlist;
2841 for (i = 0, reqlist = xbb->request_lists;
2842 i < xbb->max_requests; i++, reqlist++) {
2844 if (reqlist->bounce != NULL) {
2845 free(reqlist->bounce, M_XENBLOCKBACK);
2846 reqlist->bounce = NULL;
2849 if (reqlist->gnt_handles != NULL) {
2850 free(reqlist->gnt_handles, M_XENBLOCKBACK);
2851 reqlist->gnt_handles = NULL;
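
Teardown (lines 2841-2851) walks every request list and frees its bounce buffer and grant-handle array, clearing the pointers so a repeated teardown is harmless. The same pattern in a standalone sketch:

#include <stdlib.h>
#include <stddef.h>

struct sketch_reqlist {
    void *bounce;
    void *gnt_handles;
};

static void
sketch_free_reqlist_resources(struct sketch_reqlist *lists, int count)
{
    int i;

    for (i = 0; i < count; i++) {
        free(lists[i].bounce);          /* free(NULL) is a no-op */
        lists[i].bounce = NULL;
        free(lists[i].gnt_handles);
        lists[i].gnt_handles = NULL;
    }
}
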
3188 struct xbb_xen_reqlist *reqlist;
3208 reqlist = &xbb->request_lists[i];
3210 reqlist->xbb = xbb;
3213 reqlist->bounce = malloc(xbb->max_reqlist_size,
3215 if (reqlist->bounce == NULL) {
3223 reqlist->gnt_handles = malloc(xbb->max_reqlist_segments *
3224 sizeof(*reqlist->gnt_handles),
3226 if (reqlist->gnt_handles == NULL) {
3234 reqlist->gnt_handles[seg] = GRANT_REF_INVALID;
3236 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
3320 * We limit the maximum number of reqlist segments to the maximum
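
Initialization (around lines 3208-3236) gives each request list a bounce buffer sized for the largest possible request list and a grant-handle slot per segment, with every slot starting out as GRANT_REF_INVALID, before pushing the structure onto the free queue; the segment limit mentioned at line 3320 caps how large those arrays need to be. A reduced sketch of the per-reqlist setup, with error handling collapsed and GRANT_REF_INVALID modelled as -1:

#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>

#define SKETCH_HANDLE_INVALID (-1)   /* stands in for GRANT_REF_INVALID */

struct sketch_reqlist {
    uint8_t *bounce;
    int32_t *gnt_handles;
};

static int
sketch_init_reqlist(struct sketch_reqlist *rl, size_t max_reqlist_size,
    int max_reqlist_segments)
{
    int seg;

    rl->bounce = malloc(max_reqlist_size);
    rl->gnt_handles = malloc((size_t)max_reqlist_segments *
        sizeof(*rl->gnt_handles));
    if (rl->bounce == NULL || rl->gnt_handles == NULL) {
        free(rl->bounce);
        free(rl->gnt_handles);
        rl->bounce = NULL;
        rl->gnt_handles = NULL;
        return (-1);
    }
    for (seg = 0; seg < max_reqlist_segments; seg++)
        rl->gnt_handles[seg] = SKETCH_HANDLE_INVALID;
    return (0);
}
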