Lines Matching refs:reqlist in /freebsd-12-stable/sys/dev/xen/blkback/

308 	 * Linked list links used to aggregate requests into a reqlist
356 struct xbb_xen_reqlist *reqlist;
541 struct xbb_xen_reqlist *reqlist, int operation,
882 * \param reqlist The request structure whose kva region will be accessed.
890 xbb_reqlist_vaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
892 return (reqlist->kva + (PAGE_SIZE * pagenr) + (sector << 9));
900 * \param reqlist The request structure whose bounce region will be accessed.
908 xbb_reqlist_bounce_addr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
910 return (reqlist->bounce + (PAGE_SIZE * pagenr) + (sector << 9));
919 * \param reqlist The request structure whose I/O region will be accessed.
931 xbb_reqlist_ioaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
934 return (xbb_reqlist_bounce_addr(reqlist, pagenr, sector));
936 return (xbb_reqlist_vaddr(reqlist, pagenr, sector));
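The three helpers above resolve a (page, sector) pair into a byte address inside a reqlist's region: each page occupies a PAGE_SIZE slot and each sector is 512 bytes (hence the << 9), with xbb_reqlist_ioaddr() choosing between the grant-mapped KVA and the bounce buffer. A minimal standalone sketch of the same arithmetic; struct reqlist_stub is a reduced stand-in for the driver's struct xbb_xen_reqlist, not its real layout:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096  /* assumption: 4 KiB pages, as on x86 */

    /* Reduced stand-in for the driver's struct xbb_xen_reqlist. */
    struct reqlist_stub {
        uint8_t *kva;    /* grant-mapped region */
        uint8_t *bounce; /* optional bounce buffer */
    };

    /* Same arithmetic as xbb_reqlist_vaddr(): page slot plus 512-byte sector. */
    static uint8_t *
    reqlist_vaddr(struct reqlist_stub *rl, int pagenr, int sector)
    {
        return (rl->kva + (PAGE_SIZE * pagenr) + (sector << 9));
    }

    int
    main(void)
    {
        static uint8_t region[4 * PAGE_SIZE];
        struct reqlist_stub rl = { .kva = region, .bounce = NULL };

        /* Page 2, sector 3 -> byte offset 2*4096 + 3*512 = 9728. */
        printf("offset = %td\n", reqlist_vaddr(&rl, 2, 3) - region);
        return (0);
    }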
945 * \param reqlist The request list structure whose pseudo-physical region
958 xbb_get_gntaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector)
962 xbb = reqlist->xbb;
965 (uintptr_t)(reqlist->kva - xbb->kva) +
1087 xbb_unmap_reqlist(struct xbb_xen_reqlist *reqlist)
1095 for (i = 0; i < reqlist->nr_segments; i++) {
1097 if (reqlist->gnt_handles[i] == GRANT_REF_INVALID)
1100 unmap[invcount].host_addr = xbb_get_gntaddr(reqlist, i, 0);
1102 unmap[invcount].handle = reqlist->gnt_handles[i];
1103 reqlist->gnt_handles[i] = GRANT_REF_INVALID;
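xbb_unmap_reqlist() walks every segment slot but compacts only the valid grant handles into the unmap array, so one batched operation can tear down all mapped segments; each consumed handle is reset to GRANT_REF_INVALID so a repeated unmap is harmless. A compilable sketch of that compaction pattern; the types, the collect_unmaps() name, and the host_addr placeholder are illustrative stand-ins for the real gnttab structures and xbb_get_gntaddr():

    #include <stdint.h>

    #define GRANT_REF_INVALID ((uint32_t)~0) /* assumption: sentinel value */

    struct unmap_op {       /* stand-in for the gnttab unmap descriptor */
        uintptr_t host_addr;
        uint32_t  handle;
    };

    /* Compact the valid handles into ops[]; returns how many to unmap. */
    static int
    collect_unmaps(uint32_t *handles, int nsegs, struct unmap_op *ops)
    {
        int i, invcount;

        invcount = 0;
        for (i = 0; i < nsegs; i++) {
            if (handles[i] == GRANT_REF_INVALID)
                continue; /* never mapped, or already torn down */
            ops[invcount].host_addr = (uintptr_t)i; /* real code: xbb_get_gntaddr() */
            ops[invcount].handle = handles[i];
            handles[i] = GRANT_REF_INVALID; /* repeat calls become no-ops */
            invcount++;
        }
        return (invcount);
    }

    int
    main(void)
    {
        uint32_t h[4] = { 7, GRANT_REF_INVALID, 9, GRANT_REF_INVALID };
        struct unmap_op ops[4];

        return (collect_unmaps(h, 4, ops) == 2 ? 0 : 1);
    }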
1123 struct xbb_xen_reqlist *reqlist;
1125 reqlist = NULL;
1129 if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) {
1132 reqlist->flags = XBB_REQLIST_NONE;
1133 reqlist->kva = NULL;
1134 reqlist->status = BLKIF_RSP_OKAY;
1135 reqlist->residual_512b_sectors = 0;
1136 reqlist->num_children = 0;
1137 reqlist->nr_segments = 0;
1138 STAILQ_INIT(&reqlist->contig_req_list);
1141 return (reqlist);
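Allocation in xbb_get_reqlist() is a pop from reqlist_free_stailq followed by reinitialization of the per-use fields (flags, kva, status, residual sectors, child count, segment count). The STAILQ macros come from <sys/queue.h> and are real API; struct item and get_item() below are illustrative stand-ins for the reqlist and its accessor:

    #include <sys/queue.h>
    #include <stdio.h>

    struct item {
        int id;
        STAILQ_ENTRY(item) links;
    };
    STAILQ_HEAD(item_head, item);

    /* Pop one entry off the free list and reset its per-use state,
     * the same pattern xbb_get_reqlist() applies to reqlists. */
    static struct item *
    get_item(struct item_head *freeq)
    {
        struct item *it;

        if ((it = STAILQ_FIRST(freeq)) != NULL) {
            STAILQ_REMOVE_HEAD(freeq, links);
            it->id = 0; /* the driver resets flags, kva, status, ... */
        }
        return (it);
    }

    int
    main(void)
    {
        struct item_head freeq = STAILQ_HEAD_INITIALIZER(freeq);
        struct item a = { .id = 42 };

        STAILQ_INSERT_TAIL(&freeq, &a, links);
        printf("got %p\n", (void *)get_item(&freeq));
        return (0);
    }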
1149 * \param wakeup If set, wakeup the work thread if freeing this reqlist
1153 xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
1164 if (reqlist->kva != NULL)
1165 xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments);
1167 xbb_release_reqs(xbb, &reqlist->contig_req_list, reqlist->num_children);
1169 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
1189 * \param reqlist Pointer to reqlist pointer.
1196 xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist,
1217 * Allocate a reqlist if the caller doesn't have one already.
1219 if (*reqlist == NULL) {
1232 if (*reqlist == NULL) {
1233 *reqlist = nreqlist;
1240 nreq->reqlist = *reqlist;
1254 STAILQ_INSERT_TAIL(&(*reqlist)->contig_req_list, nreq, links);
1255 (*reqlist)->num_children++;
1256 (*reqlist)->nr_segments += ring_req->nr_segments;
1391 * \param reqlist Allocated internal request list structure.
1394 xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1402 if (reqlist->flags & XBB_REQLIST_MAPPED)
1403 xbb_unmap_reqlist(reqlist);
1411 * reqlist right now. However, in order to make sure that no one
1416 STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
1420 xbb_queue_response(xbb, nreq, reqlist->status);
1423 if (reqlist->status == BLKIF_RSP_OKAY)
1432 reqlist->ds_tag_type,
1433 reqlist->ds_trans_type,
1443 sectors_sent -= reqlist->residual_512b_sectors;
1449 reqlist->ds_tag_type,
1450 reqlist->ds_trans_type,
1452 /*then*/&reqlist->ds_t0);
1454 xbb_release_reqlist(xbb, reqlist, /*wakeup*/ 1);
1478 struct xbb_xen_reqlist *reqlist;
1480 reqlist = bio->bio_caller1;
1481 xbb = reqlist->xbb;
1483 reqlist->residual_512b_sectors += bio->bio_resid >> 9;
1504 reqlist->status = BLKIF_RSP_ERROR;
1523 - (vm_offset_t)reqlist->bounce;
1524 memcpy((uint8_t *)reqlist->kva + kva_offset,
1533 if (atomic_fetchadd_int(&reqlist->pendcnt, -1) == 1)
1534 xbb_complete_reqlist(xbb, reqlist);
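A reqlist can fan out into several struct bio, and pendcnt counts the children still in flight; the completion that drops the count to zero (atomic_fetchadd_int() returning 1, i.e. the old value) is the one that calls xbb_complete_reqlist(). The same last-one-out pattern, sketched with C11 atomics standing in for the FreeBSD kernel's atomic_fetchadd_int():

    #include <stdatomic.h>
    #include <stdio.h>

    struct parent {
        atomic_int pendcnt; /* children still in flight */
    };

    static void
    complete_parent(struct parent *p)
    {
        (void)p;
        printf("all children done\n");
    }

    /* Per-child completion: only the caller that observes the old value 1
     * (count now zero) completes the parent, mirroring the
     * atomic_fetchadd_int(&reqlist->pendcnt, -1) == 1 test above. */
    static void
    child_done(struct parent *p)
    {
        if (atomic_fetch_sub(&p->pendcnt, 1) == 1)
            complete_parent(p);
    }

    int
    main(void)
    {
        struct parent p;

        atomic_init(&p.pendcnt, 2);
        child_done(&p); /* 2 -> 1: not yet */
        child_done(&p); /* 1 -> 0: completes */
        return (0);
    }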
1544 * \param reqlist Allocated internal request list structure.
1554 xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist)
1570 reqlist->ds_tag_type = DEVSTAT_TAG_SIMPLE;
1580 reqlist->kva = NULL;
1581 if (reqlist->nr_segments != 0) {
1582 reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments);
1583 if (reqlist->kva == NULL) {
1591 binuptime(&reqlist->ds_t0);
1592 devstat_start_transaction(xbb->xbb_stats, &reqlist->ds_t0);
1594 switch (reqlist->operation) {
1597 reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
1601 reqlist->ds_trans_type = DEVSTAT_WRITE;
1605 reqlist->status = BLKIF_RSP_ERROR;
1611 reqlist->ds_trans_type = DEVSTAT_READ;
1637 reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED;
1638 reqlist->ds_trans_type = DEVSTAT_NO_DATA;
1643 reqlist->operation);
1644 reqlist->status = BLKIF_RSP_ERROR;
1648 reqlist->xbb = xbb;
1653 STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) {
1672 reqlist->status = BLKIF_RSP_ERROR;
1695 reqlist->status = BLKIF_RSP_ERROR;
1700 map->host_addr = xbb_get_gntaddr(reqlist,
1733 reqlist->status = BLKIF_RSP_ERROR;
1739 xbb->maps, reqlist->nr_segments);
1743 reqlist->flags |= XBB_REQLIST_MAPPED;
1745 for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments;
1755 reqlist->status = BLKIF_RSP_ERROR;
1759 reqlist->gnt_handles[seg_idx] = map->handle;
1761 if (reqlist->starting_sector_number + total_sects >
1767 reqlist->starting_sector_number,
1768 reqlist->starting_sector_number + total_sects,
1770 reqlist->status = BLKIF_RSP_ERROR;
1777 reqlist,
1782 reqlist->status = BLKIF_RSP_ERROR;
1790 xbb_complete_reqlist(xbb, reqlist);
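Before dispatching, xbb_dispatch_io() rejects a reqlist whose final sector would run past the end of the backing media. A sketch of an overflow-safe form of that check; request_in_bounds() and media_num_sectors are illustrative names, not the driver's:

    #include <stdbool.h>
    #include <stdint.h>

    /* Overflow-safe check that [start, start + total) fits on media
     * measured in 512-byte sectors; equivalent to the driver's
     * starting_sector_number + total_sects > size test when no wrap occurs. */
    static bool
    request_in_bounds(uint64_t start_sector, uint64_t total_sects,
        uint64_t media_num_sectors)
    {
        return (total_sects <= media_num_sectors &&
            start_sector <= media_num_sectors - total_sects);
    }

    int
    main(void)
    {
        return (request_in_bounds(100, 8, 1024) ? 0 : 1);
    }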
1832 struct xbb_xen_reqlist *reqlist;
1853 * Initialize reqlist to the last element in the pending
1857 reqlist = STAILQ_LAST(&xbb->reqlist_pending_stailq,
1859 if (reqlist != NULL) {
1860 cur_sector = reqlist->next_contig_sector;
1861 cur_operation = reqlist->operation;
1932 if ((reqlist != NULL)
1937 || ((ring_req->nr_segments + reqlist->nr_segments) >
1939 reqlist = NULL;
1948 retval = xbb_get_resources(xbb, &reqlist, ring_req,
1977 reqlist->next_contig_sector = cur_sector;
1982 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
1983 if (reqlist == NULL) {
1997 retval = xbb_dispatch_io(xbb, reqlist);
2007 reqlist, links);
2021 reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq);
2023 if (reqlist != NULL)
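The work loop keeps extending the reqlist at the tail of reqlist_pending_stailq as long as each new ring request continues the same run: it must start at next_contig_sector, match the current operation, and fit within the per-reqlist segment budget; otherwise reqlist is set to NULL and a fresh one is started. A sketch of that decision; can_coalesce() is an illustrative name, and the real test also considers ordered/flush semantics not visible in these fragments:

    #include <stdbool.h>
    #include <stdint.h>

    /* Can a new ring request join the reqlist being built? */
    static bool
    can_coalesce(uint64_t next_contig_sector, int cur_op, int cur_segs,
        uint64_t req_sector, int req_op, int req_segs, int max_segs)
    {
        if (req_op != cur_op)
            return (false); /* don't mix operations */
        if (req_sector != next_contig_sector)
            return (false); /* must extend the run contiguously */
        if (cur_segs + req_segs > max_segs)
            return (false); /* would exceed the segment budget */
        return (true);
    }

    int
    main(void)
    {
        /* A request at sector 128 continuing a run that ends there. */
        return (can_coalesce(128, 0, 4, 128, 0, 2, 11) ? 0 : 1);
    }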
2063 * \param reqlist Allocated internal request list structure.
2071 xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
2086 bio_offset = (off_t)reqlist->starting_sector_number
2106 bio->bio_caller1 = reqlist;
2109 reqlist->pendcnt = 1;
2121 nseg = reqlist->nr_segments;
2164 bio->bio_data = xbb_reqlist_ioaddr(reqlist, seg_idx,
2167 bio->bio_caller1 = reqlist;
2193 reqlist->pendcnt = nbio;
2201 - (vm_offset_t)reqlist->bounce;
2204 (uint8_t *)reqlist->kva + kva_offset,
2241 * \param reqlist Allocated internal request list.
2249 xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist,
2296 xuio.uio_offset = (vm_offset_t)reqlist->starting_sector_number
2302 nseg = reqlist->nr_segments;
2316 xiovec->iov_base = xbb_reqlist_ioaddr(reqlist,
2327 *p_vaddr = xbb_reqlist_vaddr(reqlist, seg_idx,
2472 reqlist->status = BLKIF_RSP_ERROR;
2474 xbb_complete_reqlist(xbb, reqlist);
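For file-backed (vnode) storage, xbb_dispatch_file() assembles one iovec per segment into a struct uio, with uio_offset derived from the starting sector (sector << 9), and hands the whole vector to the filesystem in a single call. A userspace analogue using preadv(2); the file, offsets, and sizes are arbitrary example values:

    #include <sys/uio.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        char page0[4096], page1[4096];
        struct iovec iov[2] = {
            { .iov_base = page0, .iov_len = sizeof(page0) },
            { .iov_base = page1, .iov_len = sizeof(page1) },
        };
        off_t offset = (off_t)123 << 9; /* starting sector -> byte offset */
        ssize_t n;
        int fd;

        if ((fd = open("/dev/zero", O_RDONLY)) == -1)
            return (1);
        /* One vectored call covers every segment, as the uio does. */
        n = preadv(fd, iov, 2, offset);
        printf("read %zd bytes\n", n);
        close(fd);
        return (0);
    }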
2841 struct xbb_xen_reqlist *reqlist;
2845 for (i = 0, reqlist = xbb->request_lists;
2846 i < xbb->max_requests; i++, reqlist++){
2848 if (reqlist->bounce != NULL) {
2849 free(reqlist->bounce, M_XENBLOCKBACK);
2850 reqlist->bounce = NULL;
2853 if (reqlist->gnt_handles != NULL) {
2854 free(reqlist->gnt_handles, M_XENBLOCKBACK);
2855 reqlist->gnt_handles = NULL;
3213 struct xbb_xen_reqlist *reqlist;
3233 reqlist = &xbb->request_lists[i];
3235 reqlist->xbb = xbb;
3238 reqlist->bounce = malloc(xbb->max_reqlist_size,
3240 if (reqlist->bounce == NULL) {
3248 reqlist->gnt_handles = malloc(xbb->max_reqlist_segments *
3249 sizeof(*reqlist->gnt_handles),
3251 if (reqlist->gnt_handles == NULL) {
3259 reqlist->gnt_handles[seg] = GRANT_REF_INVALID;
3261 STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links);
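Per-reqlist resources are set up once at attach time: a bounce buffer of max_reqlist_size bytes, a grant-handle array sized for max_reqlist_segments, every handle poisoned with GRANT_REF_INVALID so teardown can distinguish mapped from unmapped slots, and the finished reqlist goes onto the free list. A sketch of that initialization; struct rl_stub and init_rl() are illustrative stand-ins:

    #include <stdint.h>
    #include <stdlib.h>

    #define GRANT_REF_INVALID ((uint32_t)~0) /* assumption: sentinel value */

    struct rl_stub {
        uint8_t  *bounce;
        uint32_t *gnt_handles;
    };

    /* One-time setup per reqlist: bounce buffer, grant-handle array,
     * every handle poisoned so teardown can tell mapped from unmapped. */
    static int
    init_rl(struct rl_stub *rl, size_t max_reqlist_size, int max_segs)
    {
        int seg;

        rl->bounce = malloc(max_reqlist_size);
        if (rl->bounce == NULL)
            return (-1);
        rl->gnt_handles = malloc(max_segs * sizeof(*rl->gnt_handles));
        if (rl->gnt_handles == NULL) {
            free(rl->bounce);
            rl->bounce = NULL;
            return (-1);
        }
        for (seg = 0; seg < max_segs; seg++)
            rl->gnt_handles[seg] = GRANT_REF_INVALID;
        return (0);
    }

    int
    main(void)
    {
        struct rl_stub rl;

        return (init_rl(&rl, 44 * 4096, 44) == 0 ? 0 : 1);
    }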
3344 * We limit the maximum number of reqlist segments to the maximum