Lines Matching defs:io_req

175 struct pass_io_req *io_req);
177 struct pass_io_req *io_req,
180 struct pass_io_req *io_req);
182 struct pass_io_req *io_req);
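
The four fragments above (source lines 175-182) are the parameter tails of static helper prototypes; the function names sit on the preceding, unmatched lines. The definition of struct pass_io_req itself never appears in the matches, so as a reading aid here is a hypothetical reconstruction assembled only from the fields the lines below dereference. The listing appears to come from FreeBSD's pass(4) driver (sys/cam/scsi/scsi_pass.c); the real definition there will differ in layout, exact types, and fields not visible here, and the two-buffer and PASS_MAX_SEGS array bounds are inferences, not quotes.

/*
 * Hypothetical sketch only, inferred from the matched lines; kernel
 * types (union ccb, bus_dma_segment_t, ...) come from the CAM headers.
 */
struct pass_io_req {
	union ccb		   ccb;			/* lines 1124, 1863 */
	union ccb		  *alloced_ccb;		/* line 873 */
	union ccb		  *user_ccb_ptr;	/* lines 910, 1922 */
	camq_entry		   user_periph_links;	/* line 1923 */
	ccb_ppriv_area		   user_periph_priv;	/* line 1924 */
	struct bintime		   start_time;		/* line 874 */
	uint32_t		   flags;		/* PASS_IO_* bits */
	uint32_t		   data_flags;		/* CAM_DATA_* kind, line 1169 */
	uint32_t		   num_bufs;		/* line 1398 */
	uint8_t			  *user_bufs[2];	/* lines 1407, 1201; bound from min(..., 2) at 1128 */
	uint8_t			  *kern_bufs[2];	/* lines 1452, 1174 */
	uint32_t		   dirs[2];		/* line 1408 */
	uint32_t		   lengths[2];		/* line 1409 */
	uint32_t		   num_user_segs;	/* line 1535 */
	uint32_t		   num_kern_segs;	/* line 1536 */
	bus_dma_segment_t	  *user_segptr;		/* line 1546 */
	bus_dma_segment_t	  *kern_segptr;		/* line 1569 */
	bus_dma_segment_t	   user_segs[PASS_MAX_SEGS];	/* line 1550; bound assumed */
	bus_dma_segment_t	   kern_segs[PASS_MAX_SEGS];	/* line 1573 */
	struct cam_periph_map_info mapinfo;		/* line 1966 */
	TAILQ_ENTRY(pass_io_req)   links;		/* queue linkage, lines 245-274 */
};
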
236 struct pass_io_req *io_req, *io_req2;
245 TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) {
246 TAILQ_REMOVE(&softc->done_queue, io_req, links);
247 passiocleanup(softc, io_req);
248 uma_zfree(softc->pass_zone, io_req);
256 TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links, io_req2) {
257 TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
258 passiocleanup(softc, io_req);
259 uma_zfree(softc->pass_zone, io_req);
271 TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links, io_req2) {
272 TAILQ_REMOVE(&softc->active_queue, io_req, links);
273 io_req->flags |= PASS_IO_ABANDONED;
274 TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req, links);
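
Source lines 236-274 (and the near-identical 776-819 below) are a teardown pattern: requests on the done and incoming queues are owned entirely by the driver and can be freed at once, while requests on the active queue are still held by the lower layers, so they are flagged PASS_IO_ABANDONED and parked for the completion handler to reap later. A minimal, runnable userspace sketch of that migration, using the BSD <sys/queue.h> macros the driver uses; all other names are illustrative, and free() stands in for uma_zfree():

#include <sys/queue.h>	/* BSD flavor; glibc's copy may lack the _SAFE macros */
#include <stdlib.h>

#define	IO_ABANDONED	0x01

struct io_req {
	int flags;
	TAILQ_ENTRY(io_req) links;
};
TAILQ_HEAD(io_queue, io_req);

/*
 * Teardown sketch: free everything the driver fully owns, and move
 * in-flight requests to 'abandoned' so the completion path frees them.
 */
static void
drain_queues(struct io_queue *done, struct io_queue *incoming,
    struct io_queue *active, struct io_queue *abandoned)
{
	struct io_req *req, *req2;

	TAILQ_FOREACH_SAFE(req, done, links, req2) {
		TAILQ_REMOVE(done, req, links);
		free(req);
	}
	TAILQ_FOREACH_SAFE(req, incoming, links, req2) {
		TAILQ_REMOVE(incoming, req, links);
		free(req);
	}
	TAILQ_FOREACH_SAFE(req, active, links, req2) {
		TAILQ_REMOVE(active, req, links);
		req->flags |= IO_ABANDONED;
		TAILQ_INSERT_TAIL(abandoned, req, links);
	}
}

The _SAFE variant matters here: each iteration removes the current element, and the plain TAILQ_FOREACH would step through freed memory.
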
776 struct pass_io_req *io_req, *io_req2;
781 TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) {
782 TAILQ_REMOVE(&softc->done_queue, io_req, links);
783 passiocleanup(softc, io_req);
784 uma_zfree(softc->pass_zone, io_req);
787 TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links,
789 TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
790 passiocleanup(softc, io_req);
791 uma_zfree(softc->pass_zone, io_req);
800 io_req = TAILQ_FIRST(&softc->active_queue);
801 if ((io_req != NULL)
815 TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links,
817 TAILQ_REMOVE(&softc->active_queue, io_req, links);
818 io_req->flags |= PASS_IO_ABANDONED;
819 TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req,
853 struct pass_io_req *io_req;
859 io_req = TAILQ_FIRST(&softc->incoming_queue);
860 if (io_req == NULL) {
864 TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
865 TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links);
869 xpt_merge_ccb(start_ccb, &io_req->ccb);
871 start_ccb->ccb_h.ccb_ioreq = io_req;
873 io_req->alloced_ccb = start_ccb;
874 binuptime(&io_req->start_time);
876 &io_req->start_time);
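
Source lines 853-876 are the start path: the oldest incoming request moves to the active queue, the user's CCB content is merged into the freshly allocated one (xpt_merge_ccb, line 869), the CCB and the request are pointed at each other, and the start time is recorded with binuptime() for latency accounting. A sketch of the same bookkeeping, with illustrative names, clock_gettime() in place of the kernel's binuptime(), and the CCB merge omitted:

#include <sys/queue.h>
#include <stddef.h>
#include <time.h>

struct io_req;
struct ccb {
	struct io_req *ioreq;		/* like ccb_h.ccb_ioreq, line 871 */
};

struct io_req {
	struct ccb ccb;			/* user-supplied CCB contents */
	struct ccb *alloced_ccb;	/* CCB actually sent downstream, line 873 */
	struct timespec start_time;	/* stands in for struct bintime */
	TAILQ_ENTRY(io_req) links;
};
TAILQ_HEAD(io_queue, io_req);

/* Start sketch: pop the oldest request, mark it active, cross-link, timestamp. */
static struct io_req *
start_next(struct io_queue *incoming, struct io_queue *active,
    struct ccb *start_ccb)
{
	struct io_req *req;

	req = TAILQ_FIRST(incoming);
	if (req == NULL)
		return (NULL);
	TAILQ_REMOVE(incoming, req, links);
	TAILQ_INSERT_TAIL(active, req, links);

	start_ccb->ioreq = req;		/* completion can find the request */
	req->alloced_ccb = start_ccb;	/* request can find its live CCB */
	clock_gettime(CLOCK_MONOTONIC, &req->start_time);
	return (req);
}
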
905 struct pass_io_req *io_req;
907 io_req = done_ccb->ccb_h.ccb_ioreq;
910 __func__, io_req->user_ccb_ptr);
914 && ((io_req->flags & PASS_IO_ABANDONED) == 0)) {
933 bcopy(done_ccb, &io_req->ccb, sizeof(*done_ccb));
947 &io_req->start_time);
957 &io_req->start_time);
974 &io_req->start_time);
979 &io_req->start_time);
988 if ((io_req->flags & PASS_IO_ABANDONED) == 0) {
989 TAILQ_REMOVE(&softc->active_queue, io_req, links);
990 TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links);
999 TAILQ_REMOVE(&softc->abandoned_queue, io_req, links);
1000 passiocleanup(softc, io_req);
1001 uma_zfree(softc->pass_zone, io_req);
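
Source lines 905-1001 are the completion side: the request is recovered through the ccb_h.ccb_ioreq back pointer, the completed CCB is copied back into the request (the bcopy() at line 933), and the request either migrates from the active to the done queue or, if it was abandoned during teardown, is freed on the spot. A sketch with illustrative names; free() stands in for uma_zfree():

#include <sys/queue.h>
#include <stdlib.h>

#define	IO_ABANDONED	0x01

struct io_req;
struct ccb { struct io_req *ioreq; /* payload omitted */ };

struct io_req {
	struct ccb ccb;		/* result copied back here for the consumer */
	int flags;
	TAILQ_ENTRY(io_req) links;
};
TAILQ_HEAD(io_queue, io_req);

/*
 * Completion sketch: a normal request migrates active -> done and waits
 * to be fetched; an abandoned request has no consumer left, so it is
 * freed immediately.
 */
static void
io_done(struct io_queue *active, struct io_queue *done,
    struct io_queue *abandoned, struct ccb *done_ccb)
{
	struct io_req *req = done_ccb->ioreq;

	req->ccb = *done_ccb;			/* like the bcopy() at line 933 */
	if ((req->flags & IO_ABANDONED) == 0) {
		TAILQ_REMOVE(active, req, links);
		TAILQ_INSERT_TAIL(done, req, links);
	} else {
		TAILQ_REMOVE(abandoned, req, links);
		free(req);
	}
}
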
1118 passiocleanup(struct pass_softc *softc, struct pass_io_req *io_req)
1124 ccb = &io_req->ccb;
1128 numbufs = min(io_req->num_bufs, 2);
1140 numbufs = min(io_req->num_bufs, 1);
1144 numbufs = min(io_req->num_bufs, 1);
1147 numbufs = min(io_req->num_bufs, 2);
1152 numbufs = min(io_req->num_bufs, 1);
1161 if (io_req->flags & PASS_IO_USER_SEG_MALLOC) {
1162 free(io_req->user_segptr, M_SCSIPASS);
1163 io_req->user_segptr = NULL;
1169 if (io_req->data_flags == CAM_DATA_VADDR) {
1170 for (i = 0; i < io_req->num_bufs; i++) {
1171 if (io_req->kern_bufs[i] == NULL)
1174 free(io_req->kern_bufs[i], M_SCSIPASS);
1175 io_req->kern_bufs[i] = NULL;
1177 } else if (io_req->data_flags == CAM_DATA_SG) {
1178 for (i = 0; i < io_req->num_kern_segs; i++) {
1180 io_req->kern_segptr[i].ds_addr == NULL)
1184 io_req->kern_segptr[i].ds_addr);
1185 io_req->kern_segptr[i].ds_addr = 0;
1189 if (io_req->flags & PASS_IO_KERN_SEG_MALLOC) {
1190 free(io_req->kern_segptr, M_SCSIPASS);
1191 io_req->kern_segptr = NULL;
1194 if (io_req->data_flags != CAM_DATA_PADDR) {
1200 if (io_req->user_bufs[i] != NULL)
1201 *data_ptrs[i] = io_req->user_bufs[i];
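
Source lines 1118-1201 (passiocleanup) release whatever setup allocated, keyed off data_flags and the PASS_IO_*_SEG_MALLOC flags, and finally put the user's original data pointers back into the CCB (lines 1194-1201). Because every allocation is guarded by a flag or a NULL check, the function is safe to call on a partially built request. A userspace sketch of the guarded teardown; the names and the two-buffer bound are illustrative, and the pointer-restore step is omitted:

#include <stdlib.h>
#include <stddef.h>

#define	MAX_BUFS		2
#define	IO_USER_SEG_MALLOC	0x02
#define	IO_KERN_SEG_MALLOC	0x04

enum data_kind { DATA_VADDR, DATA_SG, DATA_PADDR };

struct seg { void *addr; size_t len; };

struct io_req {
	enum data_kind	 data_flags;
	int		 flags;
	int		 num_bufs;
	void		*kern_bufs[MAX_BUFS];
	struct seg	*user_segptr, *kern_segptr;
	int		 num_kern_segs;
};

/* Cleanup sketch: every free is gated, so half-built requests are fine. */
static void
io_cleanup(struct io_req *req)
{
	int i;

	if (req->flags & IO_USER_SEG_MALLOC) {
		free(req->user_segptr);
		req->user_segptr = NULL;
	}
	if (req->data_flags == DATA_VADDR) {
		for (i = 0; i < req->num_bufs; i++) {
			free(req->kern_bufs[i]);	/* free(NULL) is a no-op */
			req->kern_bufs[i] = NULL;
		}
	} else if (req->data_flags == DATA_SG) {
		for (i = 0; i < req->num_kern_segs; i++) {
			free(req->kern_segptr[i].addr);
			req->kern_segptr[i].addr = NULL;
		}
	}
	if (req->flags & IO_KERN_SEG_MALLOC) {
		free(req->kern_segptr);
		req->kern_segptr = NULL;
	}
}
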
1208 passcopysglist(struct cam_periph *periph, struct pass_io_req *io_req,
1220 user_sglist = io_req->user_segptr;
1221 kern_sglist = io_req->kern_segptr;
1223 for (i = 0, j = 0; i < io_req->num_user_segs &&
1224 j < io_req->num_kern_segs;) {
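
Source lines 1208-1224 (passcopysglist) walk a user and a kernel scatter/gather list in lockstep. The two lists cover the same total bytes but with different segment boundaries, hence the two-cursor loop at lines 1223-1224 that advances i and j independently. A runnable sketch of that copy, with memcpy() standing in for copyin()/copyout() and all names illustrative:

#include <string.h>
#include <stddef.h>
#include <stdint.h>

struct seg { uint8_t *addr; size_t len; };

/*
 * Two-cursor S/G copy sketch: each step copies
 * min(remaining in user seg, remaining in kernel seg) bytes, then
 * advances whichever cursor(s) hit a segment boundary.
 */
static void
copy_sglist(struct seg *user, int nuser, struct seg *kern, int nkern,
    int to_kernel)
{
	size_t uoff = 0, koff = 0;
	int i = 0, j = 0;

	while (i < nuser && j < nkern) {
		size_t ulen = user[i].len - uoff;
		size_t klen = kern[j].len - koff;
		size_t amt = ulen < klen ? ulen : klen;

		if (to_kernel)
			memcpy(kern[j].addr + koff, user[i].addr + uoff, amt);
		else
			memcpy(user[i].addr + uoff, kern[j].addr + koff, amt);

		uoff += amt;
		koff += amt;
		if (uoff == user[i].len) { i++; uoff = 0; }
		if (koff == kern[j].len) { j++; koff = 0; }
	}
}
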
1286 passmemsetup(struct cam_periph *periph, struct pass_io_req *io_req)
1304 ccb = &io_req->ccb;
1329 io_req->data_flags = CAM_DATA_VADDR;
1342 io_req->data_flags = ccb->ccb_h.flags & CAM_DATA_MASK;
1362 io_req->data_flags = CAM_DATA_VADDR;
1371 io_req->data_flags = CAM_DATA_VADDR;
1386 io_req->data_flags = CAM_DATA_VADDR;
1398 io_req->num_bufs = numbufs;
1407 io_req->user_bufs[i] = *data_ptrs[i];
1408 io_req->dirs[i] = dirs[i];
1409 io_req->lengths[i] = lengths[i];
1423 switch (io_req->data_flags) {
1433 if (io_req->lengths[i] == 0)
1440 if (!useracc(io_req->user_bufs[i], io_req->lengths[i],
1441 (io_req->dirs[i] == CAM_DIR_IN) ? VM_PROT_WRITE :
1445 io_req->user_bufs[i], io_req->lengths[i]);
1452 io_req->kern_bufs[i] = tmp_buf;
1458 tmp_buf, lengths[i], io_req->user_bufs[i],
1467 error = copyin(io_req->user_bufs[i],
1468 io_req->kern_bufs[i], lengths[i]);
1473 io_req->user_bufs[i],
1474 io_req->kern_bufs[i], error);
1535 io_req->num_user_segs = num_segs;
1536 io_req->num_kern_segs = num_segs_needed;
1539 io_req->user_bufs[0] = *data_ptrs[0];
1546 io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) *
1548 io_req->flags |= PASS_IO_USER_SEG_MALLOC;
1550 io_req->user_segptr = io_req->user_segs;
1559 error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length);
1563 __func__, *data_ptrs[0], io_req->user_segptr,
1569 io_req->kern_segptr = malloc(sizeof(bus_dma_segment_t) *
1571 io_req->flags |= PASS_IO_KERN_SEG_MALLOC;
1573 io_req->kern_segptr = io_req->kern_segs;
1586 io_req->kern_segptr[i].ds_addr =
1588 io_req->kern_segptr[i].ds_len = alloc_size;
1597 *data_ptrs[0] = (uint8_t *)io_req->kern_segptr;
1598 *seg_cnt_ptr = io_req->num_kern_segs;
1604 error = passcopysglist(periph, io_req, dirs[0]);
1643 io_req->num_user_segs = num_segs;
1644 io_req->num_kern_segs = io_req->num_user_segs;
1647 io_req->user_bufs[0] = *data_ptrs[0];
1650 io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) *
1652 io_req->flags |= PASS_IO_USER_SEG_MALLOC;
1654 io_req->user_segptr = io_req->user_segs;
1656 io_req->kern_segptr = io_req->user_segptr;
1658 error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length);
1662 __func__, *data_ptrs[0], io_req->user_segptr,
1680 passiocleanup(softc, io_req);
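
Source lines 1286-1680 (passmemsetup) classify the CCB's data and stage it for the kernel: CAM_DATA_VADDR buffers get kernel bounce buffers, with a copy-in for data headed to the device; CAM_DATA_SG lists are copied in and re-segmented into kernel allocations whose list is then swapped into the CCB (lines 1586-1598); and any failure falls through to passiocleanup (line 1680) so partial work is undone. A sketch of just the VADDR staging, with malloc()/memcpy() standing in for the kernel allocator and copyin(), and all names illustrative:

#include <stdlib.h>
#include <string.h>
#include <errno.h>

#define	MAX_BUFS	2
enum dir { DIR_IN, DIR_OUT };	/* IN = device to user, as in CAM_DIR_IN */

struct io_req {
	int	 num_bufs;
	void	*user_bufs[MAX_BUFS];
	void	*kern_bufs[MAX_BUFS];
	size_t	 lengths[MAX_BUFS];
	enum dir dirs[MAX_BUFS];
};

/*
 * VADDR setup sketch: one bounce buffer per user buffer; data going
 * out to the device is copied in up front.  The real code also
 * validates the user addresses before touching them (line 1440).
 * On failure the caller is expected to run a guarded cleanup like the
 * io_cleanup() sketch above.
 */
static int
mem_setup_vaddr(struct io_req *req)
{
	int i;

	for (i = 0; i < req->num_bufs; i++) {
		if (req->lengths[i] == 0)
			continue;
		req->kern_bufs[i] = malloc(req->lengths[i]);
		if (req->kern_bufs[i] == NULL)
			return (ENOMEM);
		if (req->dirs[i] == DIR_OUT)
			memcpy(req->kern_bufs[i], req->user_bufs[i],
			    req->lengths[i]);
	}
	return (0);
}
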
1686 passmemdone(struct cam_periph *periph, struct pass_io_req *io_req)
1695 ccb = &io_req->ccb;
1697 switch (io_req->data_flags) {
1702 for (i = 0; i < io_req->num_bufs; i++) {
1703 if (io_req->dirs[i] != CAM_DIR_IN)
1706 error = copyout(io_req->kern_bufs[i],
1707 io_req->user_bufs[i], io_req->lengths[i]);
1711 io_req->lengths[i],
1712 io_req->kern_bufs[i],
1713 io_req->user_bufs[i]);
1727 if (io_req->dirs[0] == CAM_DIR_IN)
1728 error = passcopysglist(periph, io_req, io_req->dirs[0]);
1747 passiocleanup(softc, io_req);
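
Source lines 1686-1747 (passmemdone) mirror the setup: for CAM_DATA_VADDR, only buffers whose direction was CAM_DIR_IN are copied back to user space (lines 1702-1713); for CAM_DATA_SG the S/G copy helper runs in the other direction (line 1728); cleanup follows either way. A sketch of the direction-filtered copy-out, illustrative names, memcpy() for copyout():

#include <string.h>
#include <stddef.h>

#define	MAX_BUFS	2
enum dir { DIR_IN, DIR_OUT };

struct io_req {
	int	 num_bufs;
	void	*user_bufs[MAX_BUFS];
	void	*kern_bufs[MAX_BUFS];
	size_t	 lengths[MAX_BUFS];
	enum dir dirs[MAX_BUFS];
};

/* Completion-side copy sketch: only device-to-user data moves back. */
static void
mem_done_vaddr(struct io_req *req)
{
	int i;

	for (i = 0; i < req->num_bufs; i++) {
		if (req->dirs[i] != DIR_IN)
			continue;
		memcpy(req->user_bufs[i], req->kern_bufs[i],
		    req->lengths[i]);
	}
}
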
1840 struct pass_io_req *io_req;
1862 io_req = uma_zalloc(softc->pass_zone, M_WAITOK | M_ZERO);
1863 ccb = &io_req->ccb;
1886 uma_zfree(softc->pass_zone, io_req);
1911 uma_zfree(softc->pass_zone, io_req);
1922 io_req->user_ccb_ptr = *user_ccb;
1923 io_req->user_periph_links = ccb->ccb_h.periph_links;
1924 io_req->user_periph_priv = ccb->ccb_h.periph_priv;
1930 ccb->ccb_h.ccb_ioreq = io_req;
1959 error = passmemsetup(periph, io_req);
1961 uma_zfree(softc->pass_zone, io_req);
1966 io_req->mapinfo.num_bufs_used = 0;
1973 TAILQ_INSERT_TAIL(&softc->incoming_queue, io_req, links);
1994 TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
1995 TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links);
2005 TAILQ_REMOVE(&softc->active_queue, io_req, links);
2006 TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links);
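
Source lines 1840-2006 are the queueing ioctl: a request is allocated from the UMA zone, the user's CCB is copied in, and, since the driver is about to reuse the CCB header's periph_links/periph_priv fields while it owns the CCB, the user's values are saved in the request (lines 1922-1924) so they can be restored before the CCB is copied back out. A sketch of that save-and-backlink step, with calloc() standing in for uma_zalloc() and all names illustrative:

#include <stdlib.h>

struct ccb_hdr {
	void	*periph_links;	/* driver-owned while the CCB is in flight */
	void	*periph_priv;
	void	*ccb_ioreq;
};
struct ccb { struct ccb_hdr ccb_h; /* payload omitted */ };

struct io_req {
	struct ccb	 ccb;
	struct ccb	*user_ccb_ptr;
	void		*user_periph_links;	/* user's values, saved */
	void		*user_periph_priv;
};

/*
 * Queue-side sketch of the CAMIOQUEUE idea: copy the user's CCB into a
 * fresh request, save the header fields about to be overwritten, and
 * plant the back pointer the completion path relies on (line 1930).
 */
static struct io_req *
queue_user_ccb(const struct ccb *user_copy, struct ccb *user_ptr)
{
	struct io_req *req = calloc(1, sizeof(*req));

	if (req == NULL)
		return (NULL);
	req->ccb = *user_copy;
	req->user_ccb_ptr = user_ptr;	/* handed back to the caller at fetch time */
	req->user_periph_links = req->ccb.ccb_h.periph_links;
	req->user_periph_priv = req->ccb.ccb_h.periph_priv;
	req->ccb.ccb_h.ccb_ioreq = req;
	return (req);
}
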
2013 struct pass_io_req *io_req;
2025 io_req = TAILQ_FIRST(&softc->done_queue);
2026 if (io_req == NULL) {
2034 TAILQ_REMOVE(&softc->done_queue, io_req, links);
2046 error = passmemdone(periph, io_req);
2050 io_req->ccb.ccb_h.periph_links = io_req->user_periph_links;
2051 io_req->ccb.ccb_h.periph_priv = io_req->user_periph_priv;
2055 "kernel address %p\n", *user_ccb, &io_req->ccb);
2058 error = copyout(&io_req->ccb, *user_ccb, sizeof(union ccb));
2062 *user_ccb, &io_req->ccb, error);
2080 uma_zfree(softc->pass_zone, io_req);
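
Source lines 2013-2080 are the fetch ioctl: the oldest completed request is popped from the done queue, its buffers are copied out (passmemdone, line 2046), the saved header fields are restored (lines 2050-2051), the CCB is copied back to the user address, and the request returns to the UMA zone. A closing sketch with illustrative names; memcpy() and free() stand in for copyout() and uma_zfree():

#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>

struct ccb_hdr { void *periph_links, *periph_priv; };
struct ccb { struct ccb_hdr ccb_h; /* payload omitted */ };

struct io_req {
	struct ccb	 ccb;			/* completed result */
	void		*user_periph_links;	/* saved at queue time */
	void		*user_periph_priv;
	TAILQ_ENTRY(io_req) links;
};
TAILQ_HEAD(io_queue, io_req);

/*
 * Fetch-side sketch of the CAMIOGET idea: pop a completed request,
 * restore the user's saved header fields, hand the CCB back, and
 * release the request.
 */
static int
fetch_done_ccb(struct io_queue *done, struct ccb *user_ccb)
{
	struct io_req *req = TAILQ_FIRST(done);

	if (req == NULL)
		return (-1);			/* nothing completed yet */
	TAILQ_REMOVE(done, req, links);
	req->ccb.ccb_h.periph_links = req->user_periph_links;
	req->ccb.ccb_h.periph_priv = req->user_periph_priv;
	memcpy(user_ccb, &req->ccb, sizeof(*user_ccb));
	free(req);
	return (0);
}
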