Lines Matching refs:io_req

472 struct io_thread_req *io_req = (*irq_req_buffer)[count];
474 if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) {
475 blk_queue_max_discard_sectors(io_req->req->q, 0);
476 blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
478 blk_mq_end_request(io_req->req, io_req->error);
479 kfree(io_req);
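
Hits 472-479 come from the interrupt-side completion path of the UML ubd block driver (these lines look like arch/um/drivers/ubd_kern.c). A minimal sketch of how they fit together, assuming the enclosing loop plus the irq_req_buffer and n names from the surrounding handler, which are not part of the matched lines:

	/* n = number of bytes of io_thread_req pointers read back from the
	 * I/O helper thread's pipe (assumed context, not a matched line). */
	for (count = 0; count < n / sizeof(struct io_thread_req *); count++) {
		struct io_thread_req *io_req = (*irq_req_buffer)[count];

		/* The host rejected a discard: stop advertising discard and
		 * write-zeroes support on this queue so it is not retried. */
		if ((io_req->error == BLK_STS_NOTSUPP) &&
		    (req_op(io_req->req) == REQ_OP_DISCARD)) {
			blk_queue_max_discard_sectors(io_req->req->q, 0);
			blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
		}

		/* Complete the block-layer request with the status reported
		 * by the I/O thread, then free the per-request descriptor. */
		blk_mq_end_request(io_req->req, io_req->error);
		kfree(io_req);
	}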
1212 static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req,
1218 unsigned long byte_offset = io_req->offset;
1222 io_req->io_desc[0].buffer = NULL;
1223 io_req->io_desc[0].length = blk_rq_bytes(req);
1226 BUG_ON(i >= io_req->desc_cnt);
1228 io_req->io_desc[i].buffer = bvec_virt(&bvec);
1229 io_req->io_desc[i].length = bvec.bv_len;
1235 for (i = 0; i < io_req->desc_cnt; i++) {
1236 cowify_req(io_req, &io_req->io_desc[i], byte_offset,
1239 byte_offset += io_req->io_desc[i].length;
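
Hits 1212-1239 belong to ubd_map_req(), which fills the request's io_desc array from its segments and, on a copy-on-write device, computes per-descriptor COW offsets. A sketch reconstructed around the matched lines; the req_op() checks, the rq_for_each_segment() loop and the dev->cow.file test are assumed from context:

	static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req,
				struct request *req)
	{
		struct req_iterator iter;
		struct bio_vec bvec;
		int i = 0;
		unsigned long byte_offset = io_req->offset;

		if (req_op(req) == REQ_OP_WRITE_ZEROES ||
		    req_op(req) == REQ_OP_DISCARD) {
			/* No data payload: one descriptor covering the range. */
			io_req->io_desc[0].buffer = NULL;
			io_req->io_desc[0].length = blk_rq_bytes(req);
		} else {
			/* One descriptor per physical segment of the request. */
			rq_for_each_segment(bvec, req, iter) {
				BUG_ON(i >= io_req->desc_cnt);

				io_req->io_desc[i].buffer = bvec_virt(&bvec);
				io_req->io_desc[i].length = bvec.bv_len;
				i++;
			}
		}

		if (dev->cow.file) {
			/* COW-backed device: work out bitmap/backing-file
			 * offsets for each chunk of the request. */
			for (i = 0; i < io_req->desc_cnt; i++) {
				cowify_req(io_req, &io_req->io_desc[i], byte_offset,
					   dev->cow.bitmap, dev->cow.bitmap_offset,
					   dev->cow.bitmap_len);
				byte_offset += io_req->io_desc[i].length;
			}
		}
	}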
1248 struct io_thread_req *io_req;
1251 io_req = kmalloc(sizeof(*io_req) +
1254 if (!io_req)
1257 io_req->req = req;
1259 io_req->fds[0] = dev->cow.fd;
1261 io_req->fds[0] = dev->fd;
1262 io_req->error = 0;
1263 io_req->sectorsize = SECTOR_SIZE;
1264 io_req->fds[1] = dev->fd;
1265 io_req->offset = (u64) blk_rq_pos(req) << SECTOR_SHIFT;
1266 io_req->offsets[0] = 0;
1267 io_req->offsets[1] = dev->cow.data_offset;
1270 io_req->io_desc[i].sector_mask = 0;
1271 io_req->io_desc[i].cow_offset = -1;
1274 return io_req;
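
Hits 1248-1274 are ubd_alloc_req(), which allocates the io_thread_req together with a trailing array of desc_cnt io_desc entries in one kmalloc() and initializes the file descriptors and offsets. A sketch, assuming the desc_cnt parameter name and the dev->cow.file branch:

	static struct io_thread_req *ubd_alloc_req(struct ubd *dev,
						   struct request *req,
						   int desc_cnt)
	{
		struct io_thread_req *io_req;
		int i;

		/* One allocation for the request header plus its descriptors. */
		io_req = kmalloc(sizeof(*io_req) +
				 (desc_cnt * sizeof(struct io_desc)),
				 GFP_ATOMIC);
		if (!io_req)
			return NULL;

		io_req->req = req;
		if (dev->cow.file)
			io_req->fds[0] = dev->cow.fd;	/* COW layer comes first */
		else
			io_req->fds[0] = dev->fd;
		io_req->error = 0;
		io_req->sectorsize = SECTOR_SIZE;
		io_req->fds[1] = dev->fd;		/* backing file */
		io_req->offset = (u64) blk_rq_pos(req) << SECTOR_SHIFT;
		io_req->offsets[0] = 0;
		io_req->offsets[1] = dev->cow.data_offset;

		for (i = 0; i < desc_cnt; i++) {
			io_req->io_desc[i].sector_mask = 0;
			io_req->io_desc[i].cow_offset = -1;
		}

		return io_req;
	}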
1280 struct io_thread_req *io_req;
1291 io_req = ubd_alloc_req(dev, req, segs);
1292 if (!io_req)
1295 io_req->desc_cnt = segs;
1297 ubd_map_req(dev, io_req, req);
1299 ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
1300 if (ret != sizeof(io_req)) {
1303 kfree(io_req);
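
Hits 1280-1303 are the submission path (ubd_submit_request() or a similarly named helper, depending on kernel version): it allocates an io_req for the request, maps it, and hands the pointer to the helper I/O thread by writing it to thread_fd; on a short write the io_req is freed. A sketch assuming the segs computation and the error handling around the write, which lie outside the matched lines:

	static int ubd_submit_request(struct ubd *dev, struct request *req)
	{
		int segs = 0;
		struct io_thread_req *io_req;
		int ret;

		if (req_op(req) == REQ_OP_FLUSH)
			segs = 0;
		else if (req_op(req) == REQ_OP_WRITE_ZEROES ||
			 req_op(req) == REQ_OP_DISCARD)
			segs = 1;
		else
			segs = blk_rq_nr_phys_segments(req);

		io_req = ubd_alloc_req(dev, req, segs);
		if (!io_req)
			return -ENOMEM;

		io_req->desc_cnt = segs;
		if (segs)
			ubd_map_req(dev, io_req, req);

		/* Hand the request pointer to the I/O helper thread over a
		 * pipe; completion comes back asynchronously through the
		 * handler sketched above (lines 472-479). */
		ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
		if (ret != sizeof(io_req)) {
			if (ret != -EAGAIN)
				pr_err("write to io thread failed: %d\n", -ret);
			kfree(io_req);
		}
		return ret;
	}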