Searched refs: io_req (Results 1 - 25 of 31) sorted by relevance

/linux-master/drivers/scsi/bnx2fc/
bnx2fc_io.c
19 static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
21 static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
22 static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
23 static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
24 static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
25 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
29 void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req, argument
32 struct bnx2fc_interface *interface = io_req->port->priv;
35 &io_req->timeout_work,
37 kref_get(&io_req
42 struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd, local
162 bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code) argument
215 struct bnx2fc_cmd *io_req; local
380 struct bnx2fc_cmd *tmp, *io_req; local
399 struct bnx2fc_cmd *io_req; local
469 struct bnx2fc_cmd *io_req; local
519 struct bnx2fc_cmd *io_req = container_of(ref, local
544 bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) argument
579 bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) argument
666 struct bnx2fc_cmd *io_req; local
815 bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req) argument
992 bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req) argument
1092 bnx2fc_initiate_cleanup(io_req); variable
1130 struct bnx2fc_cmd *io_req; local
1322 bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task, u8 num_rq) argument
1354 bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task, u8 num_rq) argument
1454 bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req) argument
1493 bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req) argument
1524 bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task, u8 num_rq, unsigned char *rq_data) argument
1626 bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, int bd_index) argument
1651 bnx2fc_map_sg(struct bnx2fc_cmd *io_req) argument
1697 bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req) argument
1727 bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req) argument
1744 bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req, struct fcp_cmnd *fcp_cmnd) argument
1757 bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, struct fcoe_fcp_rsp_payload *fcp_rsp, u8 num_rq, unsigned char *rq_data) argument
1846 struct bnx2fc_cmd *io_req; local
1904 bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task, u8 num_rq, unsigned char *rq_data) argument
2023 bnx2fc_post_io_req(struct bnx2fc_rport *tgt, struct bnx2fc_cmd *io_req) argument
[all...]
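
The bnx2fc_cmd_timer_set() hits above (lines 29-37) show the usual pairing of a delayed work item with an extra reference, so the command cannot be freed while its timeout is pending. Below is a minimal sketch of that pattern; the struct and field names are stand-ins rather than the driver's real ones, and taking the kref only when the work was actually queued is one reasonable way to pair the two operations.

#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

/* Simplified stand-in for a driver command; field names are assumptions. */
struct my_io_req {
        struct kref refcount;
        struct delayed_work timeout_work;
        struct workqueue_struct *timer_wq;      /* e.g. the interface's timer workqueue */
};

/* Arm the per-command timeout and pin the request until the handler runs. */
static void my_cmd_timer_set(struct my_io_req *io_req, unsigned int timeout_ms)
{
        if (queue_delayed_work(io_req->timer_wq, &io_req->timeout_work,
                               msecs_to_jiffies(timeout_ms)))
                kref_get(&io_req->refcount);    /* dropped by the timeout handler */
}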
bnx2fc_debug.c
17 void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...) argument
30 if (io_req && io_req->port && io_req->port->lport &&
31 io_req->port->lport->host)
32 shost_printk(KERN_INFO, io_req->port->lport->host,
34 io_req->xid, &vaf);
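
BNX2FC_IO_DBG() above is a varargs helper that tags each message with the command's xid and routes it through shost_printk() using the kernel's %pV / struct va_format mechanism. A hedged sketch of the same technique, with a hypothetical helper name and a plain xid parameter instead of the driver's io_req->port->lport->host chain:

#include <linux/kernel.h>
#include <linux/stdarg.h>
#include <scsi/scsi_host.h>

/* Format the caller's arguments once via %pV and print them against the
 * SCSI host, in the style of BNX2FC_IO_DBG(). Hypothetical helper.
 */
__printf(3, 4)
static void my_io_dbg(struct Scsi_Host *shost, u16 xid, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;

        if (shost)
                shost_printk(KERN_INFO, shost, "xid:0x%x %pV", xid, &vaf);

        va_end(args);
}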
bnx2fc_tgt.c
168 struct bnx2fc_cmd *io_req; local
178 list_for_each_entry_safe(io_req, tmp, &tgt->active_cmd_queue, link) {
180 list_del_init(&io_req->link);
181 io_req->on_active_queue = 0;
182 BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");
184 if (cancel_delayed_work(&io_req->timeout_work)) {
186 &io_req->req_flags)) {
188 BNX2FC_IO_DBG(io_req, "eh_abort for IO "
190 complete(&io_req->abts_done);
192 kref_put(&io_req
[all...]
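
The bnx2fc_tgt.c hits walk the target's active_cmd_queue with list_for_each_entry_safe(), cancel any pending timeout (dropping the reference the timer held), wake an eh_abort waiter, and finally drop the queue's own reference. A compact sketch of that drain pattern, with the flag test reduced to a plain boolean and the reference ownership stated as an assumption:

#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_io_req {
        struct list_head link;
        struct delayed_work timeout_work;
        struct completion abts_done;
        struct kref refcount;
        bool abort_waiter;              /* stand-in for the req_flags bit */
};

static void my_io_release(struct kref *ref)
{
        kfree(container_of(ref, struct my_io_req, refcount));
}

/* Drain every request queued on @head. */
static void my_flush_active_queue(struct list_head *head)
{
        struct my_io_req *io_req, *tmp;

        list_for_each_entry_safe(io_req, tmp, head, link) {
                list_del_init(&io_req->link);

                /* If the timeout had not fired yet, drop the timer's kref. */
                if (cancel_delayed_work(&io_req->timeout_work))
                        kref_put(&io_req->refcount, my_io_release);

                if (io_req->abort_waiter)
                        complete(&io_req->abts_done);

                /* Drop the reference the active queue held (assumption). */
                kref_put(&io_req->refcount, my_io_release);
        }
}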
bnx2fc_debug.h
41 void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...);
bnx2fc.h
407 struct bnx2fc_cmd *io_req; member in struct:bnx2fc_els_cb_arg
474 struct bnx2fc_cmd *io_req; member in struct:io_bdt
496 struct bnx2fc_cmd *io_req; member in struct:bnx2fc_priv
535 int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req);
536 int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req);
537 void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
539 int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
540 void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
547 void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
549 void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
[all...]
bnx2fc_hwi.c
635 struct bnx2fc_cmd *io_req = NULL; local
710 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
711 if (!io_req)
714 if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
720 &io_req->req_flags)) {
721 BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
743 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
744 printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
760 memcpy(&io_req->err_entry, err_entry,
763 &io_req
866 struct bnx2fc_cmd *io_req; local
1568 bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task, u16 orig_xid) argument
1606 bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task) argument
1702 bnx2fc_init_task(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task) argument
[all...]
bnx2fc_els.c
35 rrq_req = cb_arg->io_req;
138 els_req = cb_arg->io_req;
279 srr_req = cb_arg->io_req;
399 rec_req = cb_arg->io_req;
715 cb_arg->io_req = els_req;
/linux-master/drivers/scsi/qedf/
qedf_io.c
11 void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, argument
14 queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
21 struct qedf_ioreq *io_req = local
26 fcport = io_req->fcport;
27 if (io_req->fcport == NULL) {
34 switch (io_req->cmd_type) {
39 io_req->xid);
44 io_req->xid);
46 qedf_initiate_cleanup(io_req, true);
47 complete(&io_req
115 struct qedf_ioreq *io_req; local
164 struct qedf_ioreq *io_req = local
176 struct qedf_ioreq *io_req; local
302 struct qedf_ioreq *io_req = NULL; local
403 qedf_free_mp_resc(struct qedf_ioreq *io_req) argument
434 struct qedf_ioreq *io_req = local
471 qedf_map_sg(struct qedf_ioreq *io_req) argument
528 qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req) argument
548 qedf_build_fcp_cmnd(struct qedf_ioreq *io_req, struct fcp_cmnd *fcp_cmnd) argument
590 qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe) argument
680 qedf_init_mp_task(struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe) argument
804 qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req, int8_t direction) argument
850 qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req) argument
941 struct qedf_ioreq *io_req; local
1054 qedf_parse_fcp_rsp(struct qedf_ioreq *io_req, struct fcoe_cqe_rsp_info *fcp_rsp) argument
1110 qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req) argument
1121 qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) argument
1323 qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, int result) argument
1432 qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) argument
1502 qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) argument
1589 struct qedf_ioreq *io_req; local
1851 qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts) argument
1957 qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) argument
2045 qedf_init_mp_req(struct qedf_ioreq *io_req) argument
2149 qedf_initiate_cleanup(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts) argument
2278 qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) argument
2293 struct qedf_ioreq *io_req; local
2476 qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) argument
[all...]
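
The qedf_io.c results show the other half of the timer pattern: the delayed-work handler recovers its qedf_ioreq with container_of(), branches on cmd_type, initiates cleanup or completes a waiter, and drops the reference taken when the timer was armed. A rough sketch of such a handler follows; the enum, waiter field and release function are invented for illustration.

#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

enum my_cmd_type { MY_SCSI_CMD, MY_ELS, MY_TASK_MGMT };

struct my_io_req {
        struct delayed_work timeout_work;
        struct kref refcount;
        enum my_cmd_type cmd_type;
        struct completion *waiter;      /* whoever sent the ELS/TM, may be NULL */
};

static void my_io_release(struct kref *ref)
{
        kfree(container_of(ref, struct my_io_req, refcount));
}

static void my_cmd_timeout(struct work_struct *work)
{
        struct my_io_req *io_req =
                container_of(work, struct my_io_req, timeout_work.work);

        switch (io_req->cmd_type) {
        case MY_SCSI_CMD:
                /* a real driver would escalate to abort/cleanup here */
                break;
        case MY_ELS:
        case MY_TASK_MGMT:
                if (io_req->waiter)
                        complete(io_req->waiter);
                break;
        }

        kref_put(&io_req->refcount, my_io_release);     /* timer's reference */
}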
qedf.h
72 struct qedf_ioreq *io_req; member in struct:qedf_els_cb_arg
193 struct qedf_ioreq *io_req; member in struct:qedf_cmd_priv
435 struct qedf_ioreq *io_req; member in struct:io_bdt
496 struct qedf_ioreq *io_req);
498 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
500 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
503 extern int qedf_initiate_abts(struct qedf_ioreq *io_req,
506 struct qedf_ioreq *io_req);
511 extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
513 extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
[all...]
qedf_els.c
72 cb_arg->io_req = els_req;
198 rrq_req = cb_arg->io_req;
207 "Original io_req is NULL, rrq_req = %p.\n", rrq_req);
217 * This should return the aborted io_req to the command pool. Note that
286 "refcount for xid=%x io_req=%p refcount=%d is not 1.\n",
430 els_req = cb_arg->io_req;
552 srr_req = cb_arg->io_req;
744 struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
749 cb_arg = io_req->cb_arg;
752 if (io_req
743 qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) argument
[all...]
qedf_main.c
721 struct qedf_ioreq *io_req; local
744 io_req = qedf_priv(sc_cmd)->io_req;
745 if (!io_req) {
754 rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */
758 /* If we got a valid io_req, confirm it belongs to this sc_cmd. */
759 if (!rval || io_req->sc_cmd != sc_cmd) {
761 "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n",
762 io_req
2230 struct qedf_ioreq *io_req = NULL; local
2806 struct qedf_ioreq *io_req; local
[all...]
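
The qedf_main.c fragment (lines 744-762) is the defensive lookup used during error handling: fetch the io_req from the command's private data, take a reference only if the count is still non-zero, then confirm the io_req still points back at this scsi_cmnd before trusting it. A sketch of that check with simplified, hypothetical types:

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>

struct my_io_req {
        struct kref refcount;
        struct scsi_cmnd *sc_cmd;
};

struct my_cmd_priv {
        struct my_io_req *io_req;
};

static void my_io_release(struct kref *ref)
{
        /* a real driver would return the command to its pool here */
        kfree(container_of(ref, struct my_io_req, refcount));
}

/* Return a referenced io_req for @sc_cmd, or NULL if it already completed
 * or was recycled for a different command.
 */
static struct my_io_req *my_get_io_req(struct scsi_cmnd *sc_cmd)
{
        struct my_cmd_priv *priv = scsi_cmd_priv(sc_cmd);
        struct my_io_req *io_req = priv->io_req;

        if (!io_req || !kref_get_unless_zero(&io_req->refcount))
                return NULL;

        if (io_req->sc_cmd != sc_cmd) {
                kref_put(&io_req->refcount, my_io_release);
                return NULL;
        }
        return io_req;
}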
/linux-master/drivers/scsi/fnic/
fnic_scsi.c
96 * Unmap the data buffer and sense buffer for an io_req,
100 struct fnic_io_req *io_req,
103 if (io_req->sgl_list_pa)
104 dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
105 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
109 if (io_req->sgl_cnt)
110 mempool_free(io_req->sgl_list_alloc,
111 fnic->io_sgl_pool[io_req->sgl_type]);
112 if (io_req
99 fnic_release_ioreq_buf(struct fnic *fnic, struct fnic_io_req *io_req, struct scsi_cmnd *sc) argument
288 fnic_queue_wq_copy_desc(struct fnic *fnic, struct vnic_wq_copy *wq, struct fnic_io_req *io_req, struct scsi_cmnd *sc, int sg_count, uint32_t mqtag, uint16_t hwq) argument
393 struct fnic_io_req *io_req = NULL; local
815 struct fnic_io_req *io_req; local
1081 struct fnic_io_req *io_req; local
1416 struct fnic_io_req *io_req; local
1506 struct fnic_io_req *io_req; local
1562 fnic_queue_abort_io_req(struct fnic *fnic, int tag, u32 task_req, u8 *fc_lun, struct fnic_io_req *io_req, unsigned int hwq) argument
1621 struct fnic_io_req *io_req; local
1788 struct fnic_io_req *io_req = NULL; local
2019 fnic_queue_dr_io_req(struct fnic *fnic, struct scsi_cmnd *sc, struct fnic_io_req *io_req) argument
2094 struct fnic_io_req *io_req; local
2279 struct fnic_io_req *io_req = NULL; local
2745 struct fnic_io_req *io_req; local
[all...]
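
fnic_release_ioreq_buf() (lines 96-112 above) undoes the per-I/O setup: the DMA-mapped scatter-gather list is unmapped from the device and the allocation is handed back to the mempool it came from. A stripped-down sketch of that teardown; the field names and the DMA direction are assumptions:

#include <linux/dma-mapping.h>
#include <linux/mempool.h>

struct my_io_req {
        dma_addr_t sgl_list_pa;         /* bus address of the mapped SGL */
        void *sgl_list_alloc;           /* original mempool allocation */
        unsigned int sgl_cnt;
        size_t sgl_entry_size;
        mempool_t *sgl_pool;
};

/* Release the per-I/O scatter list once the hardware is finished with it. */
static void my_release_ioreq_buf(struct device *dev, struct my_io_req *io_req)
{
        if (io_req->sgl_list_pa)
                dma_unmap_single(dev, io_req->sgl_list_pa,
                                 io_req->sgl_entry_size * io_req->sgl_cnt,
                                 DMA_TO_DEVICE); /* direction is an assumption */

        if (io_req->sgl_cnt)
                mempool_free(io_req->sgl_list_alloc, io_req->sgl_pool);
}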
/linux-master/samples/acrn/
vm-sample.c
50 struct acrn_io_request *io_req; local
109 io_req = &io_req_buf[vcpu_id];
110 if ((__sync_add_and_fetch(&io_req->processed, 0) == ACRN_IOREQ_STATE_PROCESSING)
111 && (!io_req->kernel_handled))
112 if (io_req->type == ACRN_IOREQ_TYPE_PORTIO) {
115 port = io_req->reqs.pio_request.address;
116 bytes = io_req->reqs.pio_request.size;
117 in = (io_req->reqs.pio_request.direction == ACRN_IOREQ_DIR_READ);
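
The ACRN sample is userspace code: it polls the shared acrn_io_request buffer, atomically reads the processed state, skips requests the kernel HSM already handled, and decodes port-I/O accesses. A self-contained sketch of the per-slot check, assuming the <linux/acrn.h> UAPI header and an mmap'ed request buffer as in vm-sample.c:

#include <stdbool.h>
#include <stdint.h>
#include <linux/acrn.h>

/* Inspect one I/O-request slot; returns true if it needs userspace emulation. */
static bool handle_one_ioreq(struct acrn_io_request *io_req)
{
        uint64_t port, bytes;
        int in;

        /* Atomic read so we never see a half-published request. */
        if (__sync_add_and_fetch(&io_req->processed, 0) != ACRN_IOREQ_STATE_PROCESSING)
                return false;
        if (io_req->kernel_handled)
                return false;           /* already served in the kernel */

        if (io_req->type == ACRN_IOREQ_TYPE_PORTIO) {
                port  = io_req->reqs.pio_request.address;
                bytes = io_req->reqs.pio_request.size;
                in    = (io_req->reqs.pio_request.direction == ACRN_IOREQ_DIR_READ);
                /* ... emulate the access, then mark the slot complete ... */
                (void)port; (void)bytes; (void)in;
        }
        return true;
}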
/linux-master/drivers/md/
dm-io.c
488 static int dp_init(struct dm_io_request *io_req, struct dpages *dp, argument
496 switch (io_req->mem.type) {
498 list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
502 bio_dp_init(dp, io_req->mem.ptr.bio);
506 flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
507 if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) {
508 dp->vma_invalidate_address = io_req->mem.ptr.vma;
511 vm_dp_init(dp, io_req->mem.ptr.vma);
515 km_dp_init(dp, io_req
525 dm_io(struct dm_io_request *io_req, unsigned int num_regions, struct dm_io_region *where, unsigned long *sync_error_bits, unsigned short ioprio) argument
[all...]
dm-log.c
243 struct dm_io_request io_req; member in struct:log_c
301 lc->io_req.bi_opf = op;
303 return dm_io(&lc->io_req, 1, &lc->header_location, NULL, IOPRIO_DEFAULT);
314 lc->io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
316 return dm_io(&lc->io_req, 1, &null_location, NULL, IOPRIO_DEFAULT);
461 lc->io_req.mem.type = DM_IO_VMA;
462 lc->io_req.notify.fn = NULL;
463 lc->io_req.client = dm_io_client_create();
464 if (IS_ERR(lc->io_req.client)) {
465 r = PTR_ERR(lc->io_req
[all...]
dm-integrity.c
535 struct dm_io_request io_req; local
540 io_req.bi_opf = opf;
541 io_req.mem.type = DM_IO_KMEM;
542 io_req.mem.ptr.addr = ic->sb;
543 io_req.notify.fn = NULL;
544 io_req.client = ic->io;
558 r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
1044 struct dm_io_request io_req; local
1058 io_req.bi_opf = opf;
1059 io_req
1165 struct dm_io_request io_req; local
1488 struct dm_io_request io_req; member in struct:flush_request
1719 struct dm_io_request io_req; local
2716 struct dm_io_request io_req; local
[all...]
dm-snap-persistent.c
217 struct dm_io_request *io_req; member in struct:mdata_req
226 req->result = dm_io(req->io_req, 1, req->where, NULL, IOPRIO_DEFAULT);
240 struct dm_io_request io_req = { local
250 return dm_io(&io_req, 1, &where, NULL, IOPRIO_DEFAULT);
253 req.io_req = &io_req;
dm-raid1.c
267 struct dm_io_request io_req = { local
281 dm_io(&io_req, ms->nr_mirrors, io, &error_bits, IOPRIO_DEFAULT);
546 struct dm_io_request io_req = { local
557 BUG_ON(dm_io(&io_req, 1, &io, NULL, IOPRIO_DEFAULT));
660 struct dm_io_request io_req = { local
670 io_req.bi_opf = REQ_OP_DISCARD | op_flags;
671 io_req.mem.type = DM_IO_KMEM;
672 io_req.mem.ptr.addr = NULL;
684 BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL, IOPRIO_DEFAULT));
dm-verity-target.c
503 struct dm_io_request io_req; local
509 io_req.bi_opf = REQ_OP_READ;
510 io_req.mem.type = DM_IO_KMEM;
511 io_req.mem.ptr.addr = buffer;
512 io_req.notify.fn = NULL;
513 io_req.client = v->io;
517 r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
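
The dm-verity and dm-integrity hits all fill a struct dm_io_request the same way: an opcode in bi_opf, a kernel-memory data pointer, notify.fn left NULL so dm_io() runs synchronously, and a client from dm_io_client_create(), aimed at one dm_io_region. Here is a sketch of a synchronous read built on that pattern; the helper name is invented, @len is assumed to be a multiple of 512, and error handling is trimmed.

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/ioprio.h>

static int read_block_sync(struct dm_io_client *client, struct block_device *bdev,
                           sector_t sector, void *buffer, unsigned int len)
{
        struct dm_io_request io_req = {
                .bi_opf = REQ_OP_READ,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = buffer,
                .notify.fn = NULL,      /* NULL notify => dm_io() blocks */
                .client = client,
        };
        struct dm_io_region io_loc = {
                .bdev = bdev,
                .sector = sector,
                .count = to_sector(len),
        };

        return dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
}

DM_IO_VMA and DM_IO_BIO (used by dm-log and dm-raid1 above) follow the same shape; only mem.type and the pointer member change.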
/linux-master/drivers/staging/rtl8723bs/include/
rtw_io.h
32 For prompt mode accessing, caller shall free io_req
33 Otherwise, io_handler will free io_req
109 struct io_req { struct
117 void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt);
142 struct list_head pending; /* The io_req list that will be served in the single protocol read/write. */
158 extern void sync_ioreq_enqueue(struct io_req *preq, struct io_queue *ioqueue);
162 extern uint free_ioreq(struct io_req *preq, struct io_queue *pio_queue);
163 extern struct io_req *alloc_ioreq(struct io_queue *pio_q);
193 void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_req, u8 *cnxt), u8 *cnxt);
195 void (*_async_io_callback)(struct adapter *padater, struct io_req *pio_re
[all...]
/linux-master/arch/um/drivers/
H A Dubd_kern.c472 struct io_thread_req *io_req = (*irq_req_buffer)[count]; local
474 if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) {
475 blk_queue_max_discard_sectors(io_req->req->q, 0);
476 blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
478 blk_mq_end_request(io_req->req, io_req->error);
479 kfree(io_req);
1212 static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req, argument
1218 unsigned long byte_offset = io_req
1248 struct io_thread_req *io_req; local
1280 struct io_thread_req *io_req; local
[all...]
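
The ubd hits show the completion side of a block request returned by the I/O thread: if the backend rejected a discard with BLK_STS_NOTSUPP, the queue stops advertising discard and write-zeroes, and the request is then ended with its status and the bookkeeping freed. A sketch of that handling; only the ->req and ->error members are taken from the listing, everything else is simplified:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

struct my_io_thread_req {
        struct request *req;
        blk_status_t error;
};

static void my_complete_io(struct my_io_thread_req *io_req)
{
        /* The backing file cannot discard: stop issuing such requests. */
        if (io_req->error == BLK_STS_NOTSUPP &&
            req_op(io_req->req) == REQ_OP_DISCARD) {
                blk_queue_max_discard_sectors(io_req->req->q, 0);
                blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
        }

        blk_mq_end_request(io_req->req, io_req->error);
        kfree(io_req);
}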
/linux-master/drivers/staging/rtl8712/
rtl871x_io.c
101 struct io_req *pio_req;
111 (sizeof(struct io_req)) + 4,
118 pio_req = (struct io_req *)(pio_queue->free_ioreqs_buf);
rtl871x_io.h
40 * For prompt mode accessing, caller shall free io_req
41 * Otherwise, io_handler will free io_req
101 struct io_req { struct
109 struct io_req *pio_req, u8 *cnxt);
214 /*The io_req list that will be served in the single protocol r/w.*/
/linux-master/include/linux/
dm-io.h
82 int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
/linux-master/drivers/scsi/csiostor/
csio_lnode.c
1435 struct csio_ioreq *io_req = NULL; local
1448 io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie);
1449 io_req->wr_status = csio_wr_status(wr_cmd);
1453 if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) {
1456 io_req);
1465 list_del_init(&io_req->sm.sm_list);
1469 /* io_req will be freed by completion handler */
1470 if (io_req->io_cbfn)
1471 io_req->io_cbfn(hw, io_req);
1680 csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len, uint32_t immd_len, uint8_t sub_op, uint32_t sid, uint32_t did, uint32_t flow_id, uint8_t *fw_wr) argument
1724 csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req, uint8_t sub_op, struct csio_dma_buf *pld, uint32_t pld_len) argument
1805 csio_ln_mgmt_submit_req(struct csio_ioreq *io_req, void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *), enum fcoe_cmn_type req_type, struct csio_dma_buf *pld, uint32_t pld_len) argument
[all...]

Completed in 316 milliseconds
