Searched refs:sqe (Results 51 - 75 of 96) sorted by relevance

/linux-master/drivers/scsi/qedi/
qedi_fw_api.c
98 if (!task_params->sqe)
101 memset(task_params->sqe, 0, sizeof(*task_params->sqe));
102 task_params->sqe->task_id = cpu_to_le16(task_params->itid);
104 SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
115 init_dif_context_flags(&task_params->sqe->prot_flags,
118 SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
134 SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
136 SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
141 SET_FIELD(task_params->sqe
[all...]
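
The qedi_fw_api.c fragments above follow one pattern: bail out if no SQE was supplied, zero it, then pack the task id and the type/SGE-count bit-fields with SET_FIELD(). A minimal compilable sketch of that pattern follows; the struct layout and the mask/shift values are illustrative stand-ins, not the real qed firmware definitions (only SET_FIELD()'s unshifted-mask convention mirrors qed).

#include <stdint.h>
#include <string.h>

/* Stand-in for qed's SET_FIELD() helper: clear the field, then OR in the
 * new value at its shift. Masks are unshifted, mirroring the qed convention. */
#define SET_FIELD(value, name, flag)                                     \
        do {                                                             \
                (value) &= ~((name##_MASK) << (name##_SHIFT));           \
                (value) |= (((flag) & (name##_MASK)) << (name##_SHIFT)); \
        } while (0)

/* Illustrative field definitions, not the firmware's real values. */
#define ISCSI_WQE_WQE_TYPE_MASK   0x7
#define ISCSI_WQE_WQE_TYPE_SHIFT  0
#define ISCSI_WQE_NUM_SGES_MASK   0xF
#define ISCSI_WQE_NUM_SGES_SHIFT  3

struct iscsi_wqe {                      /* reduced stand-in layout */
        uint16_t task_id;
        uint8_t  flags;
        uint8_t  prot_flags;
        uint32_t contlen_cdbsize;
};

struct iscsi_task_params {
        struct iscsi_wqe *sqe;
        uint16_t itid;
};

/* Mirrors the fragments above: reject a missing SQE, wipe it, then fill. */
static int init_sqe(struct iscsi_task_params *p, uint8_t type, uint8_t nsges)
{
        if (!p->sqe)
                return -1;

        memset(p->sqe, 0, sizeof(*p->sqe));
        p->sqe->task_id = p->itid;      /* the kernel wraps this in cpu_to_le16() */
        SET_FIELD(p->sqe->flags, ISCSI_WQE_WQE_TYPE, type);
        SET_FIELD(p->sqe->flags, ISCSI_WQE_NUM_SGES, nsges);
        return 0;
}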
qedi_fw_iscsi.h
14 struct iscsi_wqe *sqe; member in struct:iscsi_task_params
qedi_fw.c
1042 task_params.sqe = &ep->sq[sq_idx];
1044 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
1116 task_params.sqe = &ep->sq[sq_idx];
1117 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
1490 task_params.sqe = &ep->sq[sq_idx];
1492 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
1614 task_params.sqe = &ep->sq[sq_idx];
1616 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
1731 task_params.sqe = &ep->sq[sq_idx];
1733 memset(task_params.sqe,
[all...]
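
Each qedi_fw.c call site above repeats the same setup before task init: point task_params.sqe at the current producer slot of the per-endpoint send queue, then zero it. A minimal sketch of that slot grab, reusing the stand-in struct iscsi_wqe from the previous sketch (the driver derives sq_idx from its producer counter; the modulo makes the ring wraparound explicit):

/* Grab and clean the next SQE slot before the init helper fills it. */
static struct iscsi_wqe *grab_sqe(struct iscsi_wqe *sq, uint16_t prod,
                                  uint16_t depth)
{
        struct iscsi_wqe *sqe = &sq[prod % depth];

        memset(sqe, 0, sizeof(*sqe));   /* firmware expects a zeroed WQE */
        return sqe;
}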
/linux-master/io_uring/
poll.c
904 static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe, argument
909 events = READ_ONCE(sqe->poll32_events);
921 int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) argument
926 if (sqe->buf_index || sqe->splice_fd_in)
928 flags = READ_ONCE(sqe->len);
936 upd->old_user_data = READ_ONCE(sqe->addr);
940 upd->new_user_data = READ_ONCE(sqe->off);
944 upd->events = io_poll_parse_events(sqe, flags);
945 else if (sqe
951 io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) argument
[all...]
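
io_poll_remove_prep() above shows the common io_uring prep shape, which rw.c and rsrc.c below repeat: read each SQE field once with READ_ONCE() (the SQ ring is shared with userspace and may be rewritten concurrently) and reject with -EINVAL any field the opcode does not use. A stand-alone sketch of that shape with a reduced io_uring_sqe (the real UAPI struct has many more fields):

#include <stdint.h>
#include <errno.h>

/* Stand-in for the kernel's READ_ONCE(): force a single, untorn load. */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

struct io_uring_sqe {                   /* reduced stand-in for the UAPI struct */
        uint64_t addr;
        uint64_t off;
        uint32_t len;
        uint16_t buf_index;
        uint32_t splice_fd_in;
};

struct poll_update {
        uint64_t old_user_data;
        uint64_t new_user_data;
        uint32_t flags;
};

/* Read each field once; fields the opcode does not use must be zero. */
static int poll_remove_prep(struct poll_update *upd,
                            const struct io_uring_sqe *sqe)
{
        if (sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;

        upd->flags = READ_ONCE(sqe->len);
        upd->old_user_data = READ_ONCE(sqe->addr);
        upd->new_user_data = READ_ONCE(sqe->off);
        return 0;
}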
rw.c
78 int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) argument
84 rw->kiocb.ki_pos = READ_ONCE(sqe->off);
86 req->buf_index = READ_ONCE(sqe->buf_index);
88 ioprio = READ_ONCE(sqe->ioprio);
100 rw->addr = READ_ONCE(sqe->addr);
101 rw->len = READ_ONCE(sqe->len);
102 rw->flags = READ_ONCE(sqe->rw_flags);
106 int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe) argument
110 ret = io_prep_rw(req, sqe);
124 int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe) argument
146 io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) argument
[all...]
kbuf.h
48 int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
51 int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
rsrc.h
137 int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
rsrc.c
554 int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) argument
560 if (sqe->rw_flags || sqe->splice_fd_in)
563 up->offset = READ_ONCE(sqe->off);
564 up->nr_args = READ_ONCE(sqe->len);
567 up->arg = READ_ONCE(sqe->addr);
/linux-master/drivers/infiniband/hw/cxgb4/
restrack.c
96 struct t4_swsqe *sqe)
100 if (rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode))
102 if (rdma_nl_put_driver_u32(msg, "complete", sqe->complete))
104 if (sqe->complete &&
105 rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe)))
107 if (rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled))
109 if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))
95 fill_swsqe(struct sk_buff *msg, struct t4_sq *sq, u16 idx, struct t4_swsqe *sqe) argument
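
fill_swsqe() above dumps one software SQE as rdma restrack netlink attributes, bailing out on the first put that fails for lack of skb room. A sketch of that emit-or-bail pattern; struct t4_swsqe is reduced here, rdma_nl_put_driver_u32() is declared extern as a stand-in for the in-kernel helper, and the conditional CQE status attribute is omitted:

#include <stdint.h>
#include <errno.h>

struct sk_buff;                         /* opaque stand-in */

/* Same contract as the kernel helper: returns nonzero on failure. */
extern int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name,
                                  uint32_t value);

struct t4_swsqe {                       /* reduced stand-in */
        uint32_t opcode;
        uint32_t complete;
        uint32_t signaled;
        uint32_t flushed;
};

/* Emit one attribute per field; stop at the first failure. */
static int fill_swsqe_sketch(struct sk_buff *msg, const struct t4_swsqe *sqe)
{
        if (rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode) ||
            rdma_nl_put_driver_u32(msg, "complete", sqe->complete) ||
            rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled) ||
            rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))
                return -EMSGSIZE;
        return 0;
}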
/linux-master/drivers/infiniband/sw/siw/
siw.h
192 struct siw_sqe sqe; member in union:siw_wqe::__anon1019
476 #define tx_type(wqe) ((wqe)->sqe.opcode)
478 #define tx_flags(wqe) ((wqe)->sqe.flags)
523 void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe);
524 int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
628 struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size]; local
630 return READ_ONCE(sqe->flags) == 0;
635 struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size]; local
637 if (READ_ONCE(sqe->flags) & SIW_WQE_VALID)
638 return sqe;
[all...]
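
The siw.h helpers above poll the send queue locklessly: a slot is free when READ_ONCE(sqe->flags) == 0 and ready when SIW_WQE_VALID is set, so the flags word doubles as the producer/consumer handoff. A stand-alone sketch of that handoff, substituting C11 atomics for the kernel's READ_ONCE/WRITE_ONCE and barriers (an assumption of this sketch, not what siw compiles to):

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

#define SIW_WQE_VALID 1u
#define SQ_SIZE 64

struct siw_sqe_sketch {
        uint64_t id;
        _Atomic uint16_t flags;         /* written last by the producer */
};

static struct siw_sqe_sketch sendq[SQ_SIZE];

/* Consumer: the next slot is ready only once the producer has published
 * it by setting SIW_WQE_VALID (acquire pairs with the release below). */
static struct siw_sqe_sketch *sq_next(uint32_t sq_get)
{
        struct siw_sqe_sketch *sqe = &sendq[sq_get % SQ_SIZE];

        if (atomic_load_explicit(&sqe->flags, memory_order_acquire) &
            SIW_WQE_VALID)
                return sqe;
        return NULL;
}

/* Producer: fill the payload first, then publish with a release store so
 * the consumer never observes a half-written entry. */
static int sq_post(uint32_t sq_put, uint64_t id)
{
        struct siw_sqe_sketch *sqe = &sendq[sq_put % SQ_SIZE];

        if (atomic_load_explicit(&sqe->flags, memory_order_relaxed))
                return -1;              /* slot not yet released by consumer */
        sqe->id = id;
        atomic_store_explicit(&sqe->flags, SIW_WQE_VALID,
                              memory_order_release);
        return 0;
}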
siw_qp_rx.c
176 srx->ddp_stag = wqe->sqe.sge[0].lkey;
177 srx->ddp_to = wqe->sqe.sge[0].laddr;
691 resp = &tx_work->sqe;
754 wqe->sqe.id = orqe->id;
755 wqe->sqe.opcode = orqe->opcode;
756 wqe->sqe.sge[0].laddr = orqe->sge[0].laddr;
757 wqe->sqe.sge[0].lkey = orqe->sge[0].lkey;
758 wqe->sqe.sge[0].length = orqe->sge[0].length;
759 wqe->sqe.flags = orqe->flags;
760 wqe->sqe
[all...]
siw_mem.c
263 if (!(wqe->sqe.flags & SIW_WQE_INLINE))
264 siw_unref_mem_sgl(wqe->mem, wqe->sqe.num_sge);
/linux-master/drivers/scsi/bnx2i/
bnx2i.h
498 struct sqe { struct
634 struct sqe *sq_virt;
638 struct sqe *sq_prod_qe;
639 struct sqe *sq_cons_qe;
640 struct sqe *sq_first_qe;
641 struct sqe *sq_last_qe;
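
bnx2i.h above tracks its send queue with four struct sqe pointers: first/last bound the ring while prod/cons mark where the driver writes and where the firmware reads, and advancing past the last entry wraps back to the first. A minimal sketch (field names follow the header; the advance helper itself is hypothetical):

struct sqe_sketch { unsigned char wqe[64]; };   /* opaque 64-byte entry */

struct sq_ring {
        struct sqe_sketch *sq_virt;     /* base of the ring */
        struct sqe_sketch *sq_prod_qe;  /* next slot the driver fills */
        struct sqe_sketch *sq_cons_qe;  /* next slot the firmware consumes */
        struct sqe_sketch *sq_first_qe; /* == sq_virt */
        struct sqe_sketch *sq_last_qe;  /* final valid slot */
};

/* Hypothetical advance helper: step the producer, wrapping at the end. */
static void sq_advance_prod(struct sq_ring *q)
{
        if (q->sq_prod_qe == q->sq_last_qe)
                q->sq_prod_qe = q->sq_first_qe;
        else
                q->sq_prod_qe++;
}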
/linux-master/drivers/infiniband/hw/bnxt_re/
qplib_fp.c
1859 struct sq_send_raweth_qp1_hdr *sqe = base_hdr; local
1863 sqe->wqe_type = wqe->type;
1864 sqe->flags = wqe->flags;
1865 sqe->wqe_size = wqe_sz;
1866 sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1867 sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1868 sqe->length = cpu_to_le32(data_len);
1880 struct sq_send_hdr *sqe = base_hdr; local
1882 sqe->wqe_type = wqe->type;
1883 sqe
1911 struct sq_rdma_hdr *sqe = base_hdr; local
1931 struct sq_atomic_hdr *sqe = base_hdr; local
1948 struct sq_localinvalidate *sqe = base_hdr; local
1959 struct sq_fr_pmr_hdr *sqe = base_hdr; local
1992 struct sq_bind_hdr *sqe = base_hdr; local
[all...]
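
qplib_fp.c above overlays a different header struct on the same base_hdr slot depending on the WQE type (send, raw-eth QP1, rdma, atomic, local-invalidate, FR-PMR, bind) and fills only that type's fields. A reduced sketch of the overlay dispatch; both stand-in layouts are illustrative, not the real hardware descriptors:

#include <stdint.h>

struct sq_send_hdr_sketch {             /* illustrative layouts only */
        uint8_t  wqe_type;
        uint8_t  flags;
        uint8_t  wqe_size;
        uint32_t length;
};

struct sq_rdma_hdr_sketch {
        uint8_t  wqe_type;
        uint8_t  flags;
        uint64_t remote_va;
        uint32_t rkey;
};

enum { WQE_TYPE_SEND, WQE_TYPE_RDMA_WRITE };

/* Overlay the opcode-specific header onto the shared slot memory. */
static void fill_hdr(void *base_hdr, int type, uint32_t data_len)
{
        switch (type) {
        case WQE_TYPE_SEND: {
                struct sq_send_hdr_sketch *sqe = base_hdr;

                sqe->wqe_type = type;
                sqe->length = data_len; /* kernel wraps in cpu_to_le32() */
                break;
        }
        case WQE_TYPE_RDMA_WRITE: {
                struct sq_rdma_hdr_sketch *sqe = base_hdr;

                sqe->wqe_type = type;
                /* remote_va/rkey would come from the work request */
                break;
        }
        }
}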
/linux-master/drivers/scsi/qedf/
qedf_io.c
592 struct fcoe_wqe *sqe)
628 io_req->task_params->sqe = sqe;
681 struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
707 io_req->task_params->sqe = sqe;
858 struct fcoe_wqe *sqe; local
905 sqe = &fcport->sq[sqe_idx];
906 memset(sqe, 0, sizeof(struct fcoe_wqe));
919 qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
590 qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe) argument
680 qedf_init_mp_task(struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe) argument
1860 struct fcoe_wqe *sqe; local
2157 struct fcoe_wqe *sqe; local
2301 struct fcoe_wqe *sqe; local
[all...]
qedf_els.c
23 struct fcoe_wqe *sqe; local
120 sqe = &fcport->sq[sqe_idx];
121 memset(sqe, 0, sizeof(struct fcoe_wqe));
125 qedf_init_mp_task(els_req, task, sqe);
702 struct fcoe_wqe *sqe; local
732 sqe = &fcport->sq[sqe_idx];
733 memset(sqe, 0, sizeof(struct fcoe_wqe));
734 orig_io_req->task_params->sqe = sqe;
/linux-master/drivers/dma/
hisi_dma.c
141 struct hisi_dma_sqe sqe; member in struct:hisi_dma_desc
492 desc->sqe.length = cpu_to_le32(len);
493 desc->sqe.src_addr = cpu_to_le64(src);
494 desc->sqe.dst_addr = cpu_to_le64(dst);
508 struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail; local
522 memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe));
524 /* update other field in sqe */
525 sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M));
526 sqe
[all...]
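
hisi_dma.c above builds a template SQE inside each descriptor at prep time (length, source, destination), then at issue time memcpy()s it into the hardware ring slot and patches the opcode word dw0. A sketch with stand-in types; OPCODE_M2M's value and the dw0 layout are illustrative:

#include <stdint.h>
#include <string.h>

#define OPCODE_M2M 0x4u                 /* illustrative opcode value */

struct hisi_dma_sqe_sketch {
        uint32_t dw0;                   /* opcode and control bits */
        uint32_t length;
        uint64_t src_addr;
        uint64_t dst_addr;
};

struct hisi_dma_desc_sketch {
        struct hisi_dma_sqe_sketch sqe; /* template built at prep time */
};

/* Prep: stash the transfer parameters in the descriptor's template SQE. */
static void prep_memcpy(struct hisi_dma_desc_sketch *desc,
                        uint64_t dst, uint64_t src, uint32_t len)
{
        desc->sqe.length = len;         /* kernel uses cpu_to_le32/le64 here */
        desc->sqe.src_addr = src;
        desc->sqe.dst_addr = dst;
}

/* Issue: copy the template into the ring slot, then set the opcode. */
static void issue(struct hisi_dma_sqe_sketch *ring, uint32_t sq_tail,
                  const struct hisi_dma_desc_sketch *desc)
{
        struct hisi_dma_sqe_sketch *sqe = &ring[sq_tail];

        memcpy(sqe, &desc->sqe, sizeof(*sqe));
        sqe->dw0 = OPCODE_M2M;          /* kernel: FIELD_PREP(OPCODE_MASK, ...) */
}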
/linux-master/tools/testing/selftests/x86/
lam.c
510 struct io_uring_sqe *sqe; local
540 sqe = &ring->sq_ring.queue.sqes[index];
541 sqe->fd = file_fd;
542 sqe->flags = 0;
543 sqe->opcode = IORING_OP_READV;
544 sqe->addr = (unsigned long)fi->iovecs;
545 sqe->len = blocks;
546 sqe->off = 0;
547 sqe->user_data = (uint64_t)fi;
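
The lam.c selftest above fills raw SQ-ring slots by hand: fd, opcode, addr, len, off, plus user_data so the completion can be matched back to the request. For comparison, the same readv submission written against liburing (a hedged sketch; the file path and buffer size are arbitrary):

/* Build with: gcc readv_uring.c -o readv_uring -luring */
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        char buf[4096];
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        int fd = open("/etc/hostname", O_RDONLY);       /* arbitrary file */

        if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        sqe = io_uring_get_sqe(&ring);          /* grab a free SQE slot */
        if (!sqe)
                return 1;
        io_uring_prep_readv(sqe, fd, &iov, 1, 0);
        io_uring_sqe_set_data(sqe, buf);        /* user_data, echoed in the CQE */

        io_uring_submit(&ring);
        if (io_uring_wait_cqe(&ring, &cqe) == 0) {
                printf("read %d bytes\n", cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }
        io_uring_queue_exit(&ring);
        close(fd);
        return 0;
}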
/linux-master/drivers/nvme/host/
rdma.c
65 struct nvme_rdma_qe sqe; member in struct:nvme_rdma_request
290 kfree(req->sqe.data);
303 req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
304 if (!req->sqe.data)
314 nvme_req(rq)->cmd = req->sqe.data;
1587 container_of(qe, struct nvme_rdma_request, sqe);
1672 struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe; local
1673 struct nvme_command *cmd = sqe->data;
1677 ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
1685 sqe
2001 struct nvme_rdma_qe *sqe = &req->sqe; local
[all...]
fc.c
1935 struct nvme_command *sqe = &op->cmd_iu.sqe; local
2039 sqe->common.command_id != cqe->command_id)) {
2049 sqe->common.command_id,
2156 nvme_req(rq)->cmd = &op->op.cmd_iu.sqe;
2165 struct nvme_command *sqe; local
2179 sqe = &cmdiu->sqe;
2191 memset(sqe, 0, sizeof(*sqe));
2572 struct nvme_command *sqe = &cmdiu->sqe; local
2676 struct nvme_command *sqe = &cmdiu->sqe; local
[all...]
/linux-master/drivers/crypto/hisilicon/
debugfs.c
293 dev_err(dev, "Please input sqe number!\n");
299 dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
314 void *sqe, *sqe_curr; local
323 sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL);
324 if (!sqe)
328 memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth);
329 sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
335 kfree(sqe);
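
The debugfs fragment above range-checks a user-supplied SQE index, snapshots the entire SQ into a scratch buffer, and only then indexes the entry to dump, so the output stays consistent while hardware keeps producing. A userspace-shaped sketch of that snapshot-then-index step (calloc() stands in for kzalloc(); sizes stand in for qm->sqe_size and the queue depth):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Snapshot the live SQ, then return a pointer to one entry inside the
 * copy; *snap_out receives the buffer the caller must free(). Returns
 * NULL on a bad index or allocation failure. */
static void *snapshot_sqe(const void *sq, uint32_t sqe_size, uint32_t depth,
                          uint32_t sqe_id, void **snap_out)
{
        void *snap;

        if (sqe_id >= depth)            /* "Please input sqe num (0-%u)" */
                return NULL;

        snap = calloc(depth, sqe_size); /* kernel side uses kzalloc() */
        if (!snap)
                return NULL;

        memcpy(snap, sq, (size_t)sqe_size * depth);
        *snap_out = snap;
        return (char *)snap + (size_t)sqe_id * sqe_size;
}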
/linux-master/drivers/crypto/hisilicon/hpre/
hpre_crypto.c
63 typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
321 struct hpre_sqe *sqe = &req->req; local
324 tmp = le64_to_cpu(sqe->in);
335 tmp = le64_to_cpu(sqe->out);
349 static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe, argument
361 id = (int)le16_to_cpu(sqe->tag);
366 err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
369 done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
375 alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;
481 struct hpre_sqe *sqe local
1450 struct hpre_sqe *sqe = &req->req; local
1766 struct hpre_sqe *sqe = &req->req; local
[all...]
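
hpre_alg_res_post_hf() above decodes completion state packed into the SQE's dw0 word: done bits, an error field, and the algorithm type, plus a tag that matches the completion back to its request. A sketch of the done/err/tag unpacking; every shift/mask value here is an illustrative stand-in, not the real HPRE layout:

#include <stdint.h>

/* Illustrative dw0 bit layout; not the hardware's real one. */
#define HPRE_SQE_DONE_SHIFT     30
#define HPRE_SQE_DONE_MASK      0x3u
#define HPRE_SQE_ERR_SHIFT      24
#define HPRE_SQE_ERR_MASK       0x7u
#define HPRE_DONE_VAL           0x3u

struct hpre_sqe_sketch {
        uint32_t dw0;                   /* done/err/alg packed together */
        uint16_t tag;                   /* matches completion to request */
};

/* Returns the request tag, or -1 if the SQE reports an error or is not
 * fully done. */
static int post_hf(const struct hpre_sqe_sketch *sqe)
{
        uint32_t done = (sqe->dw0 >> HPRE_SQE_DONE_SHIFT) & HPRE_SQE_DONE_MASK;
        uint32_t err  = (sqe->dw0 >> HPRE_SQE_ERR_SHIFT) & HPRE_SQE_ERR_MASK;

        if (done != HPRE_DONE_VAL || err)
                return -1;
        return sqe->tag;                /* kernel: le16_to_cpu(sqe->tag) */
}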
/linux-master/include/linux/qed/
qed_nvmetcp_if.h
98 struct nvmetcp_wqe *sqe; member in struct:nvmetcp_task_params
/linux-master/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.h
94 struct qmem *sqe; member in struct:otx2_snd_queue
/linux-master/drivers/nvme/target/
fcloop.c
585 struct nvme_command *sqe = &cmdiu->sqe; local
592 __func__, sqe->common.opcode, sqe->fabrics.fctype,
597 (sqe->common.opcode != nvme_fabrics_command ||
598 sqe->fabrics.fctype != drop_opcode)) ||
599 (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
