Searched refs:sqe (Results 76 - 95 of 95) sorted by relevance

/linux-master/drivers/scsi/lpfc/
lpfc_nvme.c
1016 cid = cp->sqe.common.command_id;
1085 cp->sqe.common.opcode,
1086 cp->sqe.common.command_id,
1098 cp->sqe.common.opcode,
1099 cp->sqe.common.command_id,
1214 struct nvme_common_command *sqe; local
1274 sqe = &((struct nvme_fc_cmd_iu *)
1275 nCmd->cmdaddr)->sqe.common;
1276 if (sqe->opcode == nvme_admin_async_event)
1541 struct nvme_common_command *sqe; local
[all...]
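
The lpfc hits above show a recurring NVMe-over-FC pattern: the SQE is embedded in the FC command IU, and the driver casts the IU buffer to reach it, e.g. to special-case admin async-event commands. A minimal sketch of that access pattern, assuming the kernel's <linux/nvme.h> and <linux/nvme-fc.h> definitions:

    #include <linux/nvme.h>
    #include <linux/nvme-fc.h>

    /* Sketch mirroring lpfc_nvme.c: dig the common SQE header out of
     * the FC command IU and test for an admin async-event command.
     * 'cmdaddr' is assumed to point at the command IU buffer. */
    static bool cmd_iu_is_async_event(void *cmdaddr)
    {
            struct nvme_common_command *sqe =
                    &((struct nvme_fc_cmd_iu *)cmdaddr)->sqe.common;

            return sqe->opcode == nvme_admin_async_event;
    }
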
/linux-master/drivers/block/
ublk_drv.c
82 * io command is active: sqe cmd is received, and its cqe isn't done
1056 * clear ACTIVE since we are done with this sqe/cmd slot
1822 const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
2168 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2271 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2322 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2513 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2532 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2563 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2594 const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
[all...]
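
ublk_drv.c consistently pulls its driver-private payload out of the SQE's command area with io_uring_sqe_cmd(), which simply returns a pointer into sqe->cmd[]. A hedged sketch of the same pattern in a hypothetical uring_cmd handler (the payload struct and queue limit are illustrative, not ublk's real layout):

    #include <linux/errno.h>
    #include <linux/io_uring/cmd.h>

    #define MY_DRV_MAX_QUEUES 8             /* hypothetical */

    struct my_drv_cmd {                     /* hypothetical payload layout */
            __u16   q_id;
            __u16   tag;
            __u32   result;
            __u64   addr;
    };

    static int my_drv_uring_cmd(struct io_uring_cmd *cmd,
                                unsigned int issue_flags)
    {
            /* aliases the bytes userspace wrote into sqe->cmd[] */
            const struct my_drv_cmd *hdr = io_uring_sqe_cmd(cmd->sqe);

            if (hdr->q_id >= MY_DRV_MAX_QUEUES)
                    return -EINVAL;
            return 0;
    }
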
/linux-master/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.c
803 struct sk_buff *skb, int sqe, int hdr_len)
806 struct sg_list *sg = &sq->sg[sqe];
836 u64 seg_addr, int hdr_len, int sqe)
838 struct sg_list *sg = &sq->sg[sqe];
1325 int sq_idx, sqe; local
1329 for (sqe = 0; sqe < sq->sqe_cnt; sqe++) {
1330 sg = &sq->sg[sqe];
801 otx2_dma_map_tso_skb(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct sk_buff *skb, int sqe, int hdr_len) argument
834 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq, struct sk_buff *skb, int seg, u64 seg_addr, int hdr_len, int sqe) argument
qos_sq.c
141 qmem_free(pfvf->dev, sq->sqe);
otx2_common.c
935 err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size);
946 sq->sqe_base = sq->sqe->base;
otx2_pf.c
1443 if (!sq->sqe)
1445 qmem_free(pf->dev, sq->sqe);
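
Across these octeontx2 hits the SQE ring lifecycle is visible end to end: otx2_common.c allocates one qmem buffer per send queue and publishes sq->sqe_base, while qos_sq.c and otx2_pf.c free it, the latter guarding against a NULL sq->sqe. A sketch of that pairing, assuming the driver-local qmem helpers and struct fields from otx2_common.h:

    /* Sketch of the SQE ring lifecycle shown above. qmem_alloc() and
     * qmem_free() are octeontx2 driver-local helpers, not generic
     * kernel API. */
    static int sq_sqe_init(struct otx2_nic *pfvf, struct otx2_snd_queue *sq)
    {
            int err;

            /* one contiguous buffer holding the SQE ring */
            err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size);
            if (err)
                    return err;

            sq->sqe_base = sq->sqe->base;
            return 0;
    }

    static void sq_sqe_free(struct otx2_nic *pfvf, struct otx2_snd_queue *sq)
    {
            if (!sq->sqe)           /* otx2_pf.c guards the same way */
                    return;
            qmem_free(pfvf->dev, sq->sqe);
            sq->sqe = NULL;
    }
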
/linux-master/io_uring/
io_uring.c
2044 const struct io_uring_sqe *sqe)
2053 req->opcode = opcode = READ_ONCE(sqe->opcode);
2055 sqe_flags = READ_ONCE(sqe->flags);
2057 req->cqe.user_data = READ_ONCE(sqe->user_data);
2074 req->buf_index = READ_ONCE(sqe->buf_group);
2098 if (!def->ioprio && sqe->ioprio)
2106 req->cqe.fd = READ_ONCE(sqe->fd);
2119 personality = READ_ONCE(sqe->personality);
2135 return def->prep(req, sqe);
2138 static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe, argument
2272 io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe) argument
2321 const struct io_uring_sqe *sqe; variable in typeref:struct:io_uring_sqe
[all...]
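
The io_uring.c hits are io_init_req() reading the submission fields (opcode, flags, user_data, buf_group, fd, ioprio, personality) out of the SQE before handing off to the opcode's ->prep(). Those are exactly the fields userspace fills in; a minimal liburing sketch of the producing side (error handling trimmed):

    #include <liburing.h>
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
            struct io_uring ring;
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;
            char buf[4096];
            int fd = open("/etc/hostname", O_RDONLY);

            io_uring_queue_init(8, &ring, 0);

            sqe = io_uring_get_sqe(&ring);          /* grab a free SQE slot */
            io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
            io_uring_sqe_set_data64(sqe, 0x1234);   /* surfaces as cqe->user_data */

            io_uring_submit(&ring);                 /* kernel runs io_init_req() per SQE */
            io_uring_wait_cqe(&ring, &cqe);
            printf("res=%d user_data=%llu\n", cqe->res,
                   (unsigned long long)cqe->user_data);

            io_uring_cqe_seen(&ring, cqe);
            io_uring_queue_exit(&ring);
            return 0;
    }
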
opdef.c
47 const struct io_uring_sqe *sqe)
46 io_eopnotsupp_prep(struct io_kiocb *kiocb, const struct io_uring_sqe *sqe) argument
/linux-master/drivers/infiniband/hw/erdma/
erdma_cmdq.c
306 u64 *sqe; local
318 sqe = get_queue_entry(cmdq->sq.qbuf, sqe_idx, cmdq->sq.depth,
320 ctx_id = FIELD_GET(ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK, *sqe);
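
erdma_cmdq.c recovers the submitter's context cookie from the first 64-bit word of the SQE with FIELD_GET(). A small sketch of that bitfield idiom, with a hypothetical mask standing in for ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK:

    #include <linux/bits.h>
    #include <linux/bitfield.h>
    #include <linux/types.h>

    /* hypothetical layout: bits 47:32 of the first SQE word hold the cookie */
    #define CMD_HDR_COOKIE_MASK     GENMASK_ULL(47, 32)

    static u16 sqe_cookie(const u64 *sqe)
    {
            /* FIELD_GET() masks and shifts according to the mask's position */
            return FIELD_GET(CMD_HDR_COOKIE_MASK, *sqe);
    }
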
/linux-master/include/linux/
nvme-fc.h
56 struct nvme_command sqe; member in struct:nvme_fc_cmd_iu
hisi_acc_qm.h
410 int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
421 void *sqe; member in struct:hisi_qp
/linux-master/tools/testing/selftests/bpf/prog_tests/
sockopt.c
996 struct io_uring_sqe *sqe; local
1004 sqe = io_uring_get_sqe(&ring);
1005 if (!ASSERT_NEQ(sqe, NULL, "Get an SQE")) {
1010 io_uring_prep_cmd(sqe, op, fd, level, optname, optval, optlen);
/linux-master/drivers/nvme/target/
fc.c
2154 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; local
2186 nvme_is_fabrics((struct nvme_command *) sqe) ||
2189 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
2460 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; local
2484 cqe->command_id = sqe->command_id;
2544 if (!nvme_is_write(&cmdiu->sqe))
2548 if (nvme_is_write(&cmdiu->sqe))
2556 fod->req.cmd = &fod->cmdiubuf.sqe;
[all...]
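
Two details of the target-side fc.c hits are worth calling out: data direction is derived from the embedded SQE via nvme_is_write() (bit 0 of the opcode; nvme_cmd_write is 0x01, nvme_cmd_read is 0x02), and on completion the SQE's command_id is echoed into the CQE so the host can correlate the two. A minimal sketch of the echo:

    #include <linux/nvme.h>

    /* Sketch: completions carry the submission's command_id back to
     * the host; direction comes from nvme_is_write(&cmdiu->sqe). */
    static void fill_cqe(struct nvme_completion *cqe,
                         const struct nvme_command *sqe, __le16 status)
    {
            cqe->command_id = sqe->common.command_id;
            cqe->status = status;
    }
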
/linux-master/tools/testing/selftests/mm/
cow.c
409 struct io_uring_sqe *sqe; local
499 sqe = io_uring_get_sqe(&ring);
500 if (!sqe) {
504 io_uring_prep_write_fixed(sqe, fd, mem, size, 0, 0);
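
cow.c pairs io_uring_prep_write_fixed() with a buffer registered up front, so the kernel takes long-term pins on the pages (which is what the COW test is probing); the prep helper's last argument indexes into the registered-buffer table. A liburing sketch of that pairing (setup of 'ring', 'fd', 'mem' and 'size' left to the caller):

    #include <liburing.h>
    #include <sys/uio.h>
    #include <stddef.h>
    #include <errno.h>

    static int write_fixed(struct io_uring *ring, int fd, void *mem, size_t size)
    {
            struct iovec iov = { .iov_base = mem, .iov_len = size };
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;
            int ret;

            ret = io_uring_register_buffers(ring, &iov, 1); /* pins the pages */
            if (ret)
                    return ret;

            sqe = io_uring_get_sqe(ring);
            if (!sqe)
                    return -EBUSY;
            /* last argument = index of the registered buffer (0 here) */
            io_uring_prep_write_fixed(sqe, fd, mem, size, 0, 0);

            io_uring_submit(ring);
            io_uring_wait_cqe(ring, &cqe);
            ret = cqe->res;
            io_uring_cqe_seen(ring, cqe);
            return ret;
    }
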
/linux-master/drivers/scsi/qedf/
qedf.h
515 struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
/linux-master/drivers/scsi/bnx2fc/
bnx2fc_hwi.c
1415 struct fcoe_sqe *sqe; local
1417 sqe = &tgt->sq[tgt->sq_prod_idx];
1420 sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
1421 sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
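
bnx2fc builds each SQE by packing the exchange id and a toggle bit into one word; the toggle bit flips every time the producer index wraps, letting firmware distinguish freshly written entries from stale ones. A sketch of the packing with hypothetical shift values (the real FCOE_SQE_* shifts live in the bnx2fc headers):

    #include <stdint.h>

    #define SQE_TASK_ID_SHIFT    0          /* hypothetical */
    #define SQE_TOGGLE_BIT_SHIFT 31         /* hypothetical */

    static uint32_t build_sqe_wqe(uint16_t xid, unsigned int toggle)
    {
            uint32_t wqe = (uint32_t)xid << SQE_TASK_ID_SHIFT;

            wqe |= (toggle & 1u) << SQE_TOGGLE_BIT_SHIFT;
            return wqe;
    }
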
/linux-master/drivers/scsi/qla2xxx/
qla_nvme.c
618 if (cmd->sqe.common.opcode == nvme_admin_async_event) {
669 cmd->sqe.common.opcode == nvme_admin_async_event) {
/linux-master/drivers/crypto/hisilicon/
qm.c
862 qp->req_cb(qp, qp->sqe + qm->sqe_size *
1816 return qp->sqe + sq_tail * qp->qm->sqe_size;
2044 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
2188 * causes current qm_db sending fail or can not receive sended sqe. QM
2189 * sync/async receive function should handle the error sqe. ACC reset
2190 * done function should clear used sqe to 0.
2197 void *sqe = qm_get_avail_sqe(qp); local
2206 if (!sqe)
2209 memcpy(sqe, msg, qp->qm->sqe_size);
2760 qp->sqe
[all...]
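
The hisilicon QM hits show both halves of the SQE ring protocol: the consumer walks completed slots by offset (qp->sqe + sqe_size * index) and invokes qp->req_cb on each, while the producer reserves the next free slot, memcpys sqe_size bytes of message into it, and only then rings the doorbell. A hedged userspace-style sketch of the producer (all names illustrative, doorbell omitted):

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct qp_sketch {
            uint8_t  *sqe_base;     /* ring of sq_depth fixed-size entries */
            uint16_t  sq_tail;
            uint16_t  sq_depth;
            uint16_t  sqe_size;
            uint16_t  used;         /* entries submitted but not completed */
    };

    static void *get_avail_sqe(struct qp_sketch *qp)
    {
            if (qp->used == qp->sq_depth)   /* ring full */
                    return NULL;
            return qp->sqe_base + (size_t)qp->sq_tail * qp->sqe_size;
    }

    static int qp_send(struct qp_sketch *qp, const void *msg)
    {
            void *sqe = get_avail_sqe(qp);

            if (!sqe)
                    return -EBUSY;
            memcpy(sqe, msg, qp->sqe_size);         /* fill the slot */
            qp->sq_tail = (qp->sq_tail + 1) % qp->sq_depth;
            qp->used++;
            /* hardware doorbell write would go here (cf. qm_db in qm.c) */
            return 0;
    }
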
/linux-master/drivers/nvme/host/
ioctl.c
448 const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
/linux-master/drivers/net/ethernet/broadcom/
cnic_defs.h
3105 struct fcoe_sqe sqe; member in struct:fcoe_cached_wqe

Completed in 356 milliseconds
