Searched refs:wqe (Results 76 - 100 of 138) sorted by relevance

/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
ipsec_offload.c 596 struct mlx5_aso_wqe *wqe; local
606 wqe = mlx5_aso_get_wqe(aso->aso);
607 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
608 mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
611 ctrl = &wqe->aso_ctrl;
618 mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
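The three calls above (mlx5_aso_get_wqe, mlx5_aso_build_wqe, mlx5_aso_post_wqe) follow the usual get/fill/post sequence for a send queue: fetch the next free slot, size the entry in 16-byte data-segment units, then post it. Below is a minimal userspace sketch of that sequence, assuming a power-of-two ring with a producer index; every name in it is invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WQE_DS     16u     /* bytes per data segment, like MLX5_SEND_WQE_DS */
#define RING_DEPTH 8u      /* must be a power of two */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct wqe {               /* stand-in for struct mlx5_aso_wqe */
    uint32_t ctrl;
    uint8_t  payload[44];
};

struct wqe_ring {
    struct wqe slots[RING_DEPTH];
    uint16_t   pi;         /* producer index */
};

static struct wqe *wqe_get(struct wqe_ring *r)
{
    /* hand out the next slot; power-of-two depth makes the mask wrap */
    return &r->slots[r->pi & (RING_DEPTH - 1)];
}

static void wqe_post(struct wqe_ring *r, struct wqe *w)
{
    r->pi++;               /* real drivers also ring a doorbell here */
    printf("posted wqe with %u data segments\n", (unsigned)w->ctrl);
}

int main(void)
{
    struct wqe_ring ring = {0};
    struct wqe *w = wqe_get(&ring);

    memset(w, 0, sizeof(*w));
    /* size the WQE in 16-byte units, as ds_cnt is computed in the snippet */
    w->ctrl = DIV_ROUND_UP(sizeof(*w), WQE_DS);
    wqe_post(&ring, w);
    return 0;
}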
ipsec_rxtx.h 63 void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
/linux-master/drivers/infiniband/sw/siw/
siw.h 252 u32 num_rqe; /* max # of wqe's allowed */
325 * Valid, according to wqe->wr_status
476 #define tx_type(wqe) ((wqe)->sqe.opcode)
477 #define rx_type(wqe) ((wqe)->rqe.opcode)
478 #define tx_flags(wqe) ((wqe)->sqe.flags)
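These macros hide which side of the WQE (send or receive) a field lives in, so callers never touch the layout directly. A minimal sketch of the same accessor-macro idiom, with placeholder types standing in for siw's real sqe/rqe layouts:

#include <stdio.h>
#include <stdint.h>

struct siw_sqe { uint8_t opcode; uint16_t flags; };
struct siw_rqe { uint8_t opcode; };

struct siw_wqe {
    struct siw_sqe sqe;    /* send-queue view */
    struct siw_rqe rqe;    /* receive-queue view */
};

/* same shape as the macros in the snippet above */
#define tx_type(wqe)  ((wqe)->sqe.opcode)
#define rx_type(wqe)  ((wqe)->rqe.opcode)
#define tx_flags(wqe) ((wqe)->sqe.flags)

int main(void)
{
    struct siw_wqe w = { .sqe = { .opcode = 2, .flags = 0x1 } };

    printf("tx opcode=%u flags=0x%x\n",
           (unsigned)tx_type(&w), (unsigned)tx_flags(&w));
    return 0;
}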
/linux-master/drivers/infiniband/hw/bnxt_re/
qplib_fp.h 529 struct bnxt_qplib_swqe *wqe);
547 struct bnxt_qplib_swqe *wqe);
550 struct bnxt_qplib_swqe *wqe);
621 static inline u16 bnxt_qplib_calc_ilsize(struct bnxt_qplib_swqe *wqe, u16 max) argument
626 for (indx = 0; indx < wqe->num_sge; indx++)
627 size += wqe->sg_list[indx].size;
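bnxt_qplib_calc_ilsize() sums the sizes of all scatter-gather elements of a WQE to decide how much payload would be sent inline. A self-contained sketch of that calculation, with invented types and an assumed clamp to the max argument (the snippet only shows the summing loop):

#include <stdint.h>
#include <stdio.h>

struct sge { uint32_t size; };

struct swqe {
    uint32_t   num_sge;
    struct sge sg_list[4];
};

static uint16_t calc_ilsize(const struct swqe *wqe, uint16_t max)
{
    uint32_t size = 0;

    for (uint32_t i = 0; i < wqe->num_sge; i++)
        size += wqe->sg_list[i].size;
    if (size > max)        /* assumed: the kernel helper clamps to max */
        size = max;
    return (uint16_t)size;
}

int main(void)
{
    struct swqe w = { .num_sge = 2, .sg_list = { { 64 }, { 100 } } };

    printf("inline size = %u\n", (unsigned)calc_ilsize(&w, 128));
    return 0;
}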
/linux-master/drivers/infiniband/hw/mlx5/
wr.c 243 static u8 calc_sig(void *wqe, int size) argument
245 u8 *p = wqe;
255 static u8 wq_sig(void *wqe) argument
257 return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
261 void **wqe, int *wqe_sz, void **cur_edge)
268 seg = *wqe;
269 *wqe += sizeof(*seg);
285 handle_post_send_edge(&qp->sq, wqe,
289 leftlen = *cur_edge - *wqe;
260 set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, void **wqe, int *wqe_sz, void **cur_edge) argument
[all...]
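wq_sig() recovers the WQE length from a 6-bit segment count stored at byte 8 (16 bytes per segment) and calc_sig() folds every byte of the entry with XOR. A sketch of that signature scheme; the final complement mirrors what mlx5 appears to do, but treat the helper body as an assumption:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint8_t calc_sig(const void *wqe, size_t size)
{
    const uint8_t *p = wqe;
    uint8_t res = 0;

    for (size_t i = 0; i < size; i++)
        res ^= p[i];
    return ~res;           /* complement so an all-zero WQE doesn't sign to 0 */
}

static uint8_t wq_sig(const void *wqe)
{
    /* 6-bit segment count at byte 8, times 16 bytes per segment */
    size_t size = (size_t)(*((const uint8_t *)wqe + 8) & 0x3f) << 4;

    return calc_sig(wqe, size);
}

int main(void)
{
    uint8_t wqe[64] = {0};

    wqe[8] = 4;            /* 4 segments -> sign the first 64 bytes */
    printf("signature = 0x%02x\n", (unsigned)wq_sig(wqe));
    return 0;
}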
/linux-master/drivers/crypto/marvell/octeontx2/
otx2_cptvf_reqmgr.c 387 void otx2_cpt_post_process(struct otx2_cptlf_wqe *wqe) argument
389 process_pending_queue(wqe->lfs,
390 &wqe->lfs->lf[wqe->lf_num].pqueue);
/linux-master/drivers/scsi/bnx2fc/
bnx2fc_hwi.c 626 static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) argument
643 BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
644 switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
646 frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
859 void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe, argument
874 xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
970 static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe, argument
981 work->wqe = wqe;
991 bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe) argument
1071 u16 wqe; local
[all...]
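bnx2fc unpacks several fields (subtype, packet length, task id) from one 16-bit completion word using masks and shifts. A sketch of the same decode; the bit layout below is invented for illustration, not the real FCOE_* encoding:

#include <stdint.h>
#include <stdio.h>

#define CQE_SUBTYPE_MASK  0x0003u     /* assumed 2-bit subtype field */
#define CQE_PKT_LEN_MASK  0xfffcu     /* assumed 14-bit length field */
#define CQE_PKT_LEN_SHIFT 2

static void process_unsol_compl(uint16_t wqe)
{
    switch (wqe & CQE_SUBTYPE_MASK) {
    case 1:
        printf("frame, len=%u\n",
               (unsigned)((wqe & CQE_PKT_LEN_MASK) >> CQE_PKT_LEN_SHIFT));
        break;
    default:
        printf("unhandled subtype %u\n", (unsigned)(wqe & CQE_SUBTYPE_MASK));
        break;
    }
}

int main(void)
{
    process_unsol_compl((uint16_t)((128u << CQE_PKT_LEN_SHIFT) | 1u));
    return 0;
}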
/linux-master/drivers/infiniband/hw/hfi1/
trace_tx.h 814 TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
815 TP_ARGS(qp, wqe, idx),
818 __field(struct rvt_swqe *, wqe)
830 __entry->wqe = wqe;
831 __entry->wr_id = wqe->wr.wr_id;
834 __entry->length = wqe->length;
836 __entry->ssn = wqe->ssn;
837 __entry->opcode = wqe->wr.opcode;
838 __entry->send_flags = wqe
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/
wq.h 290 struct mlx5_wqe_srq_next_seg *wqe = mlx5_wq_ll_get_wqe(wq, ix); local
292 return be16_to_cpu(wqe->next_wqe_index);
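The wq.h snippet shows the linked-list work queue idiom: each entry stores the index of its successor in big-endian (device) byte order, converted with be16_to_cpu() before use. A portable userspace sketch with placeholder names:

#include <stdint.h>
#include <stdio.h>

static uint16_t be16_to_cpu(uint16_t be)
{
    const uint8_t *p = (const uint8_t *)&be;

    return (uint16_t)((p[0] << 8) | p[1]);   /* host-byte-order independent */
}

struct srq_next_seg {
    uint16_t next_wqe_index;   /* big-endian, as written by the device */
};

int main(void)
{
    struct srq_next_seg seg = { .next_wqe_index = 0 };
    uint8_t *raw = (uint8_t *)&seg.next_wqe_index;

    raw[0] = 0x01;             /* big-endian 0x0102 == 258 */
    raw[1] = 0x02;
    printf("next index = %u\n", (unsigned)be16_to_cpu(seg.next_wqe_index));
    return 0;
}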
en_main.c 290 struct mlx5e_umr_wqe *wqe)
292 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
293 struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
587 next_frag.frag_page = &rq->wqe.alloc_units->frag_pages[0];
592 for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
593 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
595 &rq->wqe.frags[i << rq->wqe.info.log_num_frags];
598 for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
623 WARN_ON(rq->wqe
288 mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *wqe) argument
932 struct mlx5e_rx_wqe_ll *wqe = local
945 struct mlx5e_rx_wqe_cyc *wqe = local
1236 struct mlx5e_rx_wqe_ll *wqe; local
2030 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i); local
[all...]
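The en_main.c fragment loop indexes per-WQE fragment arrays with a shift (i << log_num_frags): each RX WQE owns a power-of-two block of fragment slots, so an offset is a shift instead of a multiply. A sketch with invented structures:

#include <stdint.h>
#include <stdio.h>

#define LOG_NUM_FRAGS 2            /* 4 fragments per WQE */
#define NUM_WQES      4

struct frag { uint32_t len; };

int main(void)
{
    struct frag frags[NUM_WQES << LOG_NUM_FRAGS] = {0};

    for (uint32_t i = 0; i < NUM_WQES; i++) {
        /* first fragment owned by WQE i, as in &rq->wqe.frags[i << log] */
        struct frag *first = &frags[i << LOG_NUM_FRAGS];

        for (uint32_t f = 0; f < (1u << LOG_NUM_FRAGS); f++)
            first[f].len = i * 100 + f;
    }
    printf("wqe 3, frag 1 -> len %u\n",
           (unsigned)frags[(3u << LOG_NUM_FRAGS) + 1].len);
    return 0;
}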
/linux-master/drivers/infiniband/sw/rxe/
rxe_loc.h 80 int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
86 int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
rxe_mr.c 329 /* copy data in or out of a wqe, i.e. sg list
688 int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe) argument
690 struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr);
691 u32 key = wqe->wr.wr.reg.key;
692 u32 access = wqe->wr.wr.reg.access;
716 mr->ibmr.iova = wqe->wr.wr.reg.mr->iova;
rxe_verbs.h 175 struct rxe_recv_wqe *wqe; member in struct:rxe_resp_info
187 struct rxe_recv_wqe wqe; member in struct:rxe_resp_info::__anon177
/linux-master/drivers/infiniband/hw/qedr/
verbs.c 3296 char *seg_prt, *wqe; local
3311 wqe = NULL;
3324 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
3325 seg_prt = wqe;
3344 swap_wqe_data64((u64 *)wqe);
3350 swap_wqe_data64((u64 *)wqe);
3554 struct rdma_sq_common_wqe *wqe; local
3564 wqe = qed_chain_produce(&qp->sq.pbl);
3568 wqe->flags = 0;
3569 SET_FIELD2(wqe
[all...]
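The qedr snippet byte-swaps inline WQE payload 64 bits at a time via swap_wqe_data64() before handing the entry to the device. A sketch of what such a helper plausibly looks like; the per-element size is an assumption:

#include <stdint.h>
#include <stdio.h>

#define ROCE_WQE_ELEM_SIZE 16u     /* bytes per SQ element (assumed) */

static uint64_t bswap64(uint64_t v)
{
    return ((v & 0x00000000000000ffull) << 56) |
           ((v & 0x000000000000ff00ull) << 40) |
           ((v & 0x0000000000ff0000ull) << 24) |
           ((v & 0x00000000ff000000ull) <<  8) |
           ((v & 0x000000ff00000000ull) >>  8) |
           ((v & 0x0000ff0000000000ull) >> 24) |
           ((v & 0x00ff000000000000ull) >> 40) |
           ((v & 0xff00000000000000ull) >> 56);
}

static void swap_wqe_data64(uint64_t *p)
{
    for (unsigned int i = 0; i < ROCE_WQE_ELEM_SIZE / sizeof(*p); i++)
        p[i] = bswap64(p[i]);      /* flip each 64-bit word in place */
}

int main(void)
{
    uint64_t wqe[2] = { 0x0102030405060708ull, 0x1112131415161718ull };

    swap_wqe_data64(wqe);
    printf("%016llx %016llx\n",
           (unsigned long long)wqe[0], (unsigned long long)wqe[1]);
    return 0;
}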
/linux-master/drivers/crypto/marvell/octeontx/
otx_cptvf_reqmgr.h 223 void otx_cpt_post_process(struct otx_cptvf_wqe *wqe);
otx_cptvf_reqmgr.c 606 void otx_cpt_post_process(struct otx_cptvf_wqe *wqe) argument
608 process_pending_queue(wqe->cptvf->pdev, &wqe->cptvf->pqinfo.queue[0]);
otx_cptvf_main.c 575 struct otx_cptvf_wqe *wqe; local
582 wqe = get_cptvf_vq_wqe(cptvf, 0);
583 if (unlikely(!wqe)) {
588 tasklet_hi_schedule(&wqe->twork);
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
xdp.h 206 (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
/linux-master/drivers/infiniband/hw/erdma/
erdma_cmdq.c 279 __le64 *wqe; local
286 wqe = get_queue_entry(cmdq->sq.qbuf, cmdq->sq.pi, cmdq->sq.depth,
288 memcpy(wqe, req, req_len);
295 *wqe = cpu_to_le64(hdr);
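erdma's command queue picks a slot from the producer index modulo the queue depth, memcpy()s the request body, then writes the 64-bit header word into the entry. A sketch of that submit path with illustrative names and sizes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SQ_DEPTH 16u       /* power of two */
#define SQE_SIZE 64u       /* bytes per entry (illustrative) */

static uint8_t sq_buf[SQ_DEPTH * SQE_SIZE];

static void *get_queue_entry(void *qbuf, uint16_t pi, uint16_t depth)
{
    return (uint8_t *)qbuf + (size_t)(pi & (depth - 1)) * SQE_SIZE;
}

static void cmdq_submit(uint16_t *pi, const void *req, size_t req_len,
                        uint64_t hdr)
{
    uint64_t *wqe = get_queue_entry(sq_buf, *pi, SQ_DEPTH);

    memcpy(wqe, req, req_len);
    *wqe = hdr;            /* header lands in the first 64-bit word */
    (*pi)++;               /* real drivers ring a doorbell after this */
}

int main(void)
{
    uint16_t pi = 0;
    uint8_t req[SQE_SIZE] = { [8] = 0xab };

    cmdq_submit(&pi, req, sizeof(req), 0x1ull);
    printf("pi=%u first byte after hdr=0x%02x\n",
           (unsigned)pi, (unsigned)sq_buf[8]);
    return 0;
}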
/linux-master/drivers/infiniband/hw/irdma/
user.h 281 void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ib_sge *sge,
283 void (*iw_set_mw_bind_wqe)(__le64 *wqe,
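irdma keeps its WQE-building primitives (set_fragment, set_mw_bind_wqe, ...) behind function pointers, so different hardware generations can plug in their own encodings. A sketch of that ops-table shape; the gen1 variant and its word layout are invented:

#include <stdint.h>
#include <stdio.h>

struct sge { uint64_t addr; uint32_t len; };

struct wqe_ops {
    void (*set_fragment)(uint64_t *wqe, uint32_t offset,
                         const struct sge *sge);
};

static void gen1_set_fragment(uint64_t *wqe, uint32_t offset,
                              const struct sge *sge)
{
    wqe[offset]     = sge->addr;   /* address word */
    wqe[offset + 1] = sge->len;    /* length word */
}

static const struct wqe_ops gen1_ops = {
    .set_fragment = gen1_set_fragment,
};

int main(void)
{
    uint64_t wqe[4] = {0};
    struct sge sge = { .addr = 0xdead0000, .len = 512 };

    gen1_ops.set_fragment(wqe, 0, &sge);
    printf("addr=%#llx len=%llu\n",
           (unsigned long long)wqe[0], (unsigned long long)wqe[1]);
    return 0;
}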
/linux-master/drivers/scsi/bfa/
bfa_svc.h 70 void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe,
72 void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpgs);
73 void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
430 struct bfa_reqq_wait_s wqe; /* request wait queue element */ member in struct:bfa_lps_s
665 void bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
675 struct bfa_fcxp_wqe_s *wqe);
bfa.h 88 bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg), argument
91 wqe->qresume = qresume;
92 wqe->cbarg = cbarg;
99 * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
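bfa_reqq_winit() primes a wait-queue element with a resume callback and its argument; when the full request queue later drains, the queue invokes qresume(cbarg). A minimal sketch of the idiom, with the queue reduced to a direct callback invocation:

#include <stdio.h>

struct reqq_wait {
    void (*qresume)(void *cbarg);
    void  *cbarg;
};

/* mirrors bfa_reqq_winit() in the snippet above */
static void reqq_winit(struct reqq_wait *wqe,
                       void (*qresume)(void *), void *cbarg)
{
    wqe->qresume = qresume;
    wqe->cbarg = cbarg;
}

static void my_resume(void *cbarg)
{
    printf("queue space available for %s\n", (const char *)cbarg);
}

int main(void)
{
    struct reqq_wait wqe;

    reqq_winit(&wqe, my_resume, "lps request");
    wqe.qresume(wqe.cbarg);    /* what the queue does once it drains */
    return 0;
}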
/linux-master/drivers/scsi/elx/efct/
efct_hw.h 163 /* HW wqe object */
186 * @wqe Work queue object, with link for pending
220 struct efct_hw_wqe wqe; member in struct:efct_hw_io
568 struct efct_hw_wqe wqe; member in struct:efct_hw_send_frame_context
693 int efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe);
/linux-master/drivers/crypto/cavium/cpt/
cptvf_main.c 593 struct cptvf_wqe *wqe; local
599 wqe = get_cptvf_vq_wqe(cptvf, 0);
600 if (unlikely(!wqe)) {
605 tasklet_hi_schedule(&wqe->twork);
/linux-master/io_uring/
poll.c 59 static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe) argument
61 unsigned long priv = (unsigned long)wqe->private;
66 static inline bool wqe_is_double(struct wait_queue_entry *wqe) argument
68 unsigned long priv = (unsigned long)wqe->private;
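wqe_to_req()/wqe_is_double() rely on pointer tagging: the wait queue entry's private field holds a request pointer whose low bit marks the "double" poll entry, exploiting the fact that aligned pointers leave bit 0 free. A sketch of the trick with simplified stand-in types:

#include <stdint.h>
#include <stdio.h>

#define WQE_F_DOUBLE ((uintptr_t)1)   /* tag bit in the low pointer bit */

struct req { int id; };

struct wait_queue_entry { void *private; };

static struct req *wqe_to_req(const struct wait_queue_entry *wqe)
{
    uintptr_t priv = (uintptr_t)wqe->private;

    return (struct req *)(priv & ~WQE_F_DOUBLE);   /* strip the tag bit */
}

static int wqe_is_double(const struct wait_queue_entry *wqe)
{
    return ((uintptr_t)wqe->private & WQE_F_DOUBLE) != 0;
}

int main(void)
{
    struct req r = { .id = 42 };
    struct wait_queue_entry wqe = {
        /* requests are at least 2-byte aligned, so bit 0 is free to tag */
        .private = (void *)((uintptr_t)&r | WQE_F_DOUBLE),
    };

    printf("req id=%d double=%d\n",
           wqe_to_req(&wqe)->id, wqe_is_double(&wqe));
    return 0;
}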
