Searched refs:srq (Results 1 - 25 of 106) sorted by relevance

/linux-master/drivers/infiniband/hw/mthca/
mthca_srq.c
74 static void *get_wqe(struct mthca_srq *srq, int n) argument
76 if (srq->is_direct)
77 return srq->queue.direct.buf + (n << srq->wqe_shift);
79 return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
80 ((n << srq->wqe_shift) & (PAGE_SIZE - 1));
99 struct mthca_srq *srq,
108 context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
110 context->lkey = cpu_to_be32(srq
97 mthca_tavor_init_srq_context(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_srq *srq, struct mthca_tavor_srq_context *context, struct ib_udata *udata) argument
118 mthca_arbel_init_srq_context(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_srq *srq, struct mthca_arbel_srq_context *context, struct ib_udata *udata) argument
147 mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq) argument
154 mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_srq *srq, struct ib_udata *udata) argument
206 mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, struct ib_srq_attr *attr, struct mthca_srq *srq, struct ib_udata *udata) argument
327 get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq) argument
338 mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) argument
376 struct mthca_srq *srq = to_msrq(ibsrq); local
399 struct mthca_srq *srq = to_msrq(ibsrq); local
433 struct mthca_srq *srq; local
465 mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr) argument
487 struct mthca_srq *srq = to_msrq(ibsrq); local
581 struct mthca_srq *srq = to_msrq(ibsrq); local
[all...]
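
A note on the get_wqe() snippet above (mthca_srq.c lines 74-80): mthca keeps the SRQ work-queue entries either in one direct buffer or in a list of pages, and locates entry n by the byte offset n << wqe_shift, splitting that offset into a page index and an in-page remainder in the paged case. Below is a minimal userspace sketch of the same address arithmetic; the toy_* names, the fixed 16-entry page list and the locally defined PAGE_SIZE are stand-ins for illustration, not the driver's definitions.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                   /* assume 4 KiB pages for the sketch */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct toy_srq {
        int      is_direct;             /* single contiguous buffer?  */
        int      wqe_shift;             /* log2 of the WQE stride     */
        uint8_t *direct_buf;            /* used when is_direct        */
        uint8_t *page_list[16];         /* used otherwise             */
};

/* Byte offset = n << wqe_shift; either index straight into the direct
 * buffer, or split the offset into page index and in-page remainder. */
static void *toy_get_wqe(struct toy_srq *srq, int n)
{
        unsigned long off = (unsigned long)n << srq->wqe_shift;

        if (srq->is_direct)
                return srq->direct_buf + off;
        return srq->page_list[off >> PAGE_SHIFT] + (off & (PAGE_SIZE - 1));
}

int main(void)
{
        static uint8_t page0[PAGE_SIZE], page1[PAGE_SIZE];
        struct toy_srq srq = { .is_direct = 0, .wqe_shift = 6,
                               .page_list = { page0, page1 } };

        /* WQE 70: offset 70 * 64 = 4480 -> page 1, in-page offset 384. */
        printf("wqe 70 at %p, page1 at %p\n", toy_get_wqe(&srq, 70), (void *)page1);
        return 0;
}
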
/linux-master/drivers/infiniband/hw/mlx4/
srq.c
35 #include <linux/mlx4/srq.h>
42 static void *get_wqe(struct mlx4_ib_srq *srq, int n) argument
44 return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
47 static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type) argument
50 struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;
54 event.element.srq = ibsrq;
64 "on SRQ %06x\n", type, srq->srqn);
79 struct mlx4_ib_srq *srq = to_msrq(ib_srq); local
98 mutex_init(&srq
228 struct mlx4_ib_srq *srq = to_msrq(ibsrq); local
253 struct mlx4_ib_srq *srq = to_msrq(ibsrq); local
268 mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) argument
293 mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index) argument
310 struct mlx4_ib_srq *srq = to_msrq(ibsrq); local
[all...]
Makefile
4 mlx4_ib-y := ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o mcg.o cm.o alias_GUID.o sysfs.o
/linux-master/drivers/infiniband/hw/hns/
hns_roce_srq.c
16 struct hns_roce_srq *srq; local
19 srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
20 if (srq)
21 refcount_inc(&srq->refcount);
24 if (!srq) {
29 srq->event(srq, event_type);
31 if (refcount_dec_and_test(&srq->refcount))
32 complete(&srq->free);
35 static void hns_roce_ib_srq_event(struct hns_roce_srq *srq, argument
63 alloc_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) argument
80 free_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) argument
85 hns_roce_create_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) argument
114 alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) argument
146 free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) argument
166 alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, struct ib_udata *udata, unsigned long addr) argument
210 free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) argument
219 alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, struct ib_udata *udata, unsigned long addr) argument
247 free_srq_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) argument
253 alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) argument
262 free_srq_wrid(struct hns_roce_srq *srq) argument
290 set_srq_basic_param(struct hns_roce_srq *srq, struct ib_srq_init_attr *init_attr, struct ib_udata *udata) argument
318 set_srq_ext_param(struct hns_roce_srq *srq, struct ib_srq_init_attr *init_attr) argument
328 set_srq_param(struct hns_roce_srq *srq, struct ib_srq_init_attr *init_attr, struct ib_udata *udata) argument
343 alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, struct ib_udata *udata) argument
384 free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) argument
391 get_srq_ucmd(struct hns_roce_srq *srq, struct ib_udata *udata, struct hns_roce_ib_create_srq *ucmd) argument
406 free_srq_db(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, struct ib_udata *udata) argument
425 alloc_srq_db(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, struct ib_udata *udata, struct hns_roce_ib_create_srq_resp *resp) argument
470 struct hns_roce_srq *srq = to_hr_srq(ib_srq); local
529 struct hns_roce_srq *srq = to_hr_srq(ibsrq); local
[all...]
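
The hns_roce_srq.c fragment above (lines 16-32) is the common lookup-then-reference lifetime rule for SRQs: the async event handler looks the SRQ up by its number, bumps a refcount before calling srq->event(), and the final reference dropped completes srq->free so the destroy path may reclaim the object. The sketch below imitates that rule with C11 atomics and a plain flag in place of the kernel's refcount_t and completion; all toy_* names are hypothetical.

#include <stdatomic.h>
#include <stdio.h>

struct toy_srq {
        atomic_int refcount;
        atomic_int freed;          /* stands in for complete(&srq->free) */
};

static void toy_srq_get(struct toy_srq *s)
{
        atomic_fetch_add(&s->refcount, 1);
}

static void toy_srq_put(struct toy_srq *s)
{
        /* Dropping the last reference "completes": the destroyer may free. */
        if (atomic_fetch_sub(&s->refcount, 1) == 1)
                atomic_store(&s->freed, 1);
}

static void toy_srq_event(struct toy_srq *s)
{
        toy_srq_get(s);            /* pin the SRQ while the handler runs */
        /* ... dispatch the asynchronous event here ... */
        toy_srq_put(s);
}

int main(void)
{
        struct toy_srq s = { .refcount = 1 };   /* creator holds one reference */

        toy_srq_event(&s);
        toy_srq_put(&s);                        /* destroy path drops its reference */
        printf("freed = %d\n", atomic_load(&s.freed));
        return 0;
}
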
/linux-master/drivers/infiniband/sw/rxe/
rxe_srq.c
44 int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, argument
52 srq->ibsrq.event_handler = init->event_handler;
53 srq->ibsrq.srq_context = init->srq_context;
54 srq->limit = init->attr.srq_limit;
55 srq->srq_num = srq->elem.index;
56 srq->rq.max_wr = init->attr.max_wr;
57 srq->rq.max_sge = init->attr.max_sge;
60 srq->rq.max_sge*sizeof(struct ib_sge);
62 spin_lock_init(&srq
100 rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask) argument
151 rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq, struct ib_srq_attr *attr, enum ib_srq_attr_mask mask, struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata) argument
192 struct rxe_srq *srq = container_of(elem, typeof(*srq), elem); local
[all...]
rxe.h
50 #define rxe_dbg_srq(srq, fmt, ...) ibdev_dbg((srq)->ibsrq.device, \
51 "srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
71 #define rxe_err_srq(srq, fmt, ...) ibdev_err_ratelimited((srq)->ibsrq.device, \
72 "srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
92 #define rxe_info_srq(srq, fmt, ...) ibdev_info_ratelimited((srq)
[all...]
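
The rxe.h macros above wrap ibdev_dbg()/ibdev_err_ratelimited()/ibdev_info_ratelimited() so every SRQ message carries a "srq#<index> <function>:" prefix. The small sketch below reproduces only that macro shape, with fprintf standing in for the ibdev loggers; it illustrates the prefix-plus-##__VA_ARGS__ idiom, not the rxe code itself.

#include <stdio.h>

struct toy_srq { int index; };

/* Prefix every message with the SRQ index and the calling function and
 * forward any extra arguments; "##" drops the trailing comma when there
 * are none (a GNU C extension, as used by the kernel). */
#define toy_dbg_srq(srq, fmt, ...) \
        fprintf(stderr, "srq#%d %s: " fmt, (srq)->index, __func__, ##__VA_ARGS__)

int main(void)
{
        struct toy_srq s = { .index = 7 };

        toy_dbg_srq(&s, "created with %d entries\n", 128);
        toy_dbg_srq(&s, "destroyed\n");
        return 0;
}
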
rxe_verbs.c
366 /* srq */
372 struct rxe_srq *srq = to_rsrq(ibsrq); local
387 rxe_dbg_dev(rxe, "srq type = %d, not supported\n",
398 err = rxe_add_to_pool(&rxe->srq_pool, srq);
400 rxe_dbg_dev(rxe, "unable to create srq, err = %d\n", err);
405 srq->pd = pd;
407 err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
409 rxe_dbg_srq(srq, "create srq failed, err = %d\n", err);
416 cleanup_err = rxe_cleanup(srq);
428 struct rxe_srq *srq = to_rsrq(ibsrq); local
469 struct rxe_srq *srq = to_rsrq(ibsrq); local
492 struct rxe_srq *srq = to_rsrq(ibsrq); local
516 struct rxe_srq *srq = to_rsrq(ibsrq); local
[all...]
/linux-master/drivers/infiniband/hw/mlx5/
srq.c
11 #include "srq.h"
13 static void *get_wqe(struct mlx5_ib_srq *srq, int n) argument
15 return mlx5_frag_buf_get_wqe(&srq->fbc, n);
18 static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type) argument
21 struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;
25 event.element.srq = ibsrq;
35 type, srq->srqn);
43 static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, argument
76 srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
78 srq
105 create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, struct mlx5_srq_attr *in, int buf_size) argument
171 destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, struct ib_udata *udata) argument
184 destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq) argument
196 struct mlx5_ib_srq *srq = to_msrq(ib_srq); local
325 struct mlx5_ib_srq *srq = to_msrq(ibsrq); local
350 struct mlx5_ib_srq *srq = to_msrq(ibsrq); local
371 mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) argument
388 mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index) argument
405 struct mlx5_ib_srq *srq = to_msrq(ibsrq); local
[all...]
srq_cmd.c
9 #include "srq.h"
84 struct mlx5_core_srq *srq; local
87 srq = xa_load(&table->array, srqn);
88 if (srq)
89 refcount_inc(&srq->common.refcount);
92 return srq;
114 static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, argument
157 srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
158 srq->uid = in->uid;
164 static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq) argument
175 arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, u16 lwm, int is_srq) argument
189 query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, struct mlx5_srq_attr *out) argument
216 create_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, struct mlx5_srq_attr *in) argument
270 destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq) argument
282 arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, u16 lwm) argument
297 query_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, struct mlx5_srq_attr *out) argument
328 create_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, struct mlx5_srq_attr *in) argument
386 destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq) argument
396 arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, u16 lwm) argument
438 query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, struct mlx5_srq_attr *out) argument
475 create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, struct mlx5_srq_attr *in) argument
535 destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq) argument
546 arm_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, u16 lwm) argument
561 query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, struct mlx5_srq_attr *out) argument
600 create_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, struct mlx5_srq_attr *in) argument
615 destroy_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq) argument
629 mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, struct mlx5_srq_attr *in) argument
665 mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq) argument
693 mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, struct mlx5_srq_attr *out) argument
708 mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq, u16 lwm, int is_srq) argument
727 struct mlx5_core_srq *srq; local
[all...]
srq.h
48 void (*event)(struct mlx5_core_srq *srq, enum mlx5_event e);
58 int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
60 int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq);
61 int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
63 int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
Makefile
20 srq.o \
/linux-master/drivers/infiniband/sw/rdmavt/
srq.c
11 #include "srq.h"
15 * rvt_driver_srq_init - init srq resources on a per driver basis
38 struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); local
54 srq->rq.size = srq_init_attr->attr.max_wr + 1;
55 srq->rq.max_sge = srq_init_attr->attr.max_sge;
56 sz = sizeof(struct ib_sge) * srq->rq.max_sge +
58 if (rvt_alloc_rq(&srq->rq, srq->rq.size * sz,
69 u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
71 srq
128 struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); local
281 struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); local
296 struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); local
[all...]
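
In the rvt_create_srq() snippet above (srq.c lines 54-56), the receive ring is sized as max_wr + 1 entries, each entry holding a fixed WQE header plus room for max_sge scatter/gather elements. The worked sketch below redoes that sizing arithmetic with stand-in structure sizes; reading the extra slot as the usual head/tail-ring convention for telling "full" apart from "empty" is an interpretation, not something the snippet states.

#include <stdio.h>
#include <stddef.h>

/* Stand-in types; the real layouts live in the IB core and rdmavt headers. */
struct toy_sge  { unsigned long addr; unsigned int length, lkey; };
struct toy_rwqe { unsigned long wr_id; int num_sge; };

int main(void)
{
        unsigned int max_wr  = 128;   /* ib_srq_init_attr.attr.max_wr  */
        unsigned int max_sge = 4;     /* ib_srq_init_attr.attr.max_sge */

        /* One ring entry = fixed header + max_sge scatter entries. */
        size_t per_wqe = sizeof(struct toy_rwqe) +
                         max_sge * sizeof(struct toy_sge);

        /* max_wr + 1 slots: with head/tail indices one slot stays unused,
         * so a completely full ring is never mistaken for an empty one. */
        unsigned int slots = max_wr + 1;

        printf("per-WQE bytes: %zu, ring bytes: %zu\n",
               per_wqe, (size_t)slots * per_wqe);
        return 0;
}
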
Makefile
12 rc.o srq.o trace.o
/linux-master/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_srq.c
65 struct pvrdma_srq *srq = to_vsrq(ibsrq); local
74 cmd->srq_handle = srq->srq_handle;
102 struct pvrdma_srq *srq = to_vsrq(ibsrq); local
137 spin_lock_init(&srq->lock);
138 refcount_set(&srq->refcnt, 1);
139 init_completion(&srq->free);
149 srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0);
150 if (IS_ERR(srq->umem)) {
151 ret = PTR_ERR(srq->umem);
155 srq
216 pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq) argument
243 pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata) argument
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx4/
srq.c
36 #include <linux/mlx4/srq.h>
46 struct mlx4_srq *srq; local
49 srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
51 if (srq)
52 refcount_inc(&srq->refcount);
58 srq->event(srq, event_type);
60 if (refcount_dec_and_test(&srq->refcount))
61 complete(&srq->free);
163 struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
162 mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd, struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq) argument
222 mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq) argument
243 mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark) argument
249 mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark) argument
295 struct mlx4_srq *srq; local
[all...]
Makefile
6 srq.o resource_tracker.o crdump.o
/linux-master/drivers/infiniband/hw/cxgb4/
cq.c
462 static void post_pending_srq_wrs(struct t4_srq *srq) argument
467 while (srq->pending_in_use) {
468 pwr = &srq->pending_wrs[srq->pending_cidx];
469 srq->sw_rq[srq->pidx].wr_id = pwr->wr_id;
470 srq->sw_rq[srq->pidx].valid = 1;
474 srq->cidx, srq
491 reap_srq_cqe(struct t4_cqe *hw_cqe, struct t4_srq *srq) argument
544 poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit, struct t4_srq *srq) argument
754 __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp, struct ib_wc *wc, struct c4iw_srq *srq) argument
923 struct c4iw_srq *srq = NULL; local
[all...]
t4.h
425 static inline u32 t4_srq_avail(struct t4_srq *srq) argument
427 return srq->size - 1 - srq->in_use;
430 static inline void t4_srq_produce(struct t4_srq *srq, u8 len16) argument
432 srq->in_use++;
433 if (++srq->pidx == srq->size)
434 srq->pidx = 0;
435 srq->wq_pidx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
436 if (srq
441 t4_srq_produce_pending_wr(struct t4_srq *srq) argument
449 t4_srq_consume_pending_wr(struct t4_srq *srq) argument
457 t4_srq_produce_ooo(struct t4_srq *srq) argument
463 t4_srq_consume_ooo(struct t4_srq *srq) argument
472 t4_srq_consume(struct t4_srq *srq) argument
582 t4_ring_srq_db(struct t4_srq *srq, u16 inc, u8 len16, union t4_recv_wr *wqe) argument
[all...]
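
The t4.h inlines above keep the cxgb4 SRQ ring bookkeeping in a handful of counters: in_use for posted-but-unconsumed entries, a producer index pidx that wraps at size, and t4_srq_avail() reporting size - 1 - in_use so one slot is always held back. The sketch below keeps just that slot accounting (it drops the len16/wq_pidx doorbell math) and uses hypothetical toy_* names.

#include <stdio.h>
#include <assert.h>

struct toy_srq {
        unsigned int size;     /* total slots in the ring        */
        unsigned int in_use;   /* posted but not yet consumed    */
        unsigned int pidx;     /* producer index, wraps at size  */
        unsigned int cidx;     /* consumer index, wraps at size  */
};

static unsigned int toy_srq_avail(const struct toy_srq *s)
{
        return s->size - 1 - s->in_use;   /* one slot kept in reserve */
}

static void toy_srq_produce(struct toy_srq *s)
{
        assert(toy_srq_avail(s) > 0);
        s->in_use++;
        if (++s->pidx == s->size)
                s->pidx = 0;
}

static void toy_srq_consume(struct toy_srq *s)
{
        assert(s->in_use > 0);
        s->in_use--;
        if (++s->cidx == s->size)
                s->cidx = 0;
}

int main(void)
{
        struct toy_srq s = { .size = 4 };

        toy_srq_produce(&s);
        toy_srq_produce(&s);
        toy_srq_consume(&s);
        printf("avail=%u pidx=%u cidx=%u in_use=%u\n",
               toy_srq_avail(&s), s.pidx, s.cidx, s.in_use);
        return 0;
}
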
qp.c
1341 static void defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe, argument
1344 struct t4_srq_pending_wr *pwr = &srq->pending_wrs[srq->pending_pidx];
1347 __func__, srq->cidx, srq->pidx, srq->wq_pidx,
1348 srq->in_use, srq->ooo_count,
1349 (unsigned long long)wr_id, srq->pending_cidx,
1350 srq
1361 struct c4iw_srq *srq; local
2416 c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq) argument
2430 struct c4iw_srq *srq = to_c4iw_srq(ib_srq); local
2473 free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx, struct c4iw_wr_wait *wr_waitp) argument
2510 alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx, struct c4iw_wr_wait *wr_waitp) argument
2645 c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16) argument
2667 struct c4iw_srq *srq = to_c4iw_srq(ib_srq); local
2799 struct c4iw_srq *srq; local
[all...]
/linux-master/drivers/infiniband/core/
uverbs_std_types_srq.c
14 struct ib_srq *srq = uobject->object; local
17 enum ib_srq_type srq_type = srq->srq_type;
20 ret = ib_destroy_srq_user(srq, &attrs->driver_udata);
46 struct ib_srq *srq; local
107 srq = ib_create_srq_user(pd, &attr, obj, &attrs->driver_udata);
108 if (IS_ERR(srq)) {
109 ret = PTR_ERR(srq);
113 obj->uevent.uobject.object = srq;
131 &srq->ext.xrc.srq_num,
132 sizeof(srq
[all...]
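
The uverbs handler above (lines 107-109) relies on the kernel's pointer-or-error convention: ib_create_srq_user() returns either a usable object or an errno encoded in the pointer, tested with IS_ERR() and decoded with PTR_ERR(). The userspace sketch below re-implements a simplified version of that convention just to show the calling pattern; toy_err_ptr()/toy_is_err()/toy_ptr_err() and TOY_MAX_ERRNO are stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define TOY_MAX_ERRNO 4095UL

/* Simplified pointer-or-error helpers in the style of ERR_PTR()/IS_ERR()/PTR_ERR():
 * the top TOY_MAX_ERRNO addresses are treated as encoded negative errnos. */
static inline void *toy_err_ptr(long err)      { return (void *)err; }
static inline int   toy_is_err(const void *p)
{
        return (unsigned long)p >= (unsigned long)-TOY_MAX_ERRNO;
}
static inline long  toy_ptr_err(const void *p) { return (long)p; }

struct toy_srq { int dummy; };

static struct toy_srq *toy_create_srq(int fail)
{
        if (fail)
                return toy_err_ptr(-ENOMEM);   /* the error travels in the pointer */
        return calloc(1, sizeof(struct toy_srq));
}

int main(void)
{
        struct toy_srq *srq = toy_create_srq(1);

        if (toy_is_err(srq)) {
                printf("create failed: %ld\n", toy_ptr_err(srq));
                return 1;
        }
        free(srq);
        return 0;
}
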
/linux-master/drivers/infiniband/sw/siw/
H A Dsiw_verbs.c353 if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
400 if (attrs->srq) {
406 qp->srq = to_siw_srq(attrs->srq);
533 qp_init_attr->srq = base_qp->srq;
1003 if (qp->srq || qp->attrs.rq_size == 0) {
1578 struct siw_srq *srq = to_siw_srq(base_srq); local
1599 srq->max_sge = attrs->max_sge;
1600 srq
1668 struct siw_srq *srq = to_siw_srq(base_srq); local
1704 struct siw_srq *srq = to_siw_srq(base_srq); local
1728 struct siw_srq *srq = to_siw_srq(base_srq); local
1756 struct siw_srq *srq = to_siw_srq(base_srq); local
1846 siw_srq_event(struct siw_srq *srq, enum ib_event_type etype) argument
[all...]
/linux-master/drivers/infiniband/hw/bnxt_re/
qplib_fp.c
82 if (!qp->srq) {
129 if (!qp->srq) {
349 struct bnxt_qplib_srq *srq; local
356 srq = (struct bnxt_qplib_srq *)q_handle;
357 bnxt_qplib_armen_db(&srq->dbinfo,
602 struct bnxt_qplib_srq *srq)
615 req.srq_cid = cpu_to_le32(srq->id);
619 kfree(srq->swq);
622 bnxt_qplib_free_hwq(res, &srq->hwq);
626 struct bnxt_qplib_srq *srq)
601 bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res, struct bnxt_qplib_srq *srq) argument
625 bnxt_qplib_create_srq(struct bnxt_qplib_res *res, struct bnxt_qplib_srq *srq) argument
702 bnxt_qplib_modify_srq(struct bnxt_qplib_res *res, struct bnxt_qplib_srq *srq) argument
720 bnxt_qplib_query_srq(struct bnxt_qplib_res *res, struct bnxt_qplib_srq *srq) argument
755 bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq, struct bnxt_qplib_swqe *wqe) argument
2571 bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag) argument
2587 struct bnxt_qplib_srq *srq; local
2667 struct bnxt_qplib_srq *srq; local
2767 struct bnxt_qplib_srq *srq; local
[all...]
/linux-master/drivers/net/ethernet/chelsio/cxgb4/
Makefile
9 cxgb4_uld.o srq.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
/linux-master/drivers/net/
eql.c
264 static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
265 static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);
416 slaving_request_t srq; local
418 if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
421 slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
436 s->priority = srq.priority;
437 s->priority_bps = srq.priority;
438 s->priority_Bps = srq.priority / 8;
458 slaving_request_t srq; local
461 if (copy_from_user(&srq, srq
[all...]
/linux-master/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
1118 if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
1173 int dpp_credit_lmt, int srq)
1190 if (!srq) {
1217 if (!srq) {
1342 (attrs->srq != NULL));
1543 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx) argument
1548 srq->idx_bit_fields[i] ^= mask;
1609 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1626 if (qp->srq) {
1629 qp->srq
1171 ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, struct ib_udata *udata, int dpp_offset, int dpp_credit_lmt, int srq) argument
1731 ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq, struct ib_udata *udata) argument
1770 struct ocrdma_srq *srq = get_ocrdma_srq(ibsrq); local
1835 struct ocrdma_srq *srq; local
1847 struct ocrdma_srq *srq; local
1855 struct ocrdma_srq *srq; local
2262 ocrdma_srq_get_idx(struct ocrdma_srq *srq) argument
2281 ocrdma_ring_srq_db(struct ocrdma_srq *srq) argument
2293 struct ocrdma_srq *srq; local
2629 struct ocrdma_srq *srq; local
[all...]
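
ocrdma_srq_toggle_bit() above (lines 1543-1548) flips one bit in srq->idx_bit_fields with an XOR mask, which appears to be how the driver marks an SRQ entry index as allocated or free. The stand-alone sketch below shows the same word/mask toggle; the 32-bit word width and the names are assumptions for illustration, since the snippet does not show how i and mask are derived.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define WORD_BITS 32u   /* assumed bitmap word width */

/* Flip bit 'idx' in a bitmap stored as an array of 32-bit words. */
static void toy_toggle_bit(uint32_t *bit_fields, unsigned int idx)
{
        unsigned int word = idx / WORD_BITS;
        uint32_t     mask = 1u << (idx % WORD_BITS);

        bit_fields[word] ^= mask;
}

int main(void)
{
        uint32_t map[2] = { 0, 0 };

        toy_toggle_bit(map, 37);   /* mark index 37 as in use */
        printf("word 1 = 0x%08" PRIx32 "\n", map[1]);
        toy_toggle_bit(map, 37);   /* release it again        */
        printf("word 1 = 0x%08" PRIx32 "\n", map[1]);
        return 0;
}
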

Completed in 401 milliseconds
