Searched refs: srq (Results 1 - 25 of 48), sorted by relevance


/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/infiniband/hw/mthca/
mthca_srq.c
72 static void *get_wqe(struct mthca_srq *srq, int n) argument
74 if (srq->is_direct)
75 return srq->queue.direct.buf + (n << srq->wqe_shift);
77 return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
78 ((n << srq->wqe_shift) & (PAGE_SIZE - 1));
97 struct mthca_srq *srq,
102 context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
104 context->lkey = cpu_to_be32(srq
95 mthca_tavor_init_srq_context(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_srq *srq, struct mthca_tavor_srq_context *context) argument
113 mthca_arbel_init_srq_context(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_srq *srq, struct mthca_arbel_srq_context *context) argument
136 mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq) argument
143 mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_srq *srq) argument
195 mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, struct ib_srq_attr *attr, struct mthca_srq *srq) argument
324 get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq) argument
335 mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) argument
376 struct mthca_srq *srq = to_msrq(ibsrq); local
405 struct mthca_srq *srq = to_msrq(ibsrq); local
440 struct mthca_srq *srq; local
472 mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr) argument
494 struct mthca_srq *srq = to_msrq(ibsrq); local
594 struct mthca_srq *srq = to_msrq(ibsrq); local
[all...]
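
Note on the get_wqe() excerpt above (mthca_srq.c lines 72-78): WQE n sits at byte offset n << wqe_shift; for an indirect (paged) SRQ buffer that offset is split into a page index and an in-page offset. Below is a minimal standalone sketch of the same arithmetic, with assumed values for PAGE_SHIFT and wqe_shift; it is illustrative only, not the driver's code.

#include <stdio.h>

#define PAGE_SHIFT 12                       /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned int wqe_shift = 6;         /* assumed 64-byte WQE stride */
	unsigned int n = 100;               /* WQE index */
	unsigned long off = (unsigned long)n << wqe_shift;

	/* Split used for the indirect (page_list) case in get_wqe(). */
	unsigned long page    = off >> PAGE_SHIFT;
	unsigned long in_page = off & (PAGE_SIZE - 1);

	printf("wqe %u -> page %lu, offset 0x%lx\n", n, page, in_page);
	return 0;
}
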
mthca_dev.h
249 struct mthca_array srq; member in struct:mthca_srq_table
507 struct mthca_srq *srq);
513 struct ib_srq_attr *attr, struct mthca_srq *srq);
514 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
517 int mthca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
521 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
522 int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
524 int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
mthca_provider.c
474 struct mthca_srq *srq; local
477 srq = kmalloc(sizeof *srq, GFP_KERNEL);
478 if (!srq)
496 srq->mr.ibmr.lkey = ucmd.lkey;
497 srq->db_index = ucmd.db_index;
501 &init_attr->attr, srq);
510 if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
511 mthca_free_srq(to_mdev(pd->device), srq);
516 return &srq
524 mthca_destroy_srq(struct ib_srq *srq) argument
[all...]
mthca_cq.c
279 struct mthca_srq *srq)
311 if (srq && is_recv_cqe(cqe))
312 mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
544 } else if ((*cur_qp)->ibqp.srq) {
545 struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq); local
548 wqe_index = wqe >> srq->wqe_shift;
549 entry->wr_id = srq->wrid[wqe_index];
550 mthca_free_srq_wqe(srq, wqe);
278 mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, struct mthca_srq *srq) argument
/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/infiniband/hw/mlx4/
srq.c
35 #include <linux/mlx4/srq.h>
41 static void *get_wqe(struct mlx4_ib_srq *srq, int n) argument
43 return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
46 static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type) argument
49 struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;
53 event.element.srq = ibsrq;
63 "on SRQ %06x\n", type, srq->srqn);
76 struct mlx4_ib_srq *srq; local
89 srq
223 struct mlx4_ib_srq *srq = to_msrq(ibsrq); local
248 struct mlx4_ib_srq *srq = to_msrq(ibsrq); local
263 mlx4_ib_destroy_srq(struct ib_srq *srq) argument
286 mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index) argument
303 struct mlx4_ib_srq *srq = to_msrq(ibsrq); local
[all...]
Makefile
3 mlx4_ib-y := ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o
mlx4_ib.h
270 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
271 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
282 int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
283 int mlx4_ib_destroy_srq(struct ib_srq *srq);
284 void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
cq.c
547 struct mlx4_ib_srq *srq; local
620 } else if ((*cur_qp)->ibqp.srq) {
621 srq = to_msrq((*cur_qp)->ibqp.srq);
623 wc->wr_id = srq->wrid[wqe_ctr];
624 mlx4_ib_free_srq_wqe(srq, wqe_ctr);
761 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) argument
786 if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
787 mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
809 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) argument
[all...]
/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/infiniband/hw/ipath/
ipath_srq.c
51 struct ipath_srq *srq = to_isrq(ibsrq); local
61 if ((unsigned) wr->num_sge > srq->rq.max_sge) {
67 spin_lock_irqsave(&srq->rq.lock, flags);
68 wq = srq->rq.wq;
70 if (next >= srq->rq.size)
73 spin_unlock_irqrestore(&srq->rq.lock, flags);
79 wqe = get_rwqe_ptr(&srq->rq, wq->head);
87 spin_unlock_irqrestore(&srq->rq.lock, flags);
106 struct ipath_srq *srq; local
121 srq
214 struct ipath_srq *srq = to_isrq(ibsrq); local
348 struct ipath_srq *srq = to_isrq(ibsrq); local
362 struct ipath_srq *srq = to_isrq(ibsrq); local
[all...]
ipath_ruc.c
162 struct ipath_srq *srq; local
168 if (qp->ibqp.srq) {
169 srq = to_isrq(qp->ibqp.srq);
170 handler = srq->ibsrq.event_handler;
171 rq = &srq->rq;
173 srq = NULL;
222 if (n < srq->limit) {
225 srq->limit = 0;
228 ev.element.srq
[all...]
ipath_ud.c
57 struct ipath_srq *srq; local
106 if (qp->ibqp.srq) {
107 srq = to_isrq(qp->ibqp.srq);
108 handler = srq->ibsrq.event_handler;
109 rq = &srq->rq;
111 srq = NULL;
163 if (n < srq->limit) {
166 srq->limit = 0;
169 ev.element.srq
[all...]
ipath_qp.c
636 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
660 init_attr->srq = qp->ibqp.srq;
681 if (qp->ibqp.srq) {
760 if (!init_attr->srq) {
791 if (init_attr->srq) {
792 struct ipath_srq *srq = to_isrq(init_attr->srq); local
794 if (srq->rq.max_sge > 1)
796 (srq
[all...]
/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/infiniband/hw/qib/
qib_srq.c
51 struct qib_srq *srq = to_isrq(ibsrq); local
61 if ((unsigned) wr->num_sge > srq->rq.max_sge) {
67 spin_lock_irqsave(&srq->rq.lock, flags);
68 wq = srq->rq.wq;
70 if (next >= srq->rq.size)
73 spin_unlock_irqrestore(&srq->rq.lock, flags);
79 wqe = get_rwqe_ptr(&srq->rq, wq->head);
87 spin_unlock_irqrestore(&srq->rq.lock, flags);
106 struct qib_srq *srq; local
118 srq
210 struct qib_srq *srq = to_isrq(ibsrq); local
348 struct qib_srq *srq = to_isrq(ibsrq); local
362 struct qib_srq *srq = to_isrq(ibsrq); local
[all...]
qib_ruc.c
90 pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
143 struct qib_srq *srq; local
149 if (qp->ibqp.srq) {
150 srq = to_isrq(qp->ibqp.srq);
151 handler = srq->ibsrq.event_handler;
152 rq = &srq->rq;
154 srq = NULL;
207 if (n < srq
[all...]
/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/mlx4/
srq.c
63 struct mlx4_srq *srq; local
67 srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
68 if (srq)
69 atomic_inc(&srq->refcount);
73 if (!srq) {
78 srq->event(srq, event_type);
80 if (atomic_dec_and_test(&srq->refcount))
81 complete(&srq->free);
113 u64 db_rec, struct mlx4_srq *srq)
112 mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq) argument
187 mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq) argument
209 mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark) argument
215 mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark) argument
[all...]
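
The srq.c excerpt above (lines 63-81) is the low-level SRQ event dispatch: the SRQ is looked up by number in a radix tree, a reference is taken while the event callback runs, and a completion is signalled when the last reference drops so mlx4_srq_free() can finish. A minimal sketch of that lookup-plus-refcount pattern follows; the names (example_obj, example_dispatch) and the locking are assumptions, not the driver's code.

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <asm/atomic.h>

/* Hypothetical object mirroring the refcount/completion pair seen above. */
struct example_obj {
	atomic_t          refcount;
	struct completion free;
	void (*event)(struct example_obj *obj, int event_type);
};

static void example_dispatch(struct radix_tree_root *tree, spinlock_t *lock,
			     unsigned long id, int event_type)
{
	struct example_obj *obj;

	spin_lock(lock);
	obj = radix_tree_lookup(tree, id);
	if (obj)
		atomic_inc(&obj->refcount);	/* pin it across the callback */
	spin_unlock(lock);

	if (!obj)
		return;

	obj->event(obj, event_type);

	if (atomic_dec_and_test(&obj->refcount))
		complete(&obj->free);		/* last user: unblock the freer */
}
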
Makefile
4 mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/infiniband/core/
verbs.c
224 struct ib_srq *srq; local
229 srq = pd->device->create_srq(pd, srq_init_attr, NULL);
231 if (!IS_ERR(srq)) {
232 srq->device = pd->device;
233 srq->pd = pd;
234 srq->uobject = NULL;
235 srq->event_handler = srq_init_attr->event_handler;
236 srq->srq_context = srq_init_attr->srq_context;
238 atomic_set(&srq->usecnt, 0);
241 return srq;
245 ib_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr, enum ib_srq_attr_mask srq_attr_mask) argument
255 ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr) argument
263 ib_destroy_srq(struct ib_srq *srq) argument
585 struct ib_srq *srq; local
[all...]
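
The verbs.c matches above (lines 224-241) are the core ib_create_srq() path, which fills in the new SRQ's device, pd, uobject, event handler and context before returning it. A hedged sketch of how a kernel ULP might call it is shown below; the helper name and the queue sizes are assumptions, and error handling beyond the ERR_PTR return is omitted.

#include <rdma/ib_verbs.h>

/* Hypothetical helper, not from this tree. */
static struct ib_srq *example_create_srq(struct ib_pd *pd,
					 void (*handler)(struct ib_event *, void *),
					 void *ctx)
{
	struct ib_srq_init_attr init_attr = {
		.event_handler = handler,	/* SRQ limit / error events */
		.srq_context   = ctx,
		.attr = {
			.max_wr    = 256,	/* assumed receive queue depth */
			.max_sge   = 1,		/* one scatter entry per WQE */
			.srq_limit = 0,		/* no limit event armed yet */
		},
	};

	return ib_create_srq(pd, &init_attr);	/* ERR_PTR() on failure */
}

The matching teardown is ib_destroy_srq() (also listed above), which refuses to free an SRQ while QPs still reference it via the usecnt initialised at line 238.
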
uverbs_cmd.c
253 static void put_srq_read(struct ib_srq *srq) argument
255 put_uobj_read(srq->uobject);
1055 struct ib_srq *srq; local
1077 srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
1083 if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
1092 attr.srq = srq;
1117 qp->srq = attr.srq;
1125 if (attr.srq)
1724 struct ib_srq *srq; local
1985 struct ib_srq *srq; local
2083 struct ib_srq *srq; local
2114 struct ib_srq *srq; local
2154 struct ib_srq *srq; local
[all...]
/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/
eql.c
260 static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
261 static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);
407 slaving_request_t srq; local
409 if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
412 slave_dev = dev_get_by_name(&init_net, srq.slave_name);
429 s->priority = srq.priority;
430 s->priority_bps = srq.priority;
431 s->priority_Bps = srq.priority / 8;
454 slaving_request_t srq; local
457 if (copy_from_user(&srq, srq
[all...]
/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/infiniband/hw/ehca/
ehca_iverbs.h
161 int ehca_post_srq_recv(struct ib_srq *srq,
169 int ehca_modify_srq(struct ib_srq *srq, struct ib_srq_attr *attr,
172 int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
174 int ehca_destroy_srq(struct ib_srq *srq);
/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/include/rdma/
ib_cm.h
130 unsigned int srq:1; member in struct:ib_cm_req_event_param
144 unsigned int srq:1; member in struct:ib_cm_rep_event_param
366 u8 srq; member in struct:ib_cm_req_param
389 u8 srq; member in struct:ib_cm_rep_param
ib_user_cm.h
149 __u8 srq; member in struct:ib_ucm_req
166 __u8 srq; member in struct:ib_ucm_rep
246 __u8 srq; member in struct:ib_ucm_req_event_resp
262 __u8 srq; member in struct:ib_ucm_rep_event_resp
ib_verbs.h
352 struct ib_srq *srq; member in union:ib_event::__anon18554
571 struct ib_srq *srq; member in struct:ib_qp_init_attr
883 struct ib_srq *srq; member in struct:ib_qp
1043 int (*modify_srq)(struct ib_srq *srq,
1047 int (*query_srq)(struct ib_srq *srq,
1049 int (*destroy_srq)(struct ib_srq *srq);
1050 int (*post_srq_recv)(struct ib_srq *srq,
1341 * @srq: The SRQ to modify.
1351 int ib_modify_srq(struct ib_srq *srq,
1358 * @srq
1377 ib_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *recv_wr, struct ib_recv_wr **bad_recv_wr) argument
[all...]
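
ib_verbs.h above carries both the SRQ pointers embedded in ib_event, ib_qp_init_attr and ib_qp, and the driver entry points (modify_srq, query_srq, destroy_srq, post_srq_recv) behind the ib_modify_srq()/ib_post_srq_recv() wrappers; ipoib_cm.c below shows ib_post_srq_recv() in use. A hedged sketch of posting one receive buffer to an SRQ follows; the helper name is an assumption and the sge is presumed to describe an already DMA-mapped, registered buffer.

#include <rdma/ib_verbs.h>

/* Hypothetical helper, not from this tree. */
static int example_post_srq_recv(struct ib_srq *srq, u64 dma_addr,
				 u32 len, u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr   = dma_addr,	/* bus address of the receive buffer */
		.length = len,
		.lkey   = lkey,		/* lkey of the registered MR */
	};
	struct ib_recv_wr wr = {
		.wr_id   = wr_id,	/* handed back in the completion */
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_srq_recv(srq, &wr, &bad_wr);
}
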
rdma_cm.h
93 u8 srq; member in struct:rdma_conn_param
/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/infiniband/ulp/ipoib/
ipoib_cm.c
101 ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
103 ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
254 .srq = priv->cm.srq,
425 rep.srq = ipoib_cm_has_srq(dev);
1011 .srq = priv->cm.srq,
1055 req.srq = ipoib_cm_has_srq(dev);
1504 priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
1505 if (IS_ERR(priv->cm.srq)) {
[all...]

Completed in 219 milliseconds
