Lines matching refs:srq in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/infiniband/hw/mlx4/

35 #include <linux/mlx4/srq.h>
41 static void *get_wqe(struct mlx4_ib_srq *srq, int n)
43 return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
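
A note on the arithmetic in get_wqe(): wqe_shift is set further down as ilog2(desc_size), so n << wqe_shift equals n * desc_size and get_wqe() is a plain array index into the WQE buffer. A standalone sketch, with illustrative names that are not part of the driver:

#include <stdio.h>
#include <stddef.h>

static unsigned char wq_buf[1 << 12];   /* stand-in for srq->buf */

static void *get_wqe_sketch(unsigned char *buf, int n, int wqe_shift)
{
	/* n << wqe_shift == n * (1 << wqe_shift): byte offset of WQE n */
	return buf + ((size_t)n << wqe_shift);
}

int main(void)
{
	int wqe_shift = 6;      /* e.g. 64-byte descriptors */
	unsigned char *w = get_wqe_sketch(wq_buf, 3, wqe_shift);

	printf("WQE 3 starts at byte %td\n", w - wq_buf);   /* prints 192 */
	return 0;
}
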
46 static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
49 struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;
53 event.element.srq = ibsrq;
63 "on SRQ %06x\n", type, srq->srqn);
76 struct mlx4_ib_srq *srq;
89 srq = kmalloc(sizeof *srq, GFP_KERNEL);
90 if (!srq)
93 mutex_init(&srq->mutex);
94 spin_lock_init(&srq->lock);
95 srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
96 srq->msrq.max_gs = init_attr->attr.max_sge;
100 srq->msrq.max_gs *
102 srq->msrq.wqe_shift = ilog2(desc_size);
104 buf_size = srq->msrq.max * desc_size;
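
The sizing logic rounds the requested depth up to a power of two with one extra slot (line 190 later reports max - 1 back as max_wr, so the requested count stays usable) and derives wqe_shift from the descriptor size. A userspace sketch of the same arithmetic, with stand-ins for the kernel's roundup_pow_of_two() and ilog2():

#include <stdio.h>

/* Minimal stand-ins; assume x > 0 and results that fit in unsigned int. */
static unsigned int roundup_pow_of_two_sketch(unsigned int x)
{
	unsigned int r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

static int ilog2_sketch(unsigned int x)
{
	int n = -1;

	while (x) {
		x >>= 1;
		n++;
	}
	return n;
}

int main(void)
{
	unsigned int max_wr = 100, desc_size = 64;      /* example values */
	unsigned int max = roundup_pow_of_two_sketch(max_wr + 1);

	printf("max=%u wqe_shift=%d buf_size=%u\n",
	       max, ilog2_sketch(desc_size), max * desc_size);
	return 0;       /* max=128 wqe_shift=6 buf_size=8192 */
}
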
114 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
116 if (IS_ERR(srq->umem)) {
117 err = PTR_ERR(srq->umem);
121 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
122 ilog2(srq->umem->page_size), &srq->mtt);
126 err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
131 ucmd.db_addr, &srq->db);
135 err = mlx4_db_alloc(dev->dev, &srq->db, 0);
139 *srq->db.db = 0;
141 if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
146 srq->head = 0;
147 srq->tail = srq->msrq.max - 1;
148 srq->wqe_ctr = 0;
150 for (i = 0; i < srq->msrq.max; ++i) {
151 next = get_wqe(srq, i);
153 cpu_to_be16((i + 1) & (srq->msrq.max - 1));
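
Lines 146-153 thread every WQE onto a circular free list stored in the WQEs themselves: head names the next free entry, tail the last one, and head == tail means no receive can be posted. A standalone sketch of that structure (locking, endianness, and the driver's struct layout elided):

#include <stdio.h>
#include <stdint.h>

#define MAX_WQE 8       /* must be a power of two, per the rounding above */

static uint16_t next_wqe_index[MAX_WQE];        /* stand-in next pointers */

int main(void)
{
	int i, head = 0, tail = MAX_WQE - 1;

	/* Link WQE i to WQE i+1, wrapping at the end, as the driver does. */
	for (i = 0; i < MAX_WQE; ++i)
		next_wqe_index[i] = (i + 1) & (MAX_WQE - 1);

	/* Walking from head until tail yields the MAX_WQE - 1 usable
	 * entries; one slot stays reserved so head == tail means full. */
	for (i = head; i != tail; i = next_wqe_index[i])
		printf("free wqe %d\n", i);
	return 0;
}
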
161 err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
162 &srq->mtt);
166 err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
170 srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
171 if (!srq->wrid) {
177 err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, &srq->mtt,
178 srq->db.dma, &srq->msrq);
182 srq->msrq.event = mlx4_ib_srq_event;
185 if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
190 init_attr->attr.max_wr = srq->msrq.max - 1;
192 return &srq->ibsrq;
196 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
198 kfree(srq->wrid);
201 mlx4_mtt_cleanup(dev->dev, &srq->mtt);
205 ib_umem_release(srq->umem);
207 mlx4_buf_free(dev->dev, buf_size, &srq->buf);
211 mlx4_db_free(dev->dev, &srq->db);
214 kfree(srq);
223 struct mlx4_ib_srq *srq = to_msrq(ibsrq);
231 if (attr->srq_limit >= srq->msrq.max)
234 mutex_lock(&srq->mutex);
235 ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
236 mutex_unlock(&srq->mutex);
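
The modify path (lines 223-236) supports only arming the limit: after mlx4_srq_arm(), the HCA raises IB_EVENT_SRQ_LIMIT_REACHED once fewer than srq_limit receives remain posted, and the event is delivered through mlx4_ib_srq_event() above. One way this is typically driven from userspace, sketched with libibverbs:

#include <infiniband/verbs.h>
#include <stdint.h>

/* Hedged sketch: srq is an already-created ibv_srq; per line 231 the
 * limit must be below the SRQ's maximum depth or the kernel rejects it. */
static int arm_srq_limit(struct ibv_srq *srq, uint32_t limit)
{
	struct ibv_srq_attr attr = {
		.srq_limit = limit,
	};

	return ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT);
}
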
248 struct mlx4_ib_srq *srq = to_msrq(ibsrq);
252 ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark);
257 srq_attr->max_wr = srq->msrq.max - 1;
258 srq_attr->max_sge = srq->msrq.max_gs;
263 int mlx4_ib_destroy_srq(struct ib_srq *srq)
265 struct mlx4_ib_dev *dev = to_mdev(srq->device);
266 struct mlx4_ib_srq *msrq = to_msrq(srq);
271 if (srq->uobject) {
272 mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
286 void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
291 spin_lock(&srq->lock);
293 next = get_wqe(srq, srq->tail);
295 srq->tail = wqe_index;
297 spin_unlock(&srq->lock);
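
mlx4_ib_free_srq_wqe() returns a completed WQE to the free list under the lock; the intervening source line (not matched here because it does not mention srq) points the old tail's next_wqe_index at the freed entry. The list manipulation, in the array form of the earlier sketch:

#include <stdint.h>

/* Hedged sketch, same illustrative names as above: append wqe_index
 * to the tail of the circular free list. */
static void free_srq_wqe_sketch(uint16_t *next_wqe_index, int *tail,
				int wqe_index)
{
	next_wqe_index[*tail] = (uint16_t)wqe_index;    /* old tail -> freed */
	*tail = wqe_index;                      /* freed WQE is the new tail */
}
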
303 struct mlx4_ib_srq *srq = to_msrq(ibsrq);
311 spin_lock_irqsave(&srq->lock, flags);
314 if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
320 if (unlikely(srq->head == srq->tail)) {
326 srq->wrid[srq->head] = wr->wr_id;
328 next = get_wqe(srq, srq->head);
329 srq->head = be16_to_cpu(next->next_wqe_index);
338 if (i < srq->msrq.max_gs) {
346 srq->wqe_ctr += nreq;
354 *srq->db.db = cpu_to_be32(srq->wqe_ctr);
357 spin_unlock_irqrestore(&srq->lock, flags);
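
The post path consumes free WQEs from head and then publishes the new producer count by writing wqe_ctr, big-endian, to the doorbell record the HCA polls (line 354); the driver issues wmb() just before that store so it cannot pass the WQE writes. A sketch of the pattern, with a GCC builtin standing in for wmb() and a byte swap that assumes a little-endian host:

#include <stdint.h>

static void ring_srq_doorbell(volatile uint32_t *db, uint32_t wqe_ctr)
{
	__sync_synchronize();                   /* order WQE stores first */
	*db = __builtin_bswap32(wqe_ctr);       /* cpu_to_be32 on LE hosts */
}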