Lines matching refs: srq (each entry is prefixed by its line number in the source file)

74 static void *get_wqe(struct mthca_srq *srq, int n)
76 if (srq->is_direct)
77 return srq->queue.direct.buf + (n << srq->wqe_shift);
79 return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
80 ((n << srq->wqe_shift) & (PAGE_SIZE - 1));
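
get_wqe() (lines 74-80) maps a WQE index to its address: for a direct allocation the queue is one contiguous buffer and the offset is simply n << wqe_shift; for a paged allocation the same linear offset is split into a page-list index and an offset within the page. Below is a minimal userspace sketch of the same arithmetic; the struct layout, PAGE_SHIFT value, and sample sizes are assumptions for illustration, not the driver's definitions.

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12                     /* assume 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* hypothetical stand-ins for the driver's queue layout */
    struct buf { void *buf; };

    struct queue {
        int        is_direct;
        struct buf direct;       /* one contiguous buffer, or ...   */
        struct buf *page_list;   /* ... an array of PAGE_SIZE chunks */
        int        wqe_shift;    /* log2 of the WQE stride */
    };

    static void *get_wqe(struct queue *q, int n)
    {
        size_t off = (size_t) n << q->wqe_shift;

        if (q->is_direct)
            return (char *) q->direct.buf + off;
        /* split the linear offset into page index and in-page offset */
        return (char *) q->page_list[off >> PAGE_SHIFT].buf +
               (off & (PAGE_SIZE - 1));
    }

    int main(void)
    {
        struct queue q = { .is_direct = 1, .wqe_shift = 6 };  /* 64-byte WQEs */
        q.direct.buf = malloc(16 << q.wqe_shift);

        /* WQE 3 of a direct queue lives at offset 3 * 64 = 192 */
        printf("offset = %td\n", (char *) get_wqe(&q, 3) - (char *) q.direct.buf);
        free(q.direct.buf);
        return 0;
    }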
99 struct mthca_srq *srq,
108 context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
110 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
120 struct mthca_srq *srq,
134 max = srq->max;
136 context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
137 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
138 context->db_index = cpu_to_be32(srq->db_index);
139 context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
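
The two context initializers (Tavor at line 99, Arbel at line 120) hand the SRQ's geometry to firmware as packed, log2-encoded fields: the WQE stride is stored as wqe_shift - 4 (units of 16 bytes), and Arbel packs logsize into the byte above the 24-bit SRQ number. A small sketch of that bit packing; the sample srqn, shift, and logsize values are invented:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t srqn = 0x000123;   /* hypothetical 24-bit SRQ number */
        int wqe_shift = 6;          /* 64-byte WQEs */
        int logsize = 10;           /* 1024 entries */

        /* logsize in bits 31:24, SRQ number in bits 23:0 */
        uint32_t state_logsize_srqn = (uint32_t) logsize << 24 | srqn;

        /* stride encoded in 16-byte units, placed in bits 31:29 */
        uint32_t logstride_usrpage = (uint32_t) (wqe_shift - 4) << 29;

        printf("state_logsize_srqn = 0x%08x\n", state_logsize_srqn);
        printf("logstride_usrpage  = 0x%08x\n", logstride_usrpage);
        return 0;
    }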
147 static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
149 mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
150 srq->is_direct, &srq->mr);
151 kfree(srq->wrid);
155 struct mthca_srq *srq, struct ib_udata *udata)
165 srq->wrid = kmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
166 if (!srq->wrid)
169 err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
171 &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
173 kfree(srq->wrid);
182 for (i = 0; i < srq->max; ++i) {
185 next = wqe = get_wqe(srq, i);
187 if (i < srq->max - 1) {
189 next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
196 (void *) scatter < wqe + (1 << srq->wqe_shift);
201 srq->last = get_wqe(srq, srq->max - 1);
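
mthca_alloc_srq_buf() (lines 155-201) threads every WQE onto a software free list: each entry records the index of the next free entry, the last entry stores -1, and nda_op doubles as the hardware next-descriptor pointer. A self-contained sketch of the index-linked chain, with wqe_to_link() reduced to a plain int field:

    #include <stdio.h>

    #define MAX_WQES 8

    /* hypothetical WQE holding just the free-list link */
    struct wqe { int next_free; };

    int main(void)
    {
        struct wqe q[MAX_WQES];
        int i, n = 0;

        /* link entry i to i + 1; terminate the chain with -1 */
        for (i = 0; i < MAX_WQES; ++i)
            q[i].next_free = (i < MAX_WQES - 1) ? i + 1 : -1;

        /* walk the chain from the first free entry */
        for (i = 0; i != -1; i = q[i].next_free)
            ++n;
        printf("%d entries on the free list\n", n);   /* prints 8 */
        return 0;
    }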
207 struct ib_srq_attr *attr, struct mthca_srq *srq,
219 srq->max = attr->max_wr;
220 srq->max_gs = attr->max_sge;
221 srq->counter = 0;
224 srq->max = roundup_pow_of_two(srq->max + 1);
226 srq->max = srq->max + 1;
230 srq->max_gs * sizeof (struct mthca_data_seg)));
235 srq->wqe_shift = ilog2(ds);
237 srq->srqn = mthca_alloc(&dev->srq_table.alloc);
238 if (srq->srqn == -1)
242 err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
247 srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
248 srq->srqn, &srq->db);
249 if (srq->db_index < 0) {
262 err = mthca_alloc_srq_buf(dev, pd, srq, udata);
266 spin_lock_init(&srq->lock);
267 srq->refcount = 1;
268 init_waitqueue_head(&srq->wait);
269 mutex_init(&srq->mutex);
272 mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf, udata);
274 mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf, udata);
276 err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);
284 if (mthca_array_set(&dev->srq_table.srq,
285 srq->srqn & (dev->limits.num_srqs - 1),
286 srq)) {
294 srq->first_free = 0;
295 srq->last_free = srq->max - 1;
297 attr->max_wr = srq->max - 1;
298 attr->max_sge = srq->max_gs;
303 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
309 mthca_free_srq_buf(dev, srq);
316 mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
319 mthca_table_put(dev, dev->srq_table.table, srq->srqn);
322 mthca_free(&dev->srq_table.alloc, srq->srqn);
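
mthca_alloc_srq() (lines 207-322) acquires its resources in a fixed order (SRQ number, ICM table entry, doorbell record, queue buffer, SW2HW firmware command, table slot) and on failure unwinds them in reverse through the goto labels visible at lines 303-322. A minimal userspace sketch of that cleanup-ladder idiom, with the resources faked and the buffer acquisition forced to fail so the unwind path actually runs:

    #include <stdio.h>

    /* fake acquire/release pairs standing in for mthca_alloc(),
     * mthca_table_get(), mthca_buf_alloc(), and friends */
    static int acquire(const char *what, int fail)
    {
        if (fail) {
            printf("acquire %s: failed\n", what);
            return -1;
        }
        printf("acquire %s\n", what);
        return 0;
    }

    static void release(const char *what)
    {
        printf("release %s\n", what);
    }

    static int alloc_srq(void)
    {
        int err;

        err = acquire("srqn", 0);
        if (err)
            return err;

        err = acquire("icm table entry", 0);
        if (err)
            goto err_out;

        err = acquire("queue buffer", 1);   /* force a failure */
        if (err)
            goto err_out_table;

        return 0;

        /* unwind in strict reverse order of acquisition */
    err_out_table:
        release("icm table entry");
    err_out:
        release("srqn");
        return err;
    }

    int main(void)
    {
        return alloc_srq() ? 1 : 0;
    }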
327 static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
332 c = srq->refcount;
338 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
349 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
354 mthca_array_clear(&dev->srq_table.srq,
355 srq->srqn & (dev->limits.num_srqs - 1));
356 --srq->refcount;
359 wait_event(srq->wait, !get_srq_refcount(dev, srq));
361 if (!srq->ibsrq.uobject) {
362 mthca_free_srq_buf(dev, srq);
364 mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
367 mthca_table_put(dev, dev->srq_table.table, srq->srqn);
368 mthca_free(&dev->srq_table.alloc, srq->srqn);
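
mthca_free_srq() (lines 338-368) cannot tear the SRQ down while an asynchronous event handler still holds a reference, so it drops its own reference and then sleeps in wait_event() until the count reaches zero, at which point the event path's wake_up() (line 458) releases it. A userspace analogue of that handshake, using a pthread mutex and condition variable in place of the kernel's spinlock and wait queue:

    #include <pthread.h>
    #include <stdio.h>

    /* analogue of srq->refcount under the table lock, plus srq->wait */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wait_q = PTHREAD_COND_INITIALIZER;
    static int refcount = 2;    /* creator plus one in-flight event handler */

    static void *event_handler(void *arg)
    {
        (void) arg;
        /* ... dispatch the event ... then drop the reference */
        pthread_mutex_lock(&lock);
        if (!--refcount)
            pthread_cond_signal(&wait_q);   /* wake_up(&srq->wait) */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, event_handler, NULL);

        /* mthca_free_srq(): drop our reference, wait for the rest */
        pthread_mutex_lock(&lock);
        --refcount;
        while (refcount)                    /* wait_event(srq->wait, ...) */
            pthread_cond_wait(&wait_q, &lock);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        puts("all references dropped; safe to free the SRQ");
        return 0;
    }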
376 struct mthca_srq *srq = to_msrq(ibsrq);
384 u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
388 mutex_lock(&srq->mutex);
389 ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit);
390 mutex_unlock(&srq->mutex);
399 struct mthca_srq *srq = to_msrq(ibsrq);
409 err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox);
421 srq_attr->max_wr = srq->max - 1;
422 srq_attr->max_sge = srq->max_gs;
433 struct mthca_srq *srq;
437 srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
438 if (srq)
439 ++srq->refcount;
442 if (!srq) {
447 if (!srq->ibsrq.event_handler)
452 event.element.srq = &srq->ibsrq;
453 srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
457 if (!--srq->refcount)
458 wake_up(&srq->wait);
465 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
470 ind = wqe_addr >> srq->wqe_shift;
472 spin_lock(&srq->lock);
474 last_free = get_wqe(srq, srq->last_free);
476 last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
477 *wqe_to_link(get_wqe(srq, ind)) = -1;
478 srq->last_free = ind;
480 spin_unlock(&srq->lock);
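
mthca_free_srq_wqe() (lines 465-480) returns a completed WQE to the tail of the free list under srq->lock: the old tail is pointed at the freed index and the freed entry becomes the new -1-terminated tail. A sketch of that tail append on the same index-linked chain as above:

    #include <stdio.h>

    #define MAX_WQES 4

    struct wqe { int next_free; };
    static struct wqe q[MAX_WQES];
    static int last_free;

    /* hand a completed WQE back to the tail of the free chain,
     * as mthca_free_srq_wqe() does under srq->lock */
    static void free_wqe(int ind)
    {
        q[last_free].next_free = ind;   /* old tail now points at it */
        q[ind].next_free = -1;          /* freed entry terminates the chain */
        last_free = ind;
    }

    int main(void)
    {
        int i;

        /* start with entries 0 and 1 free, 2 and 3 posted to hardware */
        q[0].next_free = 1;
        q[1].next_free = -1;
        last_free = 1;

        free_wqe(3);                    /* completion for WQE 3 arrives */

        for (i = 0; i != -1; i = q[i].next_free)
            printf("free: %d\n", i);    /* prints 0, 1, 3 */
        return 0;
    }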
487 struct mthca_srq *srq = to_msrq(ibsrq);
498 spin_lock_irqsave(&srq->lock, flags);
500 first_ind = srq->first_free;
503 ind = srq->first_free;
504 wqe = get_wqe(srq, ind);
508 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
514 prev_wqe = srq->last;
515 srq->last = wqe;
522 if (unlikely(wr->num_sge > srq->max_gs)) {
525 srq->last = prev_wqe;
534 if (i < srq->max_gs)
540 srq->wrid[ind] = wr->wr_id;
541 srq->first_free = next_ind;
553 mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8,
557 first_ind = srq->first_free;
568 mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
573 spin_unlock_irqrestore(&srq->lock, flags);
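
The Tavor receive path (lines 487-573) batches doorbells: it builds WQEs until a batch fills, rings a 64-bit doorbell carrying the first new WQE's offset and the SRQ number, and rings once more at the end for any remainder (with nreq folded into the low bits). A sketch of just that batching control flow; the batch limit of 256 is an assumed stand-in for MTHCA_TAVOR_MAX_WQES_PER_RECV_DB, and the doorbell is reduced to a printf:

    #include <stdio.h>

    #define MAX_WQES_PER_DB 256   /* assumed stand-in for
                                   * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB */

    static void ring_doorbell(int first_ind, int nreq)
    {
        /* stands in for mthca_write64(first_ind << wqe_shift, ...) */
        printf("doorbell: first=%d count=%d\n", first_ind, nreq);
    }

    int main(void)
    {
        int total = 600, posted = 0, nreq = 0, first_ind = 0;

        while (posted < total) {
            ++nreq;                       /* ... build one WQE ... */
            ++posted;

            if (nreq == MAX_WQES_PER_DB) {
                ring_doorbell(first_ind, nreq);  /* flush a full batch */
                first_ind = posted;
                nreq = 0;
            }
        }
        if (nreq)
            ring_doorbell(first_ind, nreq);      /* flush the remainder */
        return 0;
    }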
581 struct mthca_srq *srq = to_msrq(ibsrq);
590 spin_lock_irqsave(&srq->lock, flags);
593 ind = srq->first_free;
594 wqe = get_wqe(srq, ind);
598 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
609 if (unlikely(wr->num_sge > srq->max_gs)) {
620 if (i < srq->max_gs)
623 srq->wrid[ind] = wr->wr_id;
624 srq->first_free = next_ind;
628 srq->counter += nreq;
635 *srq->db = cpu_to_be32(srq->counter);
638 spin_unlock_irqrestore(&srq->lock, flags);
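
The Arbel path (lines 581-638) has no MMIO doorbell for SRQ receives; it advances a running counter and publishes it through a doorbell record in coherent memory, with a write barrier ordering the WQE stores before the counter store. A userspace sketch of that publish step, using __sync_synchronize() and htonl() as stand-ins for the kernel's wmb() and cpu_to_be32():

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>   /* htonl() as a stand-in for cpu_to_be32() */

    static volatile uint32_t doorbell_record;   /* memory the HCA polls */
    static uint32_t counter;

    static void post_receives(int nreq)
    {
        /* ... write nreq WQEs into the queue buffer ... */
        counter += nreq;

        /* make the WQE writes globally visible before the updated
         * counter is; stands in for the kernel's wmb() */
        __sync_synchronize();

        doorbell_record = htonl(counter);   /* *srq->db = cpu_to_be32(...) */
    }

    int main(void)
    {
        post_receives(4);
        printf("doorbell record now 0x%08x\n",
               (unsigned) ntohl(doorbell_record));
        return 0;
    }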
683 err = mthca_array_init(&dev->srq_table.srq,
696 mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);