Source: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/infiniband/hw/mthca/

Lines Matching refs:srq

72 static void *get_wqe(struct mthca_srq *srq, int n)
74 if (srq->is_direct)
75 return srq->queue.direct.buf + (n << srq->wqe_shift);
77 return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
78 ((n << srq->wqe_shift) & (PAGE_SIZE - 1));
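
get_wqe() maps WQE index n to its address: a direct allocation is one contiguous buffer indexed at a fixed stride, while a paged allocation splits the shifted index into a page number and an in-page offset. A minimal userspace sketch of the same arithmetic (struct page_buf, wqe_addr_direct, and wqe_addr_paged are illustrative names, not the driver's):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Stand-in for one entry of the driver's srq->queue.page_list. */
struct page_buf { void *buf; };

/* Direct layout: one contiguous buffer, WQE n at stride 1 << wqe_shift. */
static void *wqe_addr_direct(void *base, int n, int wqe_shift)
{
        return (char *)base + ((size_t)n << wqe_shift);
}

/* Paged layout: split the byte offset into page index and in-page offset. */
static void *wqe_addr_paged(struct page_buf *pages, int n, int wqe_shift)
{
        size_t off = (size_t)n << wqe_shift;
        return (char *)pages[off >> PAGE_SHIFT].buf + (off & (PAGE_SIZE - 1));
}

int main(void)
{
        static char page0[PAGE_SIZE], page1[PAGE_SIZE];
        struct page_buf pages[2] = { { page0 }, { page1 } };
        int n = 70, shift = 6; /* 64-byte WQEs: WQE 70 = byte 4480 = page 1, offset 384 */

        printf("direct: %p\n", wqe_addr_direct(page0, 3, shift));
        printf("paged:  %p (page1 at %p)\n",
               wqe_addr_paged(pages, n, shift), (void *)page1);
        return 0;
}
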
97 struct mthca_srq *srq,
102 context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
104 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
115 struct mthca_srq *srq,
122 max = srq->max;
124 context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
125 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
126 context->db_index = cpu_to_be32(srq->db_index);
127 context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
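
Both context-initialization paths pack log-encoded geometry for the HCA: the Tavor variant stores the descriptor size as log2 in 16-byte units (hence the -4), and the Arbel variant puts log2 of the queue size in the top byte next to the 24-bit SRQN, and the log stride in the top bits of another word. A sketch of the packing with hypothetical values (the driver additionally byte-swaps via cpu_to_be32/cpu_to_be64):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Assumed example: a 256-entry SRQ (logsize 8), SRQN 0x00012a,
         * 64-byte WQE stride (wqe_shift 6). */
        uint32_t logsize = 8, srqn = 0x12a, wqe_shift = 6;

        uint32_t state_logsize_srqn = (logsize << 24) | srqn;
        uint32_t logstride_usrpage  = (wqe_shift - 4) << 29;

        printf("state_logsize_srqn = 0x%08x\n", state_logsize_srqn); /* 0x0800012a */
        printf("logstride_usrpage  = 0x%08x\n", logstride_usrpage);  /* 0x40000000 */
        return 0;
}
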
136 static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
138 mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
139 srq->is_direct, &srq->mr);
140 kfree(srq->wrid);
144 struct mthca_srq *srq)
154 srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
155 if (!srq->wrid)
158 err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
160 &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
162 kfree(srq->wrid);
171 for (i = 0; i < srq->max; ++i) {
174 next = wqe = get_wqe(srq, i);
176 if (i < srq->max - 1) {
178 next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
185 (void *) scatter < wqe + (1 << srq->wqe_shift);
190 srq->last = get_wqe(srq, srq->max - 1);
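
The allocation loop threads a free list through the queue itself: each WQE's next segment points at its successor (the index shifted into a byte address, with the low bit set), the last entry terminates the chain, and srq->last is left pointing at the final WQE. A simplified index-based sketch of that initial free list:

#include <stdio.h>

#define MAX_WQE 8

int main(void)
{
        /* The link stored in WQE i names WQE i+1; -1 terminates. */
        int next_free[MAX_WQE];
        int i, first_free = 0, last_free = MAX_WQE - 1;

        for (i = 0; i < MAX_WQE; ++i)
                next_free[i] = (i < MAX_WQE - 1) ? i + 1 : -1;

        /* Posting a receive pops from the head, as the post_srq_recv paths do. */
        int ind = first_free;
        first_free = next_free[ind];
        printf("took WQE %d; head=%d tail=%d\n", ind, first_free, last_free);
        return 0;
}
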
196 struct ib_srq_attr *attr, struct mthca_srq *srq)
208 srq->max = attr->max_wr;
209 srq->max_gs = attr->max_sge;
210 srq->counter = 0;
213 srq->max = roundup_pow_of_two(srq->max + 1);
215 srq->max = srq->max + 1;
219 srq->max_gs * sizeof (struct mthca_data_seg)));
224 srq->wqe_shift = ilog2(ds);
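
Sizing happens before any allocation: max_wr is bumped by one (and rounded up to a power of two on mem-free hardware), and the WQE stride is the next power of two that fits the next segment plus max_sge scatter entries; wqe_shift is then its log2. A sketch of that computation, assuming 16-byte next and scatter segments and the driver's 64-byte minimum stride:

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long v)
{
        unsigned long p = 1;
        while (p < v)
                p <<= 1;
        return p;
}

static int ilog2_ul(unsigned long v)
{
        int l = -1;
        while (v) {
                v >>= 1;
                ++l;
        }
        return l;
}

int main(void)
{
        /* Assumed sizes: 16-byte mthca_next_seg, 16-byte mthca_data_seg. */
        unsigned long next_seg = 16, data_seg = 16, max_gs = 4;

        unsigned long ds = roundup_pow_of_two(next_seg + max_gs * data_seg);
        if (ds < 64)
                ds = 64;
        printf("wqe size %lu -> wqe_shift %d\n", ds, ilog2_ul(ds));
        /* 16 + 4*16 = 80 -> rounds to 128 bytes, shift 7 */
        return 0;
}
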
226 srq->srqn = mthca_alloc(&dev->srq_table.alloc);
227 if (srq->srqn == -1)
231 err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
236 srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
237 srq->srqn, &srq->db);
238 if (srq->db_index < 0) {
251 err = mthca_alloc_srq_buf(dev, pd, srq);
255 spin_lock_init(&srq->lock);
256 srq->refcount = 1;
257 init_waitqueue_head(&srq->wait);
258 mutex_init(&srq->mutex);
261 mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
263 mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
265 err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);
279 if (mthca_array_set(&dev->srq_table.srq,
280 srq->srqn & (dev->limits.num_srqs - 1),
281 srq)) {
289 srq->first_free = 0;
290 srq->last_free = srq->max - 1;
292 attr->max_wr = srq->max - 1;
293 attr->max_sge = srq->max_gs;
298 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
306 mthca_free_srq_buf(dev, srq);
313 mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
316 mthca_table_put(dev, dev->srq_table.table, srq->srqn);
319 mthca_free(&dev->srq_table.alloc, srq->srqn);
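
The error paths at lines 298-319 are the usual kernel unwind ladder: each failure releases only what was already set up, in reverse order (move the context back with HW2SW_SRQ, free the buffer, free the doorbell, put the ICM table entry, free the SRQN). A generic sketch of that goto idiom, with hypothetical acquire/release steps standing in for those resources:

#include <stdio.h>

static int  acquire_a(void) { puts("acquire a"); return 0; }
static int  acquire_b(void) { puts("acquire b"); return 0; }
static int  acquire_c(void) { puts("acquire c"); return -1; } /* simulated failure */
static void release_a(void) { puts("release a"); }
static void release_b(void) { puts("release b"); }

static int setup(void)
{
        int err;

        err = acquire_a();
        if (err)
                goto out;
        err = acquire_b();
        if (err)
                goto err_a;
        err = acquire_c();
        if (err)
                goto err_b;
        return 0;

err_b:
        release_b();
err_a:
        release_a();
out:
        return err;
}

int main(void)
{
        return setup() ? 1 : 0;
}
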
324 static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
329 c = srq->refcount;
335 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
347 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
354 mthca_array_clear(&dev->srq_table.srq,
355 srq->srqn & (dev->limits.num_srqs - 1));
356 --srq->refcount;
359 wait_event(srq->wait, !get_srq_refcount(dev, srq));
361 if (!srq->ibsrq.uobject) {
362 mthca_free_srq_buf(dev, srq);
364 mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
367 mthca_table_put(dev, dev->srq_table.table, srq->srqn);
368 mthca_free(&dev->srq_table.alloc, srq->srqn);
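
Teardown synchronizes with the event path through the refcount: mthca_free_srq drops the creation reference and then sleeps on srq->wait until get_srq_refcount() sees zero, so no event handler can still be using the SRQ when its memory goes away. A userspace sketch of the same pattern, with a mutex and condition variable standing in for the table spinlock and wait queue:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;
static int refcount = 1;          /* creation reference */

static void event_path(void)
{
        pthread_mutex_lock(&lock);
        ++refcount;               /* pin, as mthca_srq_event() does */
        pthread_mutex_unlock(&lock);

        /* ... dispatch the event ... */

        pthread_mutex_lock(&lock);
        if (!--refcount)
                pthread_cond_signal(&waitq); /* like wake_up(&srq->wait) */
        pthread_mutex_unlock(&lock);
}

static void free_path(void)
{
        pthread_mutex_lock(&lock);
        --refcount;               /* drop the creation reference */
        while (refcount)          /* like wait_event(srq->wait, ...) */
                pthread_cond_wait(&waitq, &lock);
        pthread_mutex_unlock(&lock);
        puts("safe to free");
}

int main(void)
{
        event_path();
        free_path();
        return 0;
}
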
376 struct mthca_srq *srq = to_msrq(ibsrq);
385 u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
389 mutex_lock(&srq->mutex);
390 ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
391 mutex_unlock(&srq->mutex);
405 struct mthca_srq *srq = to_msrq(ibsrq);
416 err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
428 srq_attr->max_wr = srq->max - 1;
429 srq_attr->max_sge = srq->max_gs;
440 struct mthca_srq *srq;
444 srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
445 if (srq)
446 ++srq->refcount;
449 if (!srq) {
454 if (!srq->ibsrq.event_handler)
459 event.element.srq = &srq->ibsrq;
460 srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
464 if (!--srq->refcount)
465 wake_up(&srq->wait);
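
mthca_srq_event() finds the SRQ by masking the SRQN with num_srqs - 1 (the table is a power of two, so the low bits index it directly), pins it with a reference before invoking the consumer's event handler, and wakes any waiter when the reference drops back. A sketch of the masked lookup and pinning (table size and names are illustrative):

#include <stdio.h>

#define NUM_SRQS 16 /* must be a power of two for the mask to work */

struct srq { int srqn; int refcount; };

static struct srq *table[NUM_SRQS];

static struct srq *lookup_and_pin(int srqn)
{
        struct srq *s = table[srqn & (NUM_SRQS - 1)];
        if (s)
                ++s->refcount; /* hold it across the event callback */
        return s;
}

int main(void)
{
        static struct srq s = { 0x12a, 1 };
        table[s.srqn & (NUM_SRQS - 1)] = &s;

        struct srq *hit = lookup_and_pin(0x12a);
        printf("found srqn 0x%x, refcount %d\n", hit->srqn, hit->refcount);
        return 0;
}
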
472 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
477 ind = wqe_addr >> srq->wqe_shift;
479 spin_lock(&srq->lock);
481 last_free = get_wqe(srq, srq->last_free);
483 last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
484 *wqe_to_link(get_wqe(srq, ind)) = -1;
485 srq->last_free = ind;
487 spin_unlock(&srq->lock);
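
mthca_free_srq_wqe() recycles a completed WQE: the byte address is shifted back down to an index, the old tail is linked to it, and it becomes the new terminator of the free list. Extending the earlier index-based sketch:

#include <stdio.h>

#define MAX_WQE 8

static int next_free[MAX_WQE];
static int first_free, last_free;

/* Return WQE `ind` to the tail, as mthca_free_srq_wqe() does
 * (the driver derives ind from wqe_addr >> wqe_shift). */
static void free_wqe(int ind)
{
        next_free[last_free] = ind; /* old tail now points at us */
        next_free[ind] = -1;        /* we are the new terminator */
        last_free = ind;
}

int main(void)
{
        int i, ind;

        for (i = 0; i < MAX_WQE; ++i)
                next_free[i] = (i < MAX_WQE - 1) ? i + 1 : -1;
        first_free = 0;
        last_free = MAX_WQE - 1;

        /* Pop one, complete it, recycle it: it reappears at the tail. */
        ind = first_free;
        first_free = next_free[ind];
        free_wqe(ind);
        printf("recycled WQE %d; tail is now %d\n", ind, last_free);
        return 0;
}
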
494 struct mthca_srq *srq = to_msrq(ibsrq);
505 spin_lock_irqsave(&srq->lock, flags);
507 first_ind = srq->first_free;
510 ind = srq->first_free;
511 wqe = get_wqe(srq, ind);
515 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
521 prev_wqe = srq->last;
522 srq->last = wqe;
529 if (unlikely(wr->num_sge > srq->max_gs)) {
532 srq->last = prev_wqe;
541 if (i < srq->max_gs)
547 srq->wrid[ind] = wr->wr_id;
548 srq->first_free = next_ind;
560 mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8,
564 first_ind = srq->first_free;
575 mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
586 spin_unlock_irqrestore(&srq->lock, flags);
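
On Tavor, posted receives are announced with 64-bit doorbell writes: the loop flushes a full batch whenever nreq reaches the per-doorbell limit (MTHCA_TAVOR_MAX_WQES_PER_RECV_DB in the driver), and the final write carries the leftover count OR'ed in next to the SRQN. A sketch of the batching control flow, with ring_doorbell() standing in for mthca_write64() to the receive doorbell:

#include <stdio.h>

#define MAX_WQES_PER_DB 256 /* assumed value of MTHCA_TAVOR_MAX_WQES_PER_RECV_DB */

static void ring_doorbell(unsigned int wqe_off, unsigned int srqn_nreq)
{
        printf("doorbell: wqe_off=0x%x srqn_nreq=0x%x\n", wqe_off, srqn_nreq);
}

int main(void)
{
        unsigned int srqn = 0x12a, wqe_shift = 6;
        unsigned int first_ind = 0, ind = 0, nreq = 0;
        int to_post = 600; /* hypothetical burst of receives */

        while (to_post--) {
                ++nreq;
                ++ind; /* pretend we consumed the next free WQE */
                if (nreq == MAX_WQES_PER_DB) {
                        /* flush a full batch: first WQE offset and SRQN << 8 */
                        ring_doorbell(first_ind << wqe_shift, srqn << 8);
                        nreq = 0;
                        first_ind = ind;
                }
        }
        if (nreq) /* final partial batch carries the leftover count */
                ring_doorbell(first_ind << wqe_shift, (srqn << 8) | nreq);
        return 0;
}
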
594 struct mthca_srq *srq = to_msrq(ibsrq);
603 spin_lock_irqsave(&srq->lock, flags);
606 ind = srq->first_free;
607 wqe = get_wqe(srq, ind);
611 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
622 if (unlikely(wr->num_sge > srq->max_gs)) {
633 if (i < srq->max_gs)
636 srq->wrid[ind] = wr->wr_id;
637 srq->first_free = next_ind;
641 srq->counter += nreq;
648 *srq->db = cpu_to_be32(srq->counter);
651 spin_unlock_irqrestore(&srq->lock, flags);
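
Arbel (mem-free) hardware takes no per-batch doorbell write here: the driver keeps a running count of posted receives and publishes the new total into the doorbell record (*srq->db) that the HCA samples, byte-swapped and, in the driver, behind a write barrier. A sketch of that publish step:

#include <stdint.h>
#include <stdio.h>

static uint32_t doorbell_rec; /* stand-in for *srq->db */
static uint32_t counter;      /* stand-in for srq->counter */

/* Post `nreq` receives, then publish the new total. In the driver a
 * wmb() orders the WQE writes before this store, and the value is
 * written big-endian via cpu_to_be32(). */
static void post_receives(unsigned int nreq)
{
        counter += nreq;
        /* wmb(); */
        doorbell_rec = counter;
}

int main(void)
{
        post_receives(3);
        post_receives(5);
        printf("doorbell record now %u\n", doorbell_rec);
        return 0;
}
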
696 err = mthca_array_init(&dev->srq_table.srq,
709 mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);