Lines Matching refs:cq

42 struct ib_cq *cq = dim->priv;
49 trace_cq_modify(cq, comps, usec);
50 cq->device->ops.modify_cq(cq, comps, usec);
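
The three fragments above (source lines 42-50) belong to the CQ's DIM (dynamic interrupt moderation) work handler, which runs from dim->work and reprograms the CQ's moderation. A hedged reconstruction of what such a handler plausibly looks like, assuming the struct dim machinery and the rdma_dim_prof moderation table from <linux/dim.h>; only the matched lines above are confirmed by this listing:

#include <linux/dim.h>
#include <rdma/ib_verbs.h>
#include <trace/events/rdma_core.h>

/* Deferred DIM work: look up the currently selected moderation profile
 * and program it into the CQ through the provider's modify_cq() hook. */
static void ib_cq_rdma_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct ib_cq *cq = dim->priv;

	/* rdma_dim_prof[] maps the profile index to (comps, usec) pairs */
	u16 usec = rdma_dim_prof[dim->profile_ix].usec;
	u16 comps = rdma_dim_prof[dim->profile_ix].comps;

	dim->state = DIM_START_MEASURE;

	trace_cq_modify(cq, comps, usec);
	cq->device->ops.modify_cq(cq, comps, usec);
}
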
53 static void rdma_dim_init(struct ib_cq *cq)
57 if (!cq->device->ops.modify_cq || !cq->device->use_cq_dim ||
58 cq->poll_ctx == IB_POLL_DIRECT)
68 dim->priv = cq;
69 cq->dim = dim;
74 static void rdma_dim_destroy(struct ib_cq *cq)
76 if (!cq->dim)
79 cancel_work_sync(&cq->dim->work);
80 kfree(cq->dim);
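
rdma_dim_init() (source lines 53-69) only arms DIM when the provider implements ops.modify_cq, has set use_cq_dim, and the CQ is not IB_POLL_DIRECT; rdma_dim_destroy() (lines 74-80) cancels the pending work before freeing. A minimal sketch of how a provider driver might opt in, relying only on the ib_device/ib_device_ops fields referenced above; the mydrv_* names are hypothetical:

#include <rdma/ib_verbs.h>

/* Hypothetical provider hook: cq_count/cq_period are the (comps, usec)
 * values the DIM work handler passes to ops.modify_cq(). */
static int mydrv_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	/* program the moderation values into the hardware CQ context here */
	return 0;
}

static const struct ib_device_ops mydrv_dev_ops = {
	.modify_cq = mydrv_modify_cq,
	/* ... the driver's other ops elided ... */
};

static void mydrv_setup_device(struct ib_device *ibdev)
{
	/* Opt the device's non-IB_POLL_DIRECT CQs into dynamic moderation. */
	ibdev->use_cq_dim = true;
	ib_set_device_ops(ibdev, &mydrv_dev_ops);
}
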
83 static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
87 rc = ib_poll_cq(cq, num_entries, wc);
88 trace_cq_poll(cq, num_entries, rc);
92 static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
97 trace_cq_process(cq);
104 while ((n = __poll_cq(cq, min_t(u32, batch,
110 wc->wr_cqe->done(cq, wc);
126 * @cq: CQ to process
138 int ib_process_cq_direct(struct ib_cq *cq, int budget)
142 return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
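
ib_process_cq_direct() (source lines 126-142) is the entry point for CQs created with IB_POLL_DIRECT: no interrupt or polling context is armed, and completions are only reaped when the caller asks. A hedged usage sketch; my_drain_send_cq() and the budget of 16 are arbitrary illustration choices, not part of the API:

#include <linux/sched.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper: reap completions from a CQ that was allocated with
 * IB_POLL_DIRECT, in the caller's own context, a bounded batch at a time. */
static void my_drain_send_cq(struct ib_cq *cq)
{
	/* Each call dispatches wc->wr_cqe->done() for every polled entry;
	 * a return value below the budget means the CQ was emptied. */
	while (ib_process_cq_direct(cq, 16) == 16)
		cond_resched();
}
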
146 static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
148 WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
153 struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
154 struct dim *dim = cq->dim;
157 completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
159 irq_poll_complete(&cq->iop);
160 if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) {
161 trace_cq_reschedule(cq);
162 irq_poll_sched(&cq->iop);
172 static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
174 trace_cq_schedule(cq);
175 irq_poll_sched(&cq->iop);
180 struct ib_cq *cq = container_of(work, struct ib_cq, work);
183 completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
186 ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
187 queue_work(cq->comp_wq, &cq->work);
188 else if (cq->dim)
189 rdma_dim(cq->dim, completed);
192 static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
194 trace_cq_schedule(cq);
195 queue_work(cq->comp_wq, &cq->work);
201 * @private: driver private data, accessible from cq->cq_context
220 struct ib_cq *cq;
223 cq = rdma_zalloc_drv_obj(dev, ib_cq);
224 if (!cq)
227 cq->device = dev;
228 cq->cq_context = private;
229 cq->poll_ctx = poll_ctx;
230 atomic_set(&cq->usecnt, 0);
231 cq->comp_vector = comp_vector;
233 cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
234 if (!cq->wc)
237 rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
238 rdma_restrack_set_name(&cq->res, caller);
240 ret = dev->ops.create_cq(cq, &cq_attr, NULL);
244 rdma_dim_init(cq);
246 switch (cq->poll_ctx) {
248 cq->comp_handler = ib_cq_completion_direct;
251 cq->comp_handler = ib_cq_completion_softirq;
253 irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
254 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
258 cq->comp_handler = ib_cq_completion_workqueue;
259 INIT_WORK(&cq->work, ib_cq_poll_work);
260 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
261 cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
269 rdma_restrack_add(&cq->res);
270 trace_cq_alloc(cq, nr_cqe, comp_vector, poll_ctx);
271 return cq;
274 rdma_dim_destroy(cq);
275 cq->device->ops.destroy_cq(cq, NULL);
277 rdma_restrack_put(&cq->res);
278 kfree(cq->wc);
280 kfree(cq);
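
The allocation path above (source lines 220-280) is normally reached through the ib_alloc_cq() wrapper, and line 110 shows that every completion is dispatched through wc->wr_cqe->done(). A usage sketch of the consumer side, assuming the QP is created elsewhere with the returned CQ as its send CQ; struct my_ctx and the my_* helpers are hypothetical and error handling is kept minimal:

#include <linux/printk.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct my_ctx {
	struct ib_cqe cqe;	/* embedded so done() can find us again */
};

/* Runs from the CQ's polling context (a workqueue for IB_POLL_WORKQUEUE). */
static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_ctx *ctx = container_of(wc->wr_cqe, struct my_ctx, cqe);

	if (wc->status != IB_WC_SUCCESS)
		pr_err("send failed: %s\n", ib_wc_status_msg(wc->status));
	kfree(ctx);
}

/* Allocate a CQ intended to be passed as .send_cq when creating the QP:
 * 128 CQEs on completion vector 0, processed from a workqueue. */
static struct ib_cq *my_alloc_send_cq(struct ib_device *dev)
{
	return ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_WORKQUEUE);
}

/* Post one signalled send on a QP whose send CQ came from the helper above. */
static int my_post_signalled_send(struct ib_qp *qp)
{
	struct my_ctx *ctx;
	struct ib_send_wr wr = {};
	const struct ib_send_wr *bad_wr;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->cqe.done = my_send_done;

	wr.wr_cqe = &ctx->cqe;	/* routes the completion to my_send_done() */
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(qp, &wr, &bad_wr);
	if (ret)
		kfree(ctx);
	return ret;
}

The CQ is released with ib_free_cq() (lines 316-347) once the QP no longer references it.
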
289 * @private: driver private data, accessible from cq->cq_context
316 * @cq: completion queue to free.
318 void ib_free_cq(struct ib_cq *cq)
322 if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
324 if (WARN_ON_ONCE(cq->cqe_used))
327 switch (cq->poll_ctx) {
331 irq_poll_disable(&cq->iop);
335 cancel_work_sync(&cq->work);
341 rdma_dim_destroy(cq);
342 trace_cq_free(cq);
343 ret = cq->device->ops.destroy_cq(cq, NULL);
345 rdma_restrack_del(&cq->res);
346 kfree(cq->wc);
347 kfree(cq);
353 struct ib_cq *cq, *n;
357 list_for_each_entry_safe(cq, n, &dev->cq_pools[i],
359 WARN_ON(cq->cqe_used);
360 list_del(&cq->pool_entry);
361 cq->shared = false;
362 ib_free_cq(cq);
372 struct ib_cq *cq, *n;
389 cq = ib_alloc_cq(dev, NULL, nr_cqes, i, poll_ctx);
390 if (IS_ERR(cq)) {
391 ret = PTR_ERR(cq);
394 cq->shared = true;
395 list_add_tail(&cq->pool_entry, &tmp_list);
405 list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) {
406 cq->shared = false;
407 ib_free_cq(cq);
420 * @poll_ctx: cq polling context
422 * Finds a cq that satisfies the @comp_vector_hint and @nr_cqe requirements
423 * and claims entries in it for us. If no suitable cq is available, a new cq
424 * is allocated with the requirements and added to the device pool.
434 struct ib_cq *cq, *found = NULL;
458 list_for_each_entry(cq, &dev->cq_pools[poll_ctx],
464 if (vector != cq->comp_vector)
466 if (cq->cqe_used + nr_cqe > cq->cqe)
468 found = cq;
495 * @cq: The CQ to return.
498 void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe)
500 if (WARN_ON_ONCE(nr_cqe > cq->cqe_used))
503 spin_lock_irq(&cq->device->cq_pools_lock);
504 cq->cqe_used -= nr_cqe;
505 spin_unlock_irq(&cq->device->cq_pools_lock);
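
ib_cq_pool_get() and ib_cq_pool_put() (source lines 420-505) let ULPs share the device's pooled CQs instead of owning one per queue: the caller reserves nr_cqe entries on get and must return exactly that count on put. A hedged usage sketch; struct my_queue and the helpers are hypothetical:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Hypothetical per-queue state for a ULP using the shared CQ pool. */
struct my_queue {
	struct ib_cq *cq;
	unsigned int nr_cqe;
};

static int my_queue_init(struct ib_device *dev, struct my_queue *q,
			 unsigned int queue_depth, int comp_vector_hint)
{
	/* Reserve one CQE per outstanding work request on this queue. */
	q->nr_cqe = queue_depth;
	q->cq = ib_cq_pool_get(dev, q->nr_cqe, comp_vector_hint,
			       IB_POLL_WORKQUEUE);
	if (IS_ERR(q->cq))
		return PTR_ERR(q->cq);
	return 0;
}

static void my_queue_destroy(struct my_queue *q)
{
	/* Give back exactly the entries claimed in my_queue_init(). */
	ib_cq_pool_put(q->cq, q->nr_cqe);
}
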