Searched refs:wq (Results 1 - 25 of 109) sorted by relevance

/freebsd-11-stable/sys/dev/mlx5/mlx5_core/
wq.h
25 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/wq.h 341958 2018-12-12 12:46:12Z hselasky $
84 void *wqc, struct mlx5_wq_cyc *wq,
86 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
89 void *cqc, struct mlx5_cqwq *wq,
91 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
94 void *wqc, struct mlx5_wq_ll *wq,
96 u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
100 static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr) argument
102 return ctr & wq->sz_m1;
105 static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u1 argument
118 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq) argument
123 mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix) argument
128 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq) argument
133 mlx5_cqwq_pop(struct mlx5_cqwq *wq) argument
138 mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq) argument
143 mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq) argument
148 mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) argument
153 mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next) argument
160 mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix, __be16 *next_tail_next) argument
167 mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq) argument
172 mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix) argument
[all...]
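
The wq.h hits above revolve around one convention: the cyclic work queue size is a power of two and the struct stores sz_m1 (size minus one), so mlx5_wq_cyc_ctr2ix() turns a free-running counter into a ring index with a single mask. Below is a minimal user-space sketch of that pattern; the struct, field, and function names are simplified stand-ins for illustration, not the driver's definitions.

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for a cyclic work queue: only the field the index
 * math needs.  The real mlx5_wq_cyc also carries buffer, stride and
 * doorbell state. */
struct wq_cyc {
	uint16_t sz_m1;                    /* size - 1; size is a power of two */
};

/* Same idea as mlx5_wq_cyc_ctr2ix(): mask a free-running counter down to
 * a slot index, so wrap-around needs no branch or division. */
static inline uint16_t wq_cyc_ctr2ix(const struct wq_cyc *wq, uint16_t ctr)
{
	return ctr & wq->sz_m1;
}

int main(void)
{
	struct wq_cyc wq = { .sz_m1 = 8 - 1 };          /* 8-entry ring */

	for (uint16_t ctr = 5; ctr < 13; ctr++)
		printf("ctr=%2u -> ix=%u\n",
		    (unsigned)ctr, (unsigned)wq_cyc_ctr2ix(&wq, ctr));
	/* counters 8..12 wrap back to indices 0..4 */
	return 0;
}
```
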
mlx5_wq.c
29 #include "wq.h"
32 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) argument
34 return (u32)wq->sz_m1 + 1;
37 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) argument
39 return wq->sz_m1 + 1;
42 u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq) argument
44 return (u32)wq->sz_m1 + 1;
47 static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq) argument
49 return mlx5_wq_cyc_get_size(wq) << wq
52 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq) argument
57 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq) argument
62 mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_cyc *wq, struct mlx5_wq_ctrl *wq_ctrl) argument
99 mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *cqc, struct mlx5_cqwq *wq, struct mlx5_wq_ctrl *wq_ctrl) argument
137 mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_ll *wq, struct mlx5_wq_ctrl *wq_ctrl) argument
[all...]
mlx5_srq.c
60 static void set_wq(void *wq, struct mlx5_srq_attr *in) argument
62 MLX5_SET(wq, wq, wq_signature, !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
63 MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
64 MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
65 MLX5_SET(wq, wq, log_wq_sz, in->log_size);
66 MLX5_SET(wq, w
86 get_wq(void *wq, struct mlx5_srq_attr *in) argument
150 void *wq; local
[all...]
/freebsd-11-stable/cddl/contrib/opensolaris/tools/ctf/cvt/
ctfmerge.c
286 finalize_phase_one(workqueue_t *wq) argument
302 for (startslot = -1, i = 0; i < wq->wq_nwipslots; i++) {
303 if (wq->wq_wip[i].wip_batchid == wq->wq_lastdonebatch + 1) {
311 for (i = startslot; i < startslot + wq->wq_nwipslots; i++) {
312 int slotnum = i % wq->wq_nwipslots;
313 wip_t *wipslot = &wq->wq_wip[slotnum];
322 fifo_add(wq->wq_donequeue, wipslot->wip_td);
323 wq->wq_wip[slotnum].wip_td = NULL;
327 wq
334 init_phase_two(workqueue_t *wq) argument
362 wip_save_work(workqueue_t *wq, wip_t *slot, int slotnum) argument
400 worker_runphase1(workqueue_t *wq) argument
447 worker_runphase2(workqueue_t *wq) argument
518 worker_thread(workqueue_t *wq) argument
552 workqueue_t *wq = arg; local
633 wq_init(workqueue_t *wq, int nfiles) argument
694 start_threads(workqueue_t *wq) argument
723 join_threads(workqueue_t *wq) argument
748 static workqueue_t wq; variable
[all...]
/freebsd-11-stable/contrib/ofed/libcxgb4/
cq.c
42 static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq) argument
46 PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
47 wq, cq, cq->sw_cidx, cq->sw_pidx);
53 V_CQE_QPID(wq->sq.qid));
59 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count) argument
62 int in_use = wq->rq.in_use - count;
65 PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
66 wq, cq, wq->rq.in_use, count);
68 insert_recv_cqe(wq, c
74 insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq, struct t4_swsqe *swcqe) argument
98 struct t4_wq *wq = &qhp->wq; local
126 flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq) argument
163 create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe, struct t4_cqe *read_cqe) argument
175 advance_oldest_read(struct t4_wq *wq) argument
284 cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) argument
300 c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count) argument
345 poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit) argument
564 struct t4_wq *wq; local
[all...]
verbs.c
330 qhp->wq.qid_mask = resp.qid_mask;
332 qhp->wq.sq.qid = resp.sqid;
333 qhp->wq.sq.size = resp.sq_size;
334 qhp->wq.sq.memsize = resp.sq_memsize;
335 qhp->wq.sq.flags = 0;
336 qhp->wq.rq.msn = 1;
337 qhp->wq.rq.qid = resp.rqid;
338 qhp->wq.rq.size = resp.rq_size;
339 qhp->wq.rq.memsize = resp.rq_memsize;
347 qhp->wq
[all...]
t4.h
363 static inline int t4_rqes_posted(struct t4_wq *wq) argument
365 return wq->rq.in_use;
368 static inline int t4_rq_empty(struct t4_wq *wq) argument
370 return wq->rq.in_use == 0;
373 static inline int t4_rq_full(struct t4_wq *wq) argument
375 return wq->rq.in_use == (wq->rq.size - 1);
378 static inline u32 t4_rq_avail(struct t4_wq *wq) argument
380 return wq->rq.size - 1 - wq
383 t4_rq_produce(struct t4_wq *wq, u8 len16) argument
395 t4_rq_consume(struct t4_wq *wq) argument
406 t4_sq_empty(struct t4_wq *wq) argument
411 t4_sq_full(struct t4_wq *wq) argument
416 t4_sq_avail(struct t4_wq *wq) argument
421 t4_sq_onchip(struct t4_wq *wq) argument
426 t4_sq_produce(struct t4_wq *wq, u8 len16) argument
441 t4_sq_consume(struct t4_wq *wq) argument
476 t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t4, u8 len16, union t4_wr *wqe) argument
525 t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t4, u8 len16, union t4_recv_wr *wqe) argument
549 t4_wq_in_error(struct t4_wq *wq) argument
554 t4_set_wq_in_error(struct t4_wq *wq) argument
561 t4_wq_db_enabled(struct t4_wq *wq) argument
[all...]
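
The t4.h helpers listed above share one occupancy convention: the queue tracks in_use, empty means in_use == 0, full means in_use == size - 1, and avail is size - 1 - in_use, so one slot is always held back. The stand-alone sketch below mirrors that bookkeeping with invented struct and function names; it is an illustration, not the cxgb4 code.

```c
#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for the t4 receive queue bookkeeping: just the
 * occupancy fields, no hardware descriptors. */
struct rq {
	uint32_t size;                     /* slots in the ring        */
	uint32_t in_use;                   /* entries currently posted */
};

/* Mirrors the convention in the t4_rq_* helpers above: one slot is
 * always held back, so "full" means size - 1 entries in use. */
static inline int rq_empty(const struct rq *q)      { return q->in_use == 0; }
static inline int rq_full(const struct rq *q)       { return q->in_use == q->size - 1; }
static inline uint32_t rq_avail(const struct rq *q) { return q->size - 1 - q->in_use; }

int main(void)
{
	struct rq q = { .size = 4, .in_use = 0 };

	while (!rq_full(&q)) {             /* "post" work requests until full */
		q.in_use++;
		printf("posted, avail now %u\n", (unsigned)rq_avail(&q));
	}
	printf("full=%d empty=%d\n", rq_full(&q), rq_empty(&q));
	return 0;
}
```
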
qp.c
45 static void copy_wr_to_sq(struct t4_wq *wq, union t4_wr *wqe, u8 len16) argument
52 dst = &wq->sq.queue->flits[wq->sq.wq_pidx *
54 if (t4_sq_onchip(wq)) {
67 end = (uintptr_t)&wq->sq.queue[wq->sq.size];
74 memcpy(wq->sq.queue, src + len, total - len);
77 if (t4_sq_onchip(wq))
81 static void copy_wr_to_rq(struct t4_wq *wq, union t4_recv_wr *wqe, u8 len16) argument
88 dst = &wq
[all...]
dev.c
288 qhp->wq.sq.qid,
289 qhp->wq.error,
290 qhp->wq.flushed,
291 qhp->wq.qid_mask,
292 qhp->wq.sq.qid,
293 qhp->wq.sq.queue,
294 qhp->wq.sq.sw_sq,
295 qhp->wq.sq.cidx,
296 qhp->wq.sq.pidx,
297 qhp->wq
[all...]
/freebsd-11-stable/sys/dev/mlx5/mlx5_en/
mlx5_en_txrx.c
35 cqe = mlx5_cqwq_get_wqe(&cq->wq, mlx5_cqwq_get_ci(&cq->wq));
37 if ((cqe->op_own ^ mlx5_cqwq_get_wrap_cnt(&cq->wq)) & MLX5_CQE_OWNER_MASK)
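The ownership test in mlx5_en_txrx.c (cqe->op_own XORed with the CQ wrap count, masked with MLX5_CQE_OWNER_MASK) is the usual owner-bit trick: the device flips a bit in each CQE on every pass over the ring, and the consumer compares it against its own lap parity to tell fresh completions from stale ones. Below is a toy, self-contained illustration of that idea; the layout, mask value, and polarity are simplified assumptions, not the mlx5 CQE format.

```c
#include <stdint.h>
#include <stdio.h>

#define OWNER_MASK 0x1u  /* illustrative only; MLX5_CQE_OWNER_MASK lives in the mlx5 headers */

/* Toy completion queue: 8 entries, each reduced to its owner bit, plus a
 * free-running consumer counter.  The low bits of the counter index the
 * ring; the next bit up is the expected lap ("wrap") parity. */
struct cq {
	uint8_t  owner[8];                 /* owner bit the "device" wrote */
	uint32_t cc;                       /* consumer counter             */
};

/* A slot is a fresh completion when its owner bit matches the consumer's
 * current lap parity; a mismatch means it still holds last lap's data. */
static int cqe_is_valid(const struct cq *cq)
{
	uint32_t ix   = cq->cc & 7;        /* ring index */
	uint32_t wrap = (cq->cc >> 3) & 1; /* lap parity */

	return ((cq->owner[ix] ^ wrap) & OWNER_MASK) == 0;
}

int main(void)
{
	struct cq cq = { .cc = 0 };

	for (int i = 0; i < 8; i++)
		cq.owner[i] = 1;           /* mark every slot stale for lap 0 */

	printf("before completion: valid=%d\n", cqe_is_valid(&cq)); /* 0 */
	cq.owner[0] = 0;                   /* "device" writes a lap-0 completion */
	printf("after completion:  valid=%d\n", cqe_is_valid(&cq)); /* 1 */
	cq.cc++;                           /* consume it and move on */
	return 0;
}
```
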
mlx5_en_rx.c
110 while (!mlx5_wq_ll_is_full(&rq->wq)) {
111 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, rq->wq.head);
113 if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, rq->wq.head))) {
117 mlx5_wq_ll_push(&rq->wq, be16_to_cpu(wqe->next.next_wqe_index));
123 mlx5_wq_ll_update_db_record(&rq->wq);
303 memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, (cc & cq->wq.sz_m1)),
310 memcpy(mlx5_cqwq_get_wqe(&cq->wq, cc & cq->wq
[all...]
/freebsd-11-stable/sys/dev/cxgbe/iw_cxgbe/
cq.c
202 static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq) argument
206 CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
213 V_CQE_QPID(wq->sq.qid));
219 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count) argument
222 int in_use = wq->rq.in_use - count;
225 CTR5(KTR_IW_CXGBE, "%s wq %p cq %p rq.in_use %u skip count %u",
226 __func__, wq, cq, wq->rq.in_use, count);
228 insert_recv_cqe(wq, c
234 insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq, struct t4_swsqe *swcqe) argument
258 struct t4_wq *wq = &qhp->wq; local
287 flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq) argument
324 create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe, struct t4_cqe *read_cqe) argument
336 advance_oldest_read(struct t4_wq *wq) argument
438 cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) argument
454 c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count) argument
489 poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit) argument
717 struct t4_wq *wq; local
[all...]
t4.h
370 static inline int t4_rqes_posted(struct t4_wq *wq) argument
372 return wq->rq.in_use;
375 static inline int t4_rq_empty(struct t4_wq *wq) argument
377 return wq->rq.in_use == 0;
380 static inline int t4_rq_full(struct t4_wq *wq) argument
382 return wq->rq.in_use == (wq->rq.size - 1);
385 static inline u32 t4_rq_avail(struct t4_wq *wq) argument
387 return wq->rq.size - 1 - wq
390 t4_rq_produce(struct t4_wq *wq, u8 len16) argument
400 t4_rq_consume(struct t4_wq *wq) argument
408 t4_rq_host_wq_pidx(struct t4_wq *wq) argument
413 t4_rq_wq_size(struct t4_wq *wq) argument
423 t4_sq_empty(struct t4_wq *wq) argument
428 t4_sq_full(struct t4_wq *wq) argument
433 t4_sq_avail(struct t4_wq *wq) argument
438 t4_sq_produce(struct t4_wq *wq, u8 len16) argument
448 t4_sq_consume(struct t4_wq *wq) argument
458 t4_sq_host_wq_pidx(struct t4_wq *wq) argument
463 t4_sq_wq_size(struct t4_wq *wq) argument
485 t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe, u8 wc) argument
510 t4_ring_rq_db(struct t4_wq *wq, u16 inc, union t4_recv_wr *wqe, u8 wc) argument
533 t4_wq_in_error(struct t4_wq *wq) argument
538 t4_set_wq_in_error(struct t4_wq *wq) argument
[all...]
qp.c
103 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, argument
112 wq->rq.memsize, wq->rq.queue,
113 dma_unmap_addr(&wq->rq, mapping));
115 wq->sq.memsize, wq->sq.queue,
116 dma_unmap_addr(&wq->sq, mapping));
117 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
118 kfree(wq
125 create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, struct t4_cq *rcq, struct t4_cq *scq, struct c4iw_dev_ucontext *uctx) argument
[all...]
/freebsd-11-stable/sys/compat/linuxkpi/common/include/linux/
workqueue.h
65 #define WQ_EXEC_LOCK(wq) mtx_lock(&(wq)->exec_mtx)
66 #define WQ_EXEC_UNLOCK(wq) mtx_unlock(&(wq)->exec_mtx)
129 #define queue_work(wq, work) \
130 linux_queue_work_on(WORK_CPU_UNBOUND, wq, work)
135 #define queue_delayed_work(wq, dwork, delay) \
136 linux_queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay)
141 #define queue_work_on(cpu, wq, work) \
142 linux_queue_work_on(cpu, wq, wor
[all...]
wait.h
125 #define init_wait_entry(wq, flags) \
126 linux_init_wait_entry(wq, flags)
234 __add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq) argument
236 list_add(&wq->task_list, &wqh->task_list);
240 add_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq) argument
244 __add_wait_queue(wqh, wq);
249 __add_wait_queue_tail(wait_queue_head_t *wqh, wait_queue_t *wq) argument
251 list_add_tail(&wq->task_list, &wqh->task_list);
255 __add_wait_queue_entry_tail(wait_queue_head_t *wqh, wait_queue_entry_t *wq) argument
257 list_add_tail(&wq
261 __remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq) argument
267 remove_wait_queue(wait_queue_head_t *wqh, wait_queue_t *wq) argument
[all...]
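
The wait.h hits show two insertion flavours: list_add for __add_wait_queue and list_add_tail for the *_tail variants, i.e. head insertion versus tail insertion on the same circular doubly-linked list. The sketch below reimplements just that list logic in user space to show the resulting walk order; the types and names are illustrative stand-ins, not the linuxkpi definitions.

```c
#include <stdio.h>

/* Minimal circular doubly-linked list, enough to contrast list_add()
 * with list_add_tail(); invented stand-ins, not the linuxkpi types. */
struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_insert(struct list_head *n, struct list_head *prev,
    struct list_head *next)
{
	next->prev = n;
	n->next = next;
	n->prev = prev;
	prev->next = n;
}

/* list_add(): insert right after the head, as __add_wait_queue() does. */
static void list_add(struct list_head *n, struct list_head *head)
{
	list_insert(n, head, head->next);
}

/* list_add_tail(): insert just before the head, as the *_tail variants do. */
static void list_add_tail(struct list_head *n, struct list_head *head)
{
	list_insert(n, head->prev, head);
}

struct waiter {
	struct list_head task_list;        /* kept first so the cast below is valid */
	const char *name;
};

int main(void)
{
	struct list_head wqh;              /* stand-in for wait_queue_head_t.task_list */
	struct waiter a = { .name = "a" }, b = { .name = "b" }, c = { .name = "c" };

	list_init(&wqh);
	list_add(&a.task_list, &wqh);      /* like __add_wait_queue       */
	list_add(&b.task_list, &wqh);      /* newest head-inserted waiter */
	list_add_tail(&c.task_list, &wqh); /* like __add_wait_queue_tail  */

	/* Walk the queue in order: prints b, a, c. */
	for (struct list_head *p = wqh.next; p != &wqh; p = p->next)
		printf("%s\n", ((struct waiter *)p)->name);
	return 0;
}
```
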
/freebsd-11-stable/sys/compat/linuxkpi/common/src/
linux_work.c
93 struct workqueue_struct *wq; local
97 wq = work->work_queue;
98 if (unlikely(wq == NULL))
101 WQ_EXEC_LOCK(wq);
102 TAILQ_FOREACH(exec, &wq->exec_head, entry) {
109 WQ_EXEC_UNLOCK(wq);
129 linux_queue_work_on(int cpu __unused, struct workqueue_struct *wq, argument
140 if (atomic_read(&wq->draining) != 0)
150 work->work_queue = wq;
151 taskqueue_enqueue(wq
165 linux_queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned delay) argument
221 struct workqueue_struct *wq; local
554 struct workqueue_struct *wq; local
574 linux_destroy_workqueue(struct workqueue_struct *wq) argument
[all...]
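
The linux_work.c hits outline the queue_work() path: the work item remembers its target workqueue, new work is refused while the queue's draining counter is non-zero, and the actual deferral is handed to a FreeBSD taskqueue via taskqueue_enqueue. The sketch below models only the draining guard with stand-in structures; it runs the work synchronously instead of enqueuing it, so it illustrates the check, not the linuxkpi implementation.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the linuxkpi structures in the hits above: only the
 * state needed to show the "refuse work while draining" guard. */
struct workqueue_struct {
	atomic_int draining;               /* raised while the queue is being drained */
};

struct work_struct {
	struct workqueue_struct *work_queue;  /* remembered for later cancel/flush */
	void (*func)(struct work_struct *);
};

/* Sketch of the queue_work() path: bail out while the queue is draining,
 * otherwise record the queue and defer the work.  The real code hands the
 * item to taskqueue_enqueue(); here it simply runs it inline. */
static bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	if (atomic_load(&wq->draining) != 0)
		return false;              /* dropped: queue is being drained */

	work->work_queue = wq;
	work->func(work);                  /* stand-in for deferred execution */
	return true;
}

static void say_hello(struct work_struct *w)
{
	(void)w;
	puts("work ran");
}

int main(void)
{
	struct workqueue_struct wq = { .draining = 0 };
	struct work_struct w = { .func = say_hello };

	printf("queued: %d\n", queue_work(&wq, &w));  /* 1: work runs       */
	atomic_store(&wq.draining, 1);                /* start "draining"   */
	printf("queued: %d\n", queue_work(&wq, &w));  /* 0: work is refused */
	return 0;
}
```
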
linux_schedule.c
171 autoremove_wake_function(wait_queue_t *wq, unsigned int state, int flags, argument
177 task = wq->private;
179 list_del_init(&wq->task_list);
184 default_wake_function(wait_queue_t *wq, unsigned int state, int flags, argument
187 return (wake_up_task(wq->private, state));
191 linux_init_wait_entry(wait_queue_t *wq, int flags) argument
194 memset(wq, 0, sizeof(*wq));
195 wq->flags = flags;
196 wq
222 linux_prepare_to_wait(wait_queue_head_t *wqh, wait_queue_t *wq, int state) argument
233 linux_finish_wait(wait_queue_head_t *wqh, wait_queue_t *wq) argument
257 linux_wait_event_common(wait_queue_head_t *wqh, wait_queue_t *wq, int timeout, unsigned int state, spinlock_t *lock) argument
[all...]
/freebsd-11-stable/sys/dev/cxgb/ulp/iw_cxgb/
iw_cxgb_hal.c
295 struct t3_wq *wq, struct cxio_ucontext *uctx)
297 int depth = 1UL << wq->size_log2;
298 int rqsize = 1UL << wq->rq_size_log2;
300 wq->qpid = get_qpid(rdev_p, uctx);
301 if (!wq->qpid)
304 wq->rq = malloc(depth * sizeof(struct t3_swrq), M_DEVBUF, M_NOWAIT|M_ZERO);
305 if (!wq->rq)
308 wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize);
309 if (!wq->rq_addr)
312 wq
294 cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain, struct t3_wq *wq, struct cxio_ucontext *uctx) argument
362 cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq, struct cxio_ucontext *uctx) argument
383 insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq) argument
402 cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count) argument
421 insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq, struct t3_swsq *sqp) argument
443 cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count) argument
482 cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq) argument
501 cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count) argument
519 cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count) argument
1077 flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq) argument
1107 create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe, struct t3_cqe *read_cqe) argument
1122 advance_oldest_read(struct t3_wq *wq) argument
1154 cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit) argument
[all...]
iw_cxgb_qp.c
282 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
283 qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
284 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
285 qhp->wq.rq_size_log2)].pbl_addr = 0;
346 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
347 qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
348 qhp->wq
[all...]
iw_cxgb_cq.c
93 struct t3_wq *wq; local
106 wq = NULL;
109 wq = &(qhp->wq);
111 ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
225 if (wq)
/freebsd-11-stable/sys/dev/oce/
oce_queue.c
49 static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
50 static void oce_wq_free(struct oce_wq *wq);
51 static void oce_wq_del(struct oce_wq *wq);
90 struct oce_wq *wq; local
95 for_all_wq_queues(sc, wq, i) {
96 sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
98 if (!sc->wq[i])
132 for_all_wq_queues(sc, wq, i) {
133 rc = oce_wq_create(wq, sc->eq[i]);
136 wq
169 struct oce_wq *wq; local
212 struct oce_wq *wq; local
287 oce_wq_free(struct oce_wq *wq) argument
325 oce_wq_create(struct oce_wq *wq, struct oce_eq *eq) argument
372 oce_wq_del(struct oce_wq *wq) argument
985 oce_start_wq(struct oce_wq *wq) argument
1087 oce_drain_wq_cq(struct oce_wq *wq) argument
[all...]
oce_if.c
157 static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
158 static void oce_process_tx_completion(struct oce_wq *wq);
160 struct oce_wq *wq);
655 struct oce_wq *wq = NULL; local
665 wq = sc->wq[queue_index];
667 LOCK(&wq->tx_lock);
668 status = oce_multiq_transmit(ifp, m, wq);
669 UNLOCK(&wq->tx_lock);
684 while ((m = buf_ring_dequeue_sc(sc->wq[
1061 struct oce_wq *wq = sc->wq[wq_index]; local
1257 oce_process_tx_completion(struct oce_wq *wq) argument
1285 oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq) argument
1373 struct oce_wq *wq = arg; local
1438 struct oce_wq *wq = (struct oce_wq *)arg; local
1475 oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq) argument
2382 struct oce_wq *wq; local
2545 struct oce_wq *wq; local
2589 struct oce_wq *wq; local
2637 struct oce_wq *wq; local
[all...]
/freebsd-11-stable/sys/dev/mlx5/mlx5_fpga/
conn.h
42 #include <dev/mlx5/mlx5_core/wq.h>
57 struct mlx5_cqwq wq; member in struct:mlx5_fpga_conn::__anon4926
67 struct mlx5_wq_qp wq; member in struct:mlx5_fpga_conn::__anon4927
/freebsd-11-stable/sys/dev/mlx5/mlx5_ib/
mlx5_ib_cq.c
97 static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx) argument
99 switch (wq->wr_data[idx]) {
116 struct mlx5_ib_wq *wq, int idx)
152 wc->opcode = get_umr_comp(wq, idx);
168 struct mlx5_ib_wq *wq; local
192 wq = &qp->rq;
193 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
194 ++wq
115 handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, struct mlx5_ib_wq *wq, int idx) argument
437 struct mlx5_ib_wq *wq; local
467 struct mlx5_ib_wq *wq; local
519 struct mlx5_ib_wq *wq; local
[all...]
