Lines matching refs:cq (the leading number on each line below is that line's position in the source file)

36 #include <dev/mlx4/cq.h>
42 static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
58 struct mlx4_en_cq *cq;
61 cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
62 if (!cq) {
63 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
64 if (!cq) {
70 cq->size = entries;
71 cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
73 cq->tq = taskqueue_create_fast("mlx4_en_que", M_NOWAIT,
74 taskqueue_thread_enqueue, &cq->tq);
76 TASK_INIT(&cq->cq_task, 0, mlx4_en_rx_que, cq);
77 taskqueue_start_threads(&cq->tq, 1, PI_NET, "%s rx cq",
81 TASK_INIT(&cq->cq_task, 0, mlx4_en_tx_que, cq);
82 taskqueue_start_threads(&cq->tq, 1, PI_NET, "%s tx cq",
86 cq->ring = ring;
87 cq->is_tx = mode;
88 cq->vector = mdev->dev->caps.num_comp_vectors;
89 spin_lock_init(&cq->lock);
91 err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
92 cq->buf_size, 2 * PAGE_SIZE);
96 err = mlx4_en_map_buffer(&cq->wqres.buf);
100 cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
101 *pcq = cq;
106 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
108 kfree(cq);
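
The fragments above come from the CQ creation path: the driver tries a node-local zeroed allocation and falls back to a plain one, sizes the CQE buffer from the device's cqe_size, creates a per-CQ fast taskqueue whose thread runs either the RX or TX dequeue task, and then allocates and maps the hardware queue resources. Below is a minimal, self-contained C sketch of just the allocate-and-size step; sketch_cq, zalloc_on_node and sketch_create_cq are illustrative names, not driver symbols.

#include <stdlib.h>

struct sketch_cq {
    int size;       /* number of CQEs */
    int buf_size;   /* bytes: size * cqe_size */
};

/* Hypothetical stand-in for kzalloc_node(): pretend to prefer a NUMA
 * node, but really just return zeroed memory (possibly NULL, which
 * exercises the fallback path). */
static void *zalloc_on_node(size_t len, int node)
{
    (void)node;                 /* no real NUMA placement in this sketch */
    return calloc(1, len);
}

static struct sketch_cq *sketch_create_cq(int entries, int cqe_size, int node)
{
    struct sketch_cq *cq;

    /* Try node-local first, fall back to a generic zeroed allocation,
     * mirroring the kzalloc_node()/kzalloc() pair in the driver. */
    cq = zalloc_on_node(sizeof(*cq), node);
    if (cq == NULL)
        cq = calloc(1, sizeof(*cq));
    if (cq == NULL)
        return NULL;

    cq->size = entries;
    cq->buf_size = cq->size * cqe_size;
    return cq;
}
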
113 int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
121 cq->dev = mdev->pndev[priv->port];
122 cq->mcq.set_ci_db = cq->wqres.db.db;
123 cq->mcq.arm_db = cq->wqres.db.db + 1;
124 *cq->mcq.set_ci_db = 0;
125 *cq->mcq.arm_db = 0;
126 memset(cq->buf, 0, cq->buf_size);
128 if (cq->is_tx == RX) {
130 cq->vector)) {
131 cq->vector = cq_idx % mdev->dev->caps.num_comp_vectors;
134 &cq->vector);
137 cq->vector);
151 cq->vector = rx_cq->vector;
154 if (!cq->is_tx)
155 cq->size = priv->rx_ring[cq->ring]->actual_size;
157 err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
158 &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
159 cq->vector, 0, timestamp_en);
163 cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
164 cq->mcq.event = mlx4_en_cq_event;
166 if (cq->is_tx) {
167 init_timer(&cq->timer);
168 cq->timer.function = mlx4_en_poll_tx_cq;
169 cq->timer.data = (unsigned long) cq;
177 mlx4_release_eq(mdev->dev, cq->vector);
178 cq->vector = mdev->dev->caps.num_comp_vectors;
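
The activation fragments set up the two software doorbell words (consumer index and arm request) that sit back to back in the queue's doorbell record, choose a completion vector (reusing another ring's vector when a dedicated EQ cannot be assigned), allocate the hardware CQ, install the completion and async-event callbacks, and, for TX CQs, prepare a polling timer. A hedged sketch of the doorbell and callback wiring follows; struct db_cq and the db_* helpers are illustrative, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct db_cq {
    bool is_tx;
    volatile uint32_t *set_ci_db;              /* consumer-index doorbell word */
    volatile uint32_t *arm_db;                 /* arm-request doorbell word    */
    void (*comp)(struct db_cq *);              /* completion callback          */
    void (*event)(struct db_cq *, int event);  /* async-event callback         */
};

static void db_tx_irq(struct db_cq *cq) { (void)cq; puts("tx completion"); }
static void db_rx_irq(struct db_cq *cq) { (void)cq; puts("rx completion"); }
static void db_cq_event(struct db_cq *cq, int ev) { (void)cq; printf("async event %d\n", ev); }

static void db_activate(struct db_cq *cq, uint32_t db_page[2])
{
    /* Two consecutive doorbell words, mirroring wqres.db.db and
     * wqres.db.db + 1 above, both cleared before the CQ is used. */
    cq->set_ci_db = &db_page[0];
    cq->arm_db    = &db_page[1];
    *cq->set_ci_db = 0;
    *cq->arm_db    = 0;

    /* Direction selects the completion handler, as in
     * cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq; */
    cq->comp  = cq->is_tx ? db_tx_irq : db_rx_irq;
    cq->event = db_cq_event;
}
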
185 struct mlx4_en_cq *cq = *pcq;
187 taskqueue_drain(cq->tq, &cq->cq_task);
188 taskqueue_free(cq->tq);
189 mlx4_en_unmap_buffer(&cq->wqres.buf);
190 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
191 if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
192 cq->is_tx == RX)
193 mlx4_release_eq(priv->mdev->dev, cq->vector);
194 cq->vector = 0;
195 cq->buf_size = 0;
196 cq->buf = NULL;
197 kfree(cq);
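
The destroy fragments tear down in roughly reverse order of creation: drain and free the taskqueue, unmap and free the hardware queue resources, release the EQ vector if one was claimed for an RX CQ, scrub the bookkeeping fields, and free the structure. A small sketch of the double-pointer teardown idiom follows; struct owned_cq and sketch_destroy_cq are illustrative, and clearing *pcq at the end is an assumption of the sketch rather than something visible in the matched lines.

#include <stdlib.h>

struct owned_cq {
    void *buf;          /* mapped CQE buffer */
    int   buf_size;
    int   vector;       /* completion vector */
};

/* Free a CQ through a double pointer, echoing how the destroy path
 * above starts from "struct mlx4_en_cq *cq = *pcq;" and zeroes
 * buf/buf_size/vector before kfree(). */
static void sketch_destroy_cq(struct owned_cq **pcq)
{
    struct owned_cq *cq = *pcq;

    if (cq == NULL)
        return;

    free(cq->buf);          /* release the buffer first ...            */
    cq->buf = NULL;         /* ... then scrub the bookkeeping ...      */
    cq->buf_size = 0;
    cq->vector = 0;
    free(cq);               /* ... and finally the structure itself.   */
    *pcq = NULL;            /* assumption: invalidate the caller's handle */
}
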
201 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
203 taskqueue_drain(cq->tq, &cq->cq_task);
204 if (!cq->is_tx) {
207 del_timer_sync(&cq->timer);
210 mlx4_cq_free(priv->mdev->dev, &cq->mcq);
213 /* Set rx cq moderation parameters */
214 int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
216 return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
217 cq->moder_cnt, cq->moder_time);
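
mlx4_en_set_cq_moder() pushes the CQ's interrupt-moderation pair (moder_cnt, moder_time) to the device via mlx4_cq_modify(). Conceptually, the completion interrupt is delayed until either a completion-count bound or a time bound is reached; the sketch below states that rule in plain C with illustrative names (struct moder_params and should_fire_irq are not driver symbols, and the exact hardware semantics are an assumption here).

#include <stdbool.h>
#include <stdint.h>

struct moder_params {
    uint16_t cnt;       /* max coalesced completions        */
    uint16_t time_us;   /* max coalescing delay, microseconds */
};

/* Fire once either bound is hit: enough completions have piled up,
 * or too much time has passed since the first unreported one. */
static bool should_fire_irq(const struct moder_params *p,
                            unsigned pending, unsigned elapsed_us)
{
    return pending >= p->cnt || elapsed_us >= p->time_us;
}
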
220 int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
222 mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
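
The last fragment shows mlx4_en_arm_cq() re-enabling the completion interrupt by calling mlx4_cq_arm() with MLX4_CQ_DB_REQ_NOT and the driver's UAR mapping. A self-contained sketch of the software side of arming follows; the constant value, the word layout, and the struct and helper names are stand-ins, and the real call additionally rings the UAR doorbell, which this sketch omits.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for MLX4_CQ_DB_REQ_NOT: request notification on the next
 * completion. The value and the cons_index packing are illustrative. */
#define SKETCH_DB_REQ_NOT (2u << 24)

struct armable_cq {
    volatile uint32_t *arm_db;   /* arm doorbell word in the DB record */
    uint32_t cons_index;         /* software consumer index            */
};

static void sketch_arm_cq(struct armable_cq *cq)
{
    /* Record the arm request together with the current consumer index
     * so the device knows which completions have already been seen. */
    *cq->arm_db = SKETCH_DB_REQ_NOT | (cq->cons_index & 0xffffffu);
}

int main(void)
{
    uint32_t db = 0;
    struct armable_cq cq = { .arm_db = &db, .cons_index = 7 };

    sketch_arm_cq(&cq);
    printf("arm doorbell = 0x%08x\n", (unsigned)db);
    return 0;
}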