Lines Matching refs:mq
92 q = &cq->mq;
138 ce = c2_mq_consume(&cq->mq);
150 c2_mq_free(&cq->mq);
151 ce = c2_mq_consume(&cq->mq);
196 c2_mq_free(&cq->mq);
230 shared = cq->mq.peer;
250 ret = !c2_mq_empty(&cq->mq);
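The matches above (lines 92-250) are the event-consumption path: c2_mq_consume() peeks the next completion element off the host-side message queue, c2_mq_free() hands the slot back to the adapter once the element has been processed, c2_mq_empty() reports whether anything is still pending, and mq.peer is the mapped adapter-side half of the queue. A minimal sketch of that consume/free pattern, assuming this driver's types; handle_ce() is a hypothetical stand-in for the real completion handling:

    /* Minimal consume/free step over the CQ's host-side message queue.
     * handle_ce() is hypothetical; the real driver translates each
     * element into an ib_wc completion entry. */
    static int drain_one_sketch(struct c2_cq *cq)
    {
        struct c2wr_ce *ce;

        ce = c2_mq_consume(&cq->mq);    /* peek next completion event */
        if (!ce)
            return -EAGAIN;             /* queue is empty */

        handle_ce(ce);                  /* hypothetical handler */

        c2_mq_free(&cq->mq);            /* return the slot to the adapter */
        return 0;
    }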
257 static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
259 dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
260 mq->msg_pool.host, dma_unmap_addr(mq, mapping));
263 static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
269 &mq->host_dma, GFP_KERNEL);
273 c2_mq_rep_init(mq,
281 dma_unmap_addr_set(mq, mapping, mq->host_dma);
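Lines 257-260 show c2_free_cq_buf(), and lines 263-281 its counterpart c2_alloc_cq_buf(): the message pool is carved out of coherent DMA memory, the MQ is initialized as a host-target reply queue, and the bus address is recorded with dma_unmap_addr_set() so the free path can recover it via dma_unmap_addr() and hand the same handle back to dma_free_coherent(). A sketch of the allocator assembled from the fragments above; the c2_mq_rep_init() argument order is an assumption here:

    /* Sketch assembled from the matched fragments; the c2_mq_rep_init()
     * argument order is assumed. */
    static int alloc_cq_buf_sketch(struct c2_dev *c2dev, struct c2_mq *mq,
                                   int q_size, int msg_size)
    {
        u8 *pool_start;

        pool_start = dma_alloc_coherent(&c2dev->pcidev->dev,
                                        q_size * msg_size,
                                        &mq->host_dma, GFP_KERNEL);
        if (!pool_start)
            return -ENOMEM;

        /* Host-target reply queue; the index and peer pointer are only
         * known after the adapter answers the CQ-create request. */
        c2_mq_rep_init(mq, 0, q_size, msg_size, pool_start,
                       NULL, C2_MQ_HOST_TARGET);

        /* Stash the bus address so c2_free_cq_buf() can pass the same
         * handle back to dma_free_coherent(). */
        dma_unmap_addr_set(mq, mapping, mq->host_dma);
        return 0;
    }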
301 cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
302 &cq->mq.shared_dma, GFP_KERNEL);
303 if (!cq->mq.shared)
307 err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
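Lines 301-307 are the start of CQ creation: a shared consumer-index slot is taken from the device-wide kern_mqsp_pool, then the message pool is sized at entries + 1 slots of C2_CQ_MSG_SIZE each. A sketch of that ordering, with the reverse-order unwind the function's error path implies:

    /* Sketch of the allocation order from lines 301-307, unwinding in
     * reverse on failure as the driver's bail-out labels do. */
    static int init_cq_bufs_sketch(struct c2_dev *c2dev, struct c2_cq *cq,
                                   int entries)
    {
        int err;

        /* Shared pointer slot, visible to both host and adapter. */
        cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
                                      &cq->mq.shared_dma, GFP_KERNEL);
        if (!cq->mq.shared)
            return -ENOMEM;

        /* Coherent message pool, one slot more than requested. */
        err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1,
                              C2_CQ_MSG_SIZE);
        if (err)
            c2_free_mqsp(cq->mq.shared);
        return err;
    }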
321 wr.msg_size = cpu_to_be32(cq->mq.msg_size);
322 wr.depth = cpu_to_be32(cq->mq.q_size);
323 wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
324 wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
349 cq->mq.index = be32_to_cpu(reply->mq_index);
352 cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
353 if (!cq->mq.peer) {
369 cq->cqn = cq->mq.index;
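Lines 321-324 marshal the CQ geometry into the create request: the message crosses the PCI bus, so the 32-bit size and depth go through cpu_to_be32() and the two DMA addresses through cpu_to_be64(). Lines 349-369 handle the adapter's answer: the MQ index comes back through be32_to_cpu(), one page of the adapter's side of the queue is mapped as cq->mq.peer, and the index doubles as the CQ number. A sketch of the reply side; peer_pa is assumed here to be computed from an adapter-supplied offset:

    /* Sketch of the reply handling from lines 349-369; peer_pa is
     * assumed to come from an offset carried in the adapter's reply. */
    cq->mq.index = be32_to_cpu(reply->mq_index);

    cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
    if (!cq->mq.peer) {
        err = -ENOMEM;
        goto bail2;                 /* free buffer, then shared slot */
    }

    cq->cqn = cq->mq.index;         /* the MQ index doubles as the CQN */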
379 c2_free_cq_buf(c2dev, &cq->mq);
381 c2_free_mqsp(cq->mq.shared);
397 c2dev->qptr_array[cq->mq.index] = NULL;
433 c2_free_cq_buf(c2dev, &cq->mq);
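Lines 379-433 are the teardown paths. On a failed create (lines 379-381) the buffer and then the shared slot are released in reverse order of allocation; on destroy (lines 397 and 433) the qptr_array entry is cleared before the buffer is freed, so the event path can no longer look the CQ up by its MQ index. A sketch of that destroy ordering; reference counting and the kernel/user-CQ distinction are omitted:

    /* Sketch of the destroy ordering implied by lines 397 and 433. */
    static void free_cq_sketch(struct c2_dev *c2dev, struct c2_cq *cq)
    {
        /* Unhook the CQ from the index table first, so interrupt-time
         * lookups by MQ index stop finding it... */
        c2dev->qptr_array[cq->mq.index] = NULL;

        /* ...then release the coherent message pool. */
        c2_free_cq_buf(c2dev, &cq->mq);
    }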