Lines Matching refs:rq

62 #define RQ_MASKED_IDX(rq, idx)  ((idx) & (rq)->wq->mask)
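RQ_MASKED_IDX() wraps a ring index by AND-ing it with the work queue's mask. Below is a minimal, self-contained sketch of that wrap behaviour, assuming the usual convention that the mask is q_depth - 1 for a power-of-two depth; the demo_wq/demo_rq types are illustrative stand-ins, not driver structures.

	#include <stdio.h>
	#include <stdint.h>

	/* hypothetical stand-ins for the driver's wq/rq structures */
	struct demo_wq { uint16_t mask; };            /* assumed: mask == q_depth - 1 */
	struct demo_rq { struct demo_wq *wq; };

	/* same shape as RQ_MASKED_IDX(rq, idx): wrap an index into the ring */
	#define DEMO_RQ_MASKED_IDX(rq, idx)  ((idx) & (rq)->wq->mask)

	int main(void)
	{
		struct demo_wq wq = { .mask = 256 - 1 };  /* assumed q_depth of 256 */
		struct demo_rq rq = { .wq = &wq };

		/* an index just past the end of the ring wraps back to the start */
		printf("%u\n", (unsigned)DEMO_RQ_MASKED_IDX(&rq, 255 + 1));  /* prints 0  */
		printf("%u\n", (unsigned)DEMO_RQ_MASKED_IDX(&rq, 300));      /* prints 44 */
		return 0;
	}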
155 struct hinic_rq *rq, u16 global_qid)
162 wq = rq->wq;
181 HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR);
202 rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
203 rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);
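The context-preparation fragments split the 64-bit PI DMA address into high and low 32-bit halves for the rq context. A tiny self-contained illustration of that split, with upper_32_bits()/lower_32_bits() open-coded for userspace and an arbitrary example address:

	#include <stdio.h>
	#include <stdint.h>

	/* userspace stand-ins for the kernel's upper_32_bits()/lower_32_bits() */
	#define demo_upper_32_bits(n)  ((uint32_t)((n) >> 32))
	#define demo_lower_32_bits(n)  ((uint32_t)((n) & 0xffffffffULL))

	int main(void)
	{
		uint64_t pi_dma_addr = 0x0000123487654321ULL;  /* arbitrary example address */

		printf("pi_paddr_hi = 0x%08x\n", demo_upper_32_bits(pi_dma_addr));  /* 0x00001234 */
		printf("pi_paddr_lo = 0x%08x\n", demo_lower_32_bits(pi_dma_addr));  /* 0x87654321 */
		return 0;
	}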
242 * alloc_rq_skb_arr - allocate rq array for saved skb
243 * @rq: HW Receive Queue
247 static int alloc_rq_skb_arr(struct hinic_rq *rq)
249 struct hinic_wq *wq = rq->wq;
252 skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);
253 rq->saved_skb = vzalloc(skb_arr_size);
254 if (!rq->saved_skb)
261 * free_rq_skb_arr - free rq array for saved skb
262 * @rq: HW Receive Queue
264 static void free_rq_skb_arr(struct hinic_rq *rq)
266 vfree(rq->saved_skb);
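Taken together, the alloc_rq_skb_arr()/free_rq_skb_arr() fragments above describe a vzalloc'd table holding one skb pointer per WQE, released with vfree(). A hedged reconstruction of the pair; the lines not shown in the listing, the error return value in particular, are assumptions:

	static int alloc_rq_skb_arr(struct hinic_rq *rq)
	{
		struct hinic_wq *wq = rq->wq;
		size_t skb_arr_size;

		/* one saved-skb slot per WQE in the ring */
		skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);
		rq->saved_skb = vzalloc(skb_arr_size);
		if (!rq->saved_skb)
			return -ENOMEM;		/* assumed error code */

		return 0;
	}

	static void free_rq_skb_arr(struct hinic_rq *rq)
	{
		vfree(rq->saved_skb);
	}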
311 * alloc_rq_cqe - allocate rq completion queue elements
312 * @rq: HW Receive Queue
316 static int alloc_rq_cqe(struct hinic_rq *rq)
318 struct hinic_hwif *hwif = rq->hwif;
321 struct hinic_wq *wq = rq->wq;
324 cqe_size = wq->q_depth * sizeof(*rq->cqe);
325 rq->cqe = vzalloc(cqe_size);
326 if (!rq->cqe)
329 cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma);
330 rq->cqe_dma = vzalloc(cqe_dma_size);
331 if (!rq->cqe_dma)
335 rq->cqe[i] = dma_alloc_coherent(&pdev->dev,
336 sizeof(*rq->cqe[i]),
337 &rq->cqe_dma[i], GFP_KERNEL);
338 if (!rq->cqe[i])
346 dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j],
347 rq->cqe_dma[j]);
349 vfree(rq->cqe_dma);
352 vfree(rq->cqe);
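The alloc_rq_cqe() fragments show the common allocate-then-unwind pattern: two vzalloc'd per-WQE arrays (CQE pointers and their DMA addresses), one coherent DMA buffer per entry, and an error path that frees only the entries already allocated before dropping both arrays. A hedged reconstruction of how those lines likely fit together; the goto labels, locals, the pdev lookup through hwif, and the error codes are assumptions:

	static int alloc_rq_cqe(struct hinic_rq *rq)
	{
		struct hinic_hwif *hwif = rq->hwif;
		struct pci_dev *pdev = hwif->pdev;	/* assumed field */
		struct hinic_wq *wq = rq->wq;
		size_t cqe_dma_size, cqe_size;
		int i, j;

		cqe_size = wq->q_depth * sizeof(*rq->cqe);
		rq->cqe = vzalloc(cqe_size);
		if (!rq->cqe)
			return -ENOMEM;

		cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma);
		rq->cqe_dma = vzalloc(cqe_dma_size);
		if (!rq->cqe_dma)
			goto err_cqe_dma_arr;

		/* one coherent completion entry per WQE */
		for (i = 0; i < wq->q_depth; i++) {
			rq->cqe[i] = dma_alloc_coherent(&pdev->dev,
							sizeof(*rq->cqe[i]),
							&rq->cqe_dma[i], GFP_KERNEL);
			if (!rq->cqe[i])
				goto err_cqe_alloc;
		}

		return 0;

	err_cqe_alloc:
		/* unwind only the entries that were successfully allocated */
		for (j = 0; j < i; j++)
			dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j],
					  rq->cqe_dma[j]);

		vfree(rq->cqe_dma);

	err_cqe_dma_arr:
		vfree(rq->cqe);
		return -ENOMEM;
	}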
357 * free_rq_cqe - free rq completion queue elements
358 * @rq: HW Receive Queue
360 static void free_rq_cqe(struct hinic_rq *rq)
362 struct hinic_hwif *hwif = rq->hwif;
364 struct hinic_wq *wq = rq->wq;
368 dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i],
369 rq->cqe_dma[i]);
371 vfree(rq->cqe_dma);
372 vfree(rq->cqe);
377 * @rq: HW Receive Queue
380 * @entry: msix entry for rq
384 int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
391 rq->hwif = hwif;
393 rq->wq = wq;
395 rq->irq = entry->vector;
396 rq->msix_entry = entry->entry;
398 rq->buf_sz = HINIC_RX_BUF_SZ;
400 err = alloc_rq_skb_arr(rq);
402 dev_err(&pdev->dev, "Failed to allocate rq priv data\n");
406 err = alloc_rq_cqe(rq);
408 dev_err(&pdev->dev, "Failed to allocate rq cqe\n");
413 pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
414 rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
415 &rq->pi_dma_addr, GFP_KERNEL);
416 if (!rq->pi_virt_addr) {
424 free_rq_cqe(rq);
427 free_rq_skb_arr(rq);
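The hinic_init_rq() fragments give the setup order: record hwif/wq/interrupt information and the RX buffer size, allocate the saved-skb array, then the CQEs, then a small coherent buffer through which the hardware reports the producer index; each failure unwinds what came before. A hedged reconstruction; locals, labels, the pdev lookup, and the -ENOMEM on the PI allocation are assumptions:

	int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
			  struct hinic_wq *wq, struct msix_entry *entry)
	{
		struct pci_dev *pdev = hwif->pdev;	/* assumed field */
		size_t pi_size;
		int err;

		rq->hwif = hwif;
		rq->wq = wq;

		rq->irq = entry->vector;
		rq->msix_entry = entry->entry;

		rq->buf_sz = HINIC_RX_BUF_SZ;

		err = alloc_rq_skb_arr(rq);
		if (err) {
			dev_err(&pdev->dev, "Failed to allocate rq priv data\n");
			return err;
		}

		err = alloc_rq_cqe(rq);
		if (err) {
			dev_err(&pdev->dev, "Failed to allocate rq cqe\n");
			goto err_alloc_rq_cqe;
		}

		/* coherent buffer the HW uses to report/read the producer index */
		pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
		rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
						      &rq->pi_dma_addr, GFP_KERNEL);
		if (!rq->pi_virt_addr) {
			err = -ENOMEM;			/* assumed error code */
			goto err_pi_virt;
		}

		return 0;

	err_pi_virt:
		free_rq_cqe(rq);

	err_alloc_rq_cqe:
		free_rq_skb_arr(rq);
		return err;
	}

The hinic_clean_rq() fragments below mirror this in reverse: free the PI buffer, then the CQEs, then the saved-skb array.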
433 * @rq: HW Receive Queue
435 void hinic_clean_rq(struct hinic_rq *rq)
437 struct hinic_hwif *hwif = rq->hwif;
441 pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
442 dma_free_coherent(&pdev->dev, pi_size, rq->pi_virt_addr,
443 rq->pi_dma_addr);
445 free_rq_cqe(rq);
446 free_rq_skb_arr(rq);
464 * @rq: recv queue
468 int hinic_get_rq_free_wqebbs(struct hinic_rq *rq)
470 struct hinic_wq *wq = rq->wq;
790 * @rq: rq to get wqe from
796 struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
799 struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(rq->wq, wqe_size,
809 * hinic_rq_write_wqe - write the wqe to the rq
810 * @rq: recv queue
815 void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
820 rq->saved_skb[prod_idx] = skb;
825 hinic_write_wqe(rq->wq, hw_wqe, sizeof(*rq_wqe));
830 * @rq: recv queue
837 struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
846 hw_wqe = hinic_read_wqe(rq->wq, wqe_size, cons_idx);
850 cqe = rq->cqe[*cons_idx];
858 *skb = rq->saved_skb[*cons_idx];
865 * @rq: recv queue
872 struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
877 struct hinic_wq *wq = rq->wq;
884 *cons_idx = RQ_MASKED_IDX(rq, *cons_idx + num_wqebbs);
886 *skb = rq->saved_skb[*cons_idx];
895 * @rq: recv queue
899 void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
902 struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
912 hinic_put_wqe(rq->wq, wqe_size);
917 * @rq: recv queue
922 void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe,
925 struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
935 * @rq: recv queue
940 void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx,
945 struct hinic_rq_cqe *cqe = rq->cqe[prod_idx];
947 dma_addr_t cqe_dma = rq->cqe_dma[prod_idx];
964 * hinic_rq_update - update pi of the rq
965 * @rq: recv queue
968 void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx)
970 *rq->pi_virt_addr = cpu_to_be16(RQ_MASKED_IDX(rq, prod_idx + 1));
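hinic_rq_update() publishes the next producer index to the device: the index is advanced by one, wrapped with RQ_MASKED_IDX(), converted to big-endian, and written into the coherent pi_virt_addr buffer set up in hinic_init_rq(). A small userspace sketch of the same arithmetic, using htons() as a stand-in for cpu_to_be16() and an assumed ring depth of 256:

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>		/* htons() stands in for cpu_to_be16() */

	int main(void)
	{
		uint16_t mask = 256 - 1;	/* assumed q_depth of 256 */
		uint16_t prod_idx = 43;

		/* next slot after the current producer index, wrapped into the ring */
		uint16_t wrapped = (uint16_t)((prod_idx + 1) & mask);
		/* stored big-endian, matching cpu_to_be16() in the driver */
		uint16_t pi_be = htons(wrapped);

		printf("wrapped pi = %u, stored bytes = %02x %02x\n",
		       (unsigned)wrapped,
		       (unsigned)((uint8_t *)&pi_be)[0],
		       (unsigned)((uint8_t *)&pi_be)[1]);	/* 44, bytes 00 2c */
		return 0;
	}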