Lines Matching refs:wq

34 #define WQ_SIZE(wq)                     ((wq)->q_depth * (wq)->wqebb_size)
44 #define WQ_BASE_VADDR(wqs, wq) \
45 ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \
46 + (wq)->block_idx * WQ_BLOCK_SIZE)
48 #define WQ_BASE_PADDR(wqs, wq) \
49 ((wqs)->page_paddr[(wq)->page_idx] \
50 + (wq)->block_idx * WQ_BLOCK_SIZE)
52 #define WQ_BASE_ADDR(wqs, wq) \
53 ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \
54 + (wq)->block_idx * WQ_BLOCK_SIZE)
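
The three WQ_BASE_* macros above carve one WQ's block out of a WQS page: a block's address is the page base (virtual, physical, or shadow) plus block_idx times the block size. A minimal userspace model of that arithmetic; WQ_BLOCK_SIZE being 4096 is assumed from the driver's definition, and the addresses and indices below are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define WQ_BLOCK_SIZE 4096 /* assumed from the driver's definition */

int main(void)
{
        /* Stand-ins for one wqs->page_paddr[] entry and a block index. */
        uint64_t page_paddr = 0x100000; /* hypothetical DMA address */
        int block_idx = 3;              /* hypothetical block within the page */

        /* Mirrors WQ_BASE_PADDR(wqs, wq): page base + block offset. */
        uint64_t block_paddr = page_paddr + (uint64_t)block_idx * WQ_BLOCK_SIZE;

        printf("block paddr = 0x%llx\n", (unsigned long long)block_paddr);
        return 0;
}

The CMDQ_BASE_* macros that follow use the same pattern, but stride by CMDQ_BLOCK_SIZE within the single cmdq page.
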
56 #define CMDQ_BASE_VADDR(cmdq_pages, wq) \
58 + (wq)->block_idx * CMDQ_BLOCK_SIZE)
60 #define CMDQ_BASE_PADDR(cmdq_pages, wq) \
62 + (wq)->block_idx * CMDQ_BLOCK_SIZE)
64 #define CMDQ_BASE_ADDR(cmdq_pages, wq) \
66 + (wq)->block_idx * CMDQ_BLOCK_SIZE)
68 #define WQ_PAGE_ADDR(wq, idx) \
69 ((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])
71 #define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)
77 #define WQE_SHADOW_PAGE(wq, wqe) \
78 (((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
79 / (wq)->max_wqe_size)
81 static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx)
83 return (((idx) & ((wq)->num_wqebbs_per_page - 1))
84 << (wq)->wqebb_size_shift);
87 static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx)
89 return (((idx) >> ((wq)->wqebbs_per_page_shift))
90 & ((wq)->num_q_pages - 1));
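
WQE_PAGE_OFF and WQE_PAGE_NUM split a WQEBB index into a byte offset within a queue page and a page number using masks and shifts, which only works because num_wqebbs_per_page and num_q_pages are powers of two. A standalone sketch of the same decomposition; the sizes are illustrative, not taken from the listing:

#include <stdio.h>

int main(void)
{
        unsigned int wqebb_size_shift = 6;      /* hypothetical: 64-byte WQEBB */
        unsigned int num_wqebbs_per_page = 64;  /* hypothetical, power of 2 */
        unsigned int wqebbs_per_page_shift = 6; /* ilog2(64) */
        unsigned int num_q_pages = 4;           /* hypothetical, power of 2 */
        unsigned int idx = 130;                 /* WQEBB index into the queue */

        /* Mirrors WQE_PAGE_OFF(): byte offset of the WQEBB inside its page. */
        unsigned int off = (idx & (num_wqebbs_per_page - 1)) << wqebb_size_shift;

        /* Mirrors WQE_PAGE_NUM(): which queue page holds the WQEBB. */
        unsigned int pg = (idx >> wqebbs_per_page_shift) & (num_q_pages - 1);

        printf("idx %u -> page %u, byte offset %u\n", idx, pg, off); /* page 2, offset 128 */
        return 0;
}
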
328 dev_err(&pdev->dev, "Failed wq page allocation\n");
372 * @wq: WQ to allocate shadows for
376 static int alloc_wqes_shadow(struct hinic_wq *wq)
378 struct hinic_hwif *hwif = wq->hwif;
381 wq->shadow_wqe = devm_kcalloc(&pdev->dev, wq->num_q_pages,
382 wq->max_wqe_size, GFP_KERNEL);
383 if (!wq->shadow_wqe)
386 wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages,
387 sizeof(*wq->shadow_idx), GFP_KERNEL);
388 if (!wq->shadow_idx)
394 devm_kfree(&pdev->dev, wq->shadow_wqe);
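
alloc_wqes_shadow reserves one shadow WQE slot of max_wqe_size bytes per queue page (for WQEs that straddle a page boundary) plus one saved producer index per page, and unwinds the first allocation if the second fails. A userspace model of the two sizings and the unwind, with hypothetical values:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int num_q_pages = 4;       /* hypothetical */
        size_t max_wqe_size = 256; /* hypothetical */

        /* Mirrors the shadow_wqe sizing: one max-size WQE per queue page. */
        uint8_t *shadow_wqe = calloc(num_q_pages, max_wqe_size);
        /* Mirrors the shadow_idx sizing: one saved prod index per page. */
        uint16_t *shadow_idx = calloc(num_q_pages, sizeof(*shadow_idx));

        if (!shadow_wqe || !shadow_idx) {
                free(shadow_idx); /* unwind, as the driver does with devm_kfree */
                free(shadow_wqe);
                return 1;
        }

        printf("shadow area: %zu bytes\n", num_q_pages * max_wqe_size);
        free(shadow_idx);
        free(shadow_wqe);
        return 0;
}
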
400 * @wq: WQ to free shadows from
402 static void free_wqes_shadow(struct hinic_wq *wq)
404 struct hinic_hwif *hwif = wq->hwif;
407 devm_kfree(&pdev->dev, wq->shadow_idx);
408 devm_kfree(&pdev->dev, wq->shadow_wqe);
414 * @wq: WQ to free pages from
417 static void free_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
424 void **vaddr = &wq->shadow_block_vaddr[i];
425 u64 *paddr = &wq->block_vaddr[i];
429 dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr,
433 free_wqes_shadow(wq);
439 * @wq: WQ to allocate pages for
444 static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
450 num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size;
452 dev_err(&pdev->dev, "Number of wq pages exceeds the limit\n");
457 dev_err(&pdev->dev, "Number of wq pages must be a power of 2\n");
461 wq->num_q_pages = num_q_pages;
463 err = alloc_wqes_shadow(wq);
470 void **vaddr = &wq->shadow_block_vaddr[i];
471 u64 *paddr = &wq->block_vaddr[i];
474 *vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size,
477 dev_err(&pdev->dev, "Failed to allocate wq page\n");
488 free_wq_pages(wq, hwif, i);
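
alloc_wq_pages validates the page count (a hard limit, plus the power-of-two requirement the WQE_PAGE_NUM mask depends on), then allocates DMA pages one by one; on a mid-loop failure it frees exactly the i pages already allocated. A generic userspace sketch of the same unwind idiom:

#include <stdlib.h>

/* Allocate n buffers of sz bytes; on failure free the ones already made.
 * Models alloc_wq_pages()'s partial-failure path, free_wq_pages(wq, hwif, i). */
static int alloc_pages_or_unwind(void **pages, int n, size_t sz)
{
        int i;

        for (i = 0; i < n; i++) {
                pages[i] = malloc(sz);
                if (!pages[i]) {
                        while (--i >= 0) /* unwind only what succeeded */
                                free(pages[i]);
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        void *pages[4];

        return alloc_pages_or_unwind(pages, 4, 4096);
}
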
495 * @wq: WQ to allocate resources for from the WQ set
503 int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
537 wq->hwif = hwif;
539 err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx);
545 wq->wqebb_size = wqebb_size;
546 wq->wq_page_size = wq_page_size;
547 wq->q_depth = q_depth;
548 wq->max_wqe_size = max_wqe_size;
549 wq->num_wqebbs_per_page = num_wqebbs_per_page;
550 wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
551 wq->wqebb_size_shift = wqebb_size_shift;
552 wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
553 wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
554 wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
556 err = alloc_wq_pages(wq, wqs->hwif, WQ_MAX_PAGES);
558 dev_err(&pdev->dev, "Failed to allocate wq pages\n");
562 atomic_set(&wq->cons_idx, 0);
563 atomic_set(&wq->prod_idx, 0);
564 atomic_set(&wq->delta, q_depth);
565 wq->mask = q_depth - 1;
570 wqs_return_block(wqs, wq->page_idx, wq->block_idx);
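
hinic_wq_allocate leaves the ring in its empty state: prod_idx == cons_idx == 0, delta == q_depth free WQEBBs, and mask == q_depth - 1, which assumes q_depth is a power of two. A tiny model of the invariant those initializers establish:

#include <assert.h>

int main(void)
{
        unsigned int q_depth = 256; /* hypothetical, power of 2 */
        unsigned int prod = 0, cons = 0, delta = q_depth;
        unsigned int mask = q_depth - 1;

        /* Free slots plus in-flight WQEBBs always equals the depth. */
        assert(delta + ((prod - cons) & mask) == q_depth);
        return 0;
}
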
577 * @wq: WQ whose resources are returned to the WQ set
579 void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
581 free_wq_pages(wq, wqs->hwif, wq->num_q_pages);
583 wqs_return_block(wqs, wq->page_idx, wq->block_idx);
589 * @wq: array of WQs to initialize and return
591 * @cmdq_blocks: number of cmdq blocks (one per WQ) to allocate
600 struct hinic_wq *wq, struct hinic_hwif *hwif,
644 wq[i].hwif = hwif;
645 wq[i].page_idx = 0;
646 wq[i].block_idx = i;
648 wq[i].wqebb_size = wqebb_size;
649 wq[i].wq_page_size = wq_page_size;
650 wq[i].q_depth = q_depth;
651 wq[i].max_wqe_size = max_wqe_size;
652 wq[i].num_wqebbs_per_page = num_wqebbs_per_page;
653 wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift;
654 wq[i].wqebb_size_shift = wqebb_size_shift;
655 wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
656 wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
657 wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
659 err = alloc_wq_pages(&wq[i], cmdq_pages->hwif,
666 atomic_set(&wq[i].cons_idx, 0);
667 atomic_set(&wq[i].prod_idx, 0);
668 atomic_set(&wq[i].delta, q_depth);
669 wq[i].mask = q_depth - 1;
676 free_wq_pages(&wq[j], cmdq_pages->hwif, wq[j].num_q_pages);
685 * @wq: array of WQs to free
689 struct hinic_wq *wq, int cmdq_blocks)
694 free_wq_pages(&wq[i], cmdq_pages->hwif, wq[i].num_q_pages);
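
hinic_wqs_cmdq_alloc sets up one WQ per cmdq block inside a single page, and its error path mirrors its teardown: a mid-loop failure frees the j < i queues already set up, while hinic_wqs_cmdq_free frees all of them. A sketch of that symmetric loop structure over an array; setup_one()/teardown_one() are hypothetical stand-ins for the per-element alloc_wq_pages()/free_wq_pages() calls:

#include <stdio.h>

static int setup_one(int i)     { return i == 2 ? -1 : 0; } /* fail at i == 2 */
static void teardown_one(int i) { printf("teardown %d\n", i); }

int main(void)
{
        int i, j, n = 4;

        for (i = 0; i < n; i++) {
                if (setup_one(i) < 0) {
                        for (j = 0; j < i; j++) /* mirrors the error unwind */
                                teardown_one(j);
                        return 1;
                }
        }
        for (i = 0; i < n; i++) /* mirrors hinic_wqs_cmdq_free() */
                teardown_one(i);
        return 0;
}
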
699 static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr,
706 idx = MASKED_WQE_IDX(wq, idx);
707 wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
708 WQE_PAGE_OFF(wq, idx);
710 memcpy(shadow_addr, wqebb_addr, wq->wqebb_size);
712 shadow_addr += wq->wqebb_size;
716 static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr,
723 idx = MASKED_WQE_IDX(wq, idx);
724 wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
725 WQE_PAGE_OFF(wq, idx);
727 memcpy(wqebb_addr, shadow_addr, wq->wqebb_size);
728 shadow_addr += wq->wqebb_size;
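
Both copy helpers walk num_wqebbs entries starting at idx, masking the index each step so the walk wraps at the end of the ring, and move one wqebb_size chunk per step between the ring pages and the flat shadow buffer. A standalone model of the wrapping copy over a plain array:

#include <stdio.h>
#include <string.h>

#define DEPTH 8 /* hypothetical ring depth, power of 2 */

int main(void)
{
        int ring[DEPTH] = {0, 1, 2, 3, 4, 5, 6, 7};
        int shadow[3];
        unsigned int idx = 6, i;

        /* Mirrors copy_wqe_to_shadow(): mask the index so it wraps. */
        for (i = 0; i < 3; i++, idx++)
                memcpy(&shadow[i], &ring[idx & (DEPTH - 1)], sizeof(int));

        printf("%d %d %d\n", shadow[0], shadow[1], shadow[2]); /* 6 7 0 */
        return 0;
}
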
734 * @wq: wq to get wqe from
740 struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
746 *prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));
748 num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift;
750 if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
751 atomic_add(num_wqebbs, &wq->delta);
755 end_prod_idx = atomic_add_return(num_wqebbs, &wq->prod_idx);
757 end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx);
759 curr_prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);
762 end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx - 1);
764 curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx);
765 end_pg = WQE_PAGE_NUM(wq, end_prod_idx);
773 void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
775 copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);
777 wq->shadow_idx[curr_pg] = *prod_idx;
781 return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx);
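
hinic_get_wqe reserves num_wqebbs by decrementing delta (backing out and failing if the queue is full), advances prod_idx, and then checks whether the WQE spans two queue pages (curr_pg != end_pg) or wraps the ring (masked end index below the start); only then does it hand out the per-page shadow WQE. A compact single-threaded model of the reservation and the spill test, with hypothetical sizes:

#include <stdio.h>

#define DEPTH  16 /* hypothetical, power of 2 */
#define PER_PG 4  /* hypothetical WQEBBs per page, power of 2 */

int main(void)
{
        unsigned int delta = DEPTH, prod = 0;
        unsigned int num_wqebbs = 3;

        if (delta < num_wqebbs) /* models the atomic delta check */
                return 1;       /* the driver backs out and fails here */
        delta -= num_wqebbs;

        unsigned int start = prod & (DEPTH - 1);
        prod += num_wqebbs;
        unsigned int end = (prod - 1) & (DEPTH - 1); /* last WQEBB used */

        unsigned int curr_pg = start / PER_PG;
        unsigned int end_pg = end / PER_PG;

        /* Spilling across pages (or wrapping) forces the shadow path. */
        if (curr_pg != end_pg || end < start)
                printf("use shadow WQE for page %u\n", curr_pg);
        else
                printf("WQE is contiguous in page %u\n", curr_pg);
        return 0;
}
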
786 * @wq: wq to return the wqe to
789 void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size)
791 int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
793 atomic_sub(num_wqebbs, &wq->prod_idx);
795 atomic_add(num_wqebbs, &wq->delta);
800 * @wq: wq to return the wqe to
803 void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
805 int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
806 >> wq->wqebb_size_shift;
808 atomic_add(num_wqebbs, &wq->cons_idx);
810 atomic_add(num_wqebbs, &wq->delta);
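
hinic_return_wqe and hinic_put_wqe are the two ways a reservation ends: return rewinds the producer (the WQE was never posted), while put advances the consumer (the WQE was consumed); both hand the WQEBBs back to delta. A short model of that bookkeeping, with hypothetical sizes:

#include <assert.h>

int main(void)
{
        unsigned int q_depth = 16, n = 4; /* hypothetical */
        unsigned int prod = 0, cons = 0, delta = q_depth;

        prod += n; delta -= n;            /* models hinic_get_wqe */

        /* Path 1: hinic_return_wqe - abort, rewind the producer. */
        prod -= n; delta += n;
        assert(prod == cons && delta == q_depth);

        prod += n; delta -= n;            /* reserve again */

        /* Path 2: hinic_put_wqe - consume, advance the consumer. */
        cons += n; delta += n;
        assert(prod == cons && delta == q_depth);
        return 0;
}
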
815 * @wq: wq to read a wqe from
821 struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
824 int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
825 >> wq->wqebb_size_shift;
829 if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth)
832 curr_cons_idx = atomic_read(&wq->cons_idx);
834 curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);
835 end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1);
837 curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx);
838 end_pg = WQE_PAGE_NUM(wq, end_cons_idx);
846 void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
848 copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
852 return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx);
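
The guard in hinic_read_wqe, delta + num_wqebbs > q_depth, is the consumer-side emptiness test: since delta counts free WQEBBs, q_depth - delta is what has been produced but not yet consumed, so the read fails unless at least num_wqebbs are outstanding. A one-check model with hypothetical values:

#include <stdio.h>

int main(void)
{
        unsigned int q_depth = 16, delta = 14, num_wqebbs = 3; /* hypothetical */

        /* Outstanding work = q_depth - delta = 2 < 3 requested: not readable. */
        if (delta + num_wqebbs > q_depth)
                printf("queue does not hold %u wqebbs yet\n", num_wqebbs);
        return 0;
}
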
857 * @wq: wq to read the wqe from
862 struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx)
864 return WQ_PAGE_ADDR(wq, cons_idx) + WQE_PAGE_OFF(wq, cons_idx);
869 * @wq: wq of the wqe
874 static inline bool wqe_shadow(struct hinic_wq *wq, struct hinic_hw_wqe *wqe)
876 size_t wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size;
878 return WQE_IN_RANGE(wqe, wq->shadow_wqe,
879 &wq->shadow_wqe[wqe_shadow_size]);
883 * hinic_write_wqe - write the wqe to the wq
884 * @wq: wq to write wqe to
888 void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe,
895 if (wqe_shadow(wq, wqe)) {
896 curr_pg = WQE_SHADOW_PAGE(wq, wqe);
898 prod_idx = wq->shadow_idx[curr_pg];
899 num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
900 shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
902 copy_wqe_from_shadow(wq, shadow_addr, num_wqebbs, prod_idx);
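
hinic_write_wqe only has work to do when the caller filled a shadow WQE: wqe_shadow() does a pointer-range test against the shadow area, and the saved shadow_idx recalls where in the ring the WQE belongs so copy_wqe_from_shadow can scatter it back across the page boundary. A standalone model of the range test and write-back:

#include <stdio.h>
#include <string.h>

#define DEPTH 8 /* hypothetical ring depth, power of 2 */

int main(void)
{
        int ring[DEPTH] = {0};
        int shadow[3] = {100, 101, 102}; /* WQE built in the shadow area */
        int *wqe = shadow;               /* pointer handed back by get_wqe */
        unsigned int saved_idx = 6, i;   /* models wq->shadow_idx[curr_pg] */

        /* Models wqe_shadow(): is the pointer inside the shadow buffer? */
        if (wqe >= shadow && wqe < shadow + 3) {
                /* Models copy_wqe_from_shadow(): scatter back, wrapping. */
                for (i = 0; i < 3; i++)
                        memcpy(&ring[(saved_idx + i) & (DEPTH - 1)],
                               &shadow[i], sizeof(int));
        }

        printf("%d %d %d\n", ring[6], ring[7], ring[0]); /* 100 101 102 */
        return 0;
}
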