// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/semaphore.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <asm/byteorder.h>

#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_cmdq.h"

#define WQS_BLOCKS_PER_PAGE             4

#define WQ_BLOCK_SIZE                   4096
#define WQS_PAGE_SIZE                   (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)

#define WQS_MAX_NUM_BLOCKS              128
#define WQS_FREE_BLOCKS_SIZE(wqs)       (WQS_MAX_NUM_BLOCKS * \
					 sizeof((wqs)->free_blocks[0]))

#define WQ_SIZE(wq)                     ((wq)->q_depth * (wq)->wqebb_size)

#define WQ_PAGE_ADDR_SIZE               sizeof(u64)
#define WQ_MAX_PAGES                    (WQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)

#define CMDQ_BLOCK_SIZE                 512
#define CMDQ_PAGE_SIZE                  4096

#define CMDQ_WQ_MAX_PAGES               (CMDQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)
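
/*
 * Layout: each WQS page is WQS_PAGE_SIZE (16 KiB) of coherent DMA memory,
 * split into WQS_BLOCKS_PER_PAGE (4) blocks of WQ_BLOCK_SIZE (4 KiB).  A WQ
 * owns one block, which stores the big-endian DMA addresses of its queue
 * pages, so a regular WQ can have at most WQ_MAX_PAGES (512) pages.  CMDQ
 * WQs carve CMDQ_BLOCK_SIZE (512 B) blocks out of a single CMDQ_PAGE_SIZE
 * page, giving each of them at most CMDQ_WQ_MAX_PAGES (64) pages.
 */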

#define WQ_BASE_VADDR(wqs, wq)          \
			((void *)((wqs)->page_vaddr[(wq)->page_idx]) \
				+ (wq)->block_idx * WQ_BLOCK_SIZE)

#define WQ_BASE_PADDR(wqs, wq)          \
			((wqs)->page_paddr[(wq)->page_idx] \
				+ (wq)->block_idx * WQ_BLOCK_SIZE)

#define WQ_BASE_ADDR(wqs, wq)           \
			((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \
				+ (wq)->block_idx * WQ_BLOCK_SIZE)

#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
			((void *)((cmdq_pages)->page_vaddr) \
				+ (wq)->block_idx * CMDQ_BLOCK_SIZE)

#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
			((cmdq_pages)->page_paddr \
				+ (wq)->block_idx * CMDQ_BLOCK_SIZE)

#define CMDQ_BASE_ADDR(cmdq_pages, wq)  \
			((void *)((cmdq_pages)->shadow_page_vaddr) \
				+ (wq)->block_idx * CMDQ_BLOCK_SIZE)

#define WQ_PAGE_ADDR(wq, idx)           \
			((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])

#define MASKED_WQE_IDX(wq, idx)         ((idx) & (wq)->mask)

#define WQE_IN_RANGE(wqe, start, end)   \
		(((unsigned long)(wqe) >= (unsigned long)(start)) && \
		 ((unsigned long)(wqe) < (unsigned long)(end)))

#define WQE_SHADOW_PAGE(wq, wqe)        \
		(((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
			/ (wq)->max_wqe_size)

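/*
 * WQE indices count WQEBBs.  WQE_PAGE_OFF() returns the byte offset of a
 * WQEBB inside its queue page and WQE_PAGE_NUM() returns the queue page that
 * holds it; both rely on num_wqebbs_per_page and num_q_pages being powers of
 * two, which hinic_wq_allocate()/hinic_wqs_cmdq_alloc() enforce.
 */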
static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx)
{
	return (((idx) & ((wq)->num_wqebbs_per_page - 1))
		<< (wq)->wqebb_size_shift);
}

static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx)
{
	return (((idx) >> ((wq)->wqebbs_per_page_shift))
		& ((wq)->num_q_pages - 1));
}

/**
 * queue_alloc_page - allocate page for Queue
 * @hwif: HW interface for allocating DMA
 * @vaddr: returned virtual address of the allocated page
 * @paddr: returned physical (DMA) address of the allocated page
 * @shadow_vaddr: returned virtually-mapped area for holding WQ page addresses
 * @page_sz: page size of each WQ page
 *
 * Return 0 - Success, negative - Failure
 **/
static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr,
			    void ***shadow_vaddr, size_t page_sz)
{
	struct pci_dev *pdev = hwif->pdev;
	dma_addr_t dma_addr;

	*vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr,
				    GFP_KERNEL);
	if (!*vaddr) {
		dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n");
		return -ENOMEM;
	}

	*paddr = (u64)dma_addr;

	/* use vzalloc for big mem */
	*shadow_vaddr = vzalloc(page_sz);
	if (!*shadow_vaddr)
		goto err_shadow_vaddr;

	return 0;

err_shadow_vaddr:
	dma_free_coherent(&pdev->dev, page_sz, *vaddr, dma_addr);
	return -ENOMEM;
}

/**
 * wqs_allocate_page - allocate page for WQ set
 * @wqs: Work Queue Set
 * @page_idx: index of the page to allocate
 *
 * Return 0 - Success, negative - Failure
 **/
static int wqs_allocate_page(struct hinic_wqs *wqs, int page_idx)
{
	return queue_alloc_page(wqs->hwif, &wqs->page_vaddr[page_idx],
				&wqs->page_paddr[page_idx],
				&wqs->shadow_page_vaddr[page_idx],
				WQS_PAGE_SIZE);
}

/**
 * wqs_free_page - free page of WQ set
 * @wqs: Work Queue Set
 * @page_idx: index of the page to free
 **/
static void wqs_free_page(struct hinic_wqs *wqs, int page_idx)
{
	struct hinic_hwif *hwif = wqs->hwif;
	struct pci_dev *pdev = hwif->pdev;

	dma_free_coherent(&pdev->dev, WQS_PAGE_SIZE,
			  wqs->page_vaddr[page_idx],
			  (dma_addr_t)wqs->page_paddr[page_idx]);
	vfree(wqs->shadow_page_vaddr[page_idx]);
}

/**
 * cmdq_allocate_page - allocate page for cmdq
 * @cmdq_pages: the cmdq pages struct that will hold the allocated page
 *
 * Return 0 - Success, negative - Failure
 **/
static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
{
	return queue_alloc_page(cmdq_pages->hwif, &cmdq_pages->page_vaddr,
				&cmdq_pages->page_paddr,
				&cmdq_pages->shadow_page_vaddr,
				CMDQ_PAGE_SIZE);
}

/**
 * cmdq_free_page - free page from cmdq
 * @cmdq_pages: the cmdq pages struct that holds the page
 **/
static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
{
	struct hinic_hwif *hwif = cmdq_pages->hwif;
	struct pci_dev *pdev = hwif->pdev;

	dma_free_coherent(&pdev->dev, CMDQ_PAGE_SIZE,
			  cmdq_pages->page_vaddr,
			  (dma_addr_t)cmdq_pages->page_paddr);
	vfree(cmdq_pages->shadow_page_vaddr);
}

static int alloc_page_arrays(struct hinic_wqs *wqs)
{
	struct hinic_hwif *hwif = wqs->hwif;
	struct pci_dev *pdev = hwif->pdev;

	wqs->page_paddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
				       sizeof(*wqs->page_paddr), GFP_KERNEL);
	if (!wqs->page_paddr)
		return -ENOMEM;

	wqs->page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
				       sizeof(*wqs->page_vaddr), GFP_KERNEL);
	if (!wqs->page_vaddr)
		goto err_page_vaddr;

	wqs->shadow_page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
					      sizeof(*wqs->shadow_page_vaddr),
					      GFP_KERNEL);
	if (!wqs->shadow_page_vaddr)
		goto err_page_shadow_vaddr;

	return 0;

err_page_shadow_vaddr:
	devm_kfree(&pdev->dev, wqs->page_vaddr);

err_page_vaddr:
	devm_kfree(&pdev->dev, wqs->page_paddr);
	return -ENOMEM;
}

static void free_page_arrays(struct hinic_wqs *wqs)
{
	struct hinic_hwif *hwif = wqs->hwif;
	struct pci_dev *pdev = hwif->pdev;

	devm_kfree(&pdev->dev, wqs->shadow_page_vaddr);
	devm_kfree(&pdev->dev, wqs->page_vaddr);
	devm_kfree(&pdev->dev, wqs->page_paddr);
}

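/*
 * free_blocks is used as a circular array of WQS_MAX_NUM_BLOCKS entries:
 * wqs_next_block() hands out the entry at alloc_blk_pos and
 * wqs_return_block() refills the entry at return_blk_pos, both positions
 * wrapping at WQS_MAX_NUM_BLOCKS.  num_free_blks and the positions are
 * protected by alloc_blocks_lock.
 */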
static int wqs_next_block(struct hinic_wqs *wqs, int *page_idx,
			  int *block_idx)
{
	int pos;

	down(&wqs->alloc_blocks_lock);

	wqs->num_free_blks--;

	if (wqs->num_free_blks < 0) {
		wqs->num_free_blks++;
		up(&wqs->alloc_blocks_lock);
		return -ENOMEM;
	}

	pos = wqs->alloc_blk_pos++;
	pos &= WQS_MAX_NUM_BLOCKS - 1;

	*page_idx = wqs->free_blocks[pos].page_idx;
	*block_idx = wqs->free_blocks[pos].block_idx;

	wqs->free_blocks[pos].page_idx = -1;
	wqs->free_blocks[pos].block_idx = -1;

	up(&wqs->alloc_blocks_lock);
	return 0;
}

static void wqs_return_block(struct hinic_wqs *wqs, int page_idx,
			     int block_idx)
{
	int pos;

	down(&wqs->alloc_blocks_lock);

	pos = wqs->return_blk_pos++;
	pos &= WQS_MAX_NUM_BLOCKS - 1;

	wqs->free_blocks[pos].page_idx = page_idx;
	wqs->free_blocks[pos].block_idx = block_idx;

	wqs->num_free_blks++;

	up(&wqs->alloc_blocks_lock);
}

static void init_wqs_blocks_arr(struct hinic_wqs *wqs)
{
	int page_idx, blk_idx, pos = 0;

	for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
		for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) {
			wqs->free_blocks[pos].page_idx = page_idx;
			wqs->free_blocks[pos].block_idx = blk_idx;
			pos++;
		}
	}

	wqs->alloc_blk_pos = 0;
	wqs->return_blk_pos = pos;
	wqs->num_free_blks = pos;

	sema_init(&wqs->alloc_blocks_lock, 1);
}

/**
 * hinic_wqs_alloc - allocate Work Queues set
 * @wqs: Work Queue Set
 * @max_wqs: maximum wqs to allocate
 * @hwif: HW interface to use for the allocation
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_wqs_alloc(struct hinic_wqs *wqs, int max_wqs,
		    struct hinic_hwif *hwif)
{
	struct pci_dev *pdev = hwif->pdev;
	int err, i, page_idx;

	max_wqs = ALIGN(max_wqs, WQS_BLOCKS_PER_PAGE);
	if (max_wqs > WQS_MAX_NUM_BLOCKS) {
		dev_err(&pdev->dev, "Invalid max_wqs = %d\n", max_wqs);
		return -EINVAL;
	}

	wqs->hwif = hwif;
	wqs->num_pages = max_wqs / WQS_BLOCKS_PER_PAGE;

	if (alloc_page_arrays(wqs)) {
		dev_err(&pdev->dev,
			"Failed to allocate mem for page addresses\n");
		return -ENOMEM;
	}

	for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
		err = wqs_allocate_page(wqs, page_idx);
		if (err) {
			dev_err(&pdev->dev, "Failed wq page allocation\n");
			goto err_wq_allocate_page;
		}
	}

	wqs->free_blocks = devm_kzalloc(&pdev->dev, WQS_FREE_BLOCKS_SIZE(wqs),
					GFP_KERNEL);
	if (!wqs->free_blocks) {
		err = -ENOMEM;
		goto err_alloc_blocks;
	}

	init_wqs_blocks_arr(wqs);
	return 0;

err_alloc_blocks:
err_wq_allocate_page:
	for (i = 0; i < page_idx; i++)
		wqs_free_page(wqs, i);

	free_page_arrays(wqs);
	return err;
}

/**
 * hinic_wqs_free - free Work Queues set
 * @wqs: Work Queue Set
 **/
void hinic_wqs_free(struct hinic_wqs *wqs)
{
	struct hinic_hwif *hwif = wqs->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int page_idx;

	devm_kfree(&pdev->dev, wqs->free_blocks);

	for (page_idx = 0; page_idx < wqs->num_pages; page_idx++)
		wqs_free_page(wqs, page_idx);

	free_page_arrays(wqs);
}

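/*
 * A WQE may span several WQEBBs and can therefore cross a queue-page
 * boundary.  For that case each WQ keeps one max_wqe_size shadow buffer per
 * queue page: hinic_get_wqe()/hinic_read_wqe() assemble a split WQE into the
 * shadow buffer so callers see it contiguously, and hinic_write_wqe() copies
 * a shadow WQE back into the real queue pages.
 */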
/**
 * alloc_wqes_shadow - allocate WQE shadows for WQ
 * @wq: WQ to allocate shadows for
 *
 * Return 0 - Success, negative - Failure
 **/
static int alloc_wqes_shadow(struct hinic_wq *wq)
{
	struct hinic_hwif *hwif = wq->hwif;
	struct pci_dev *pdev = hwif->pdev;

	wq->shadow_wqe = devm_kcalloc(&pdev->dev, wq->num_q_pages,
				      wq->max_wqe_size, GFP_KERNEL);
	if (!wq->shadow_wqe)
		return -ENOMEM;

	wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages,
				      sizeof(*wq->shadow_idx), GFP_KERNEL);
	if (!wq->shadow_idx)
		goto err_shadow_idx;

	return 0;

err_shadow_idx:
	devm_kfree(&pdev->dev, wq->shadow_wqe);
	return -ENOMEM;
}

/**
 * free_wqes_shadow - free WQE shadows of WQ
 * @wq: WQ to free shadows from
 **/
static void free_wqes_shadow(struct hinic_wq *wq)
{
	struct hinic_hwif *hwif = wq->hwif;
	struct pci_dev *pdev = hwif->pdev;

	devm_kfree(&pdev->dev, wq->shadow_idx);
	devm_kfree(&pdev->dev, wq->shadow_wqe);
}

/**
 * free_wq_pages - free pages of WQ
 * @hwif: HW interface for releasing dma addresses
 * @wq: WQ to free pages from
 * @num_q_pages: number of pages to free
 **/
static void free_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
			  int num_q_pages)
{
	struct pci_dev *pdev = hwif->pdev;
	int i;

	for (i = 0; i < num_q_pages; i++) {
		void **vaddr = &wq->shadow_block_vaddr[i];
		u64 *paddr = &wq->block_vaddr[i];
		dma_addr_t dma_addr;

		dma_addr = (dma_addr_t)be64_to_cpu(*paddr);
		dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr,
				  dma_addr);
	}

	free_wqes_shadow(wq);
}

/**
 * alloc_wq_pages - alloc pages for WQ
 * @hwif: HW interface for allocating dma addresses
 * @wq: WQ to allocate pages for
 * @max_pages: maximum pages allowed
 *
 * Return 0 - Success, negative - Failure
 **/
static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
			  int max_pages)
{
	struct pci_dev *pdev = hwif->pdev;
	int i, err, num_q_pages;

	num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size;
	if (num_q_pages > max_pages) {
		dev_err(&pdev->dev, "Number of wq pages exceeds the limit\n");
		return -EINVAL;
	}

	if (num_q_pages & (num_q_pages - 1)) {
		dev_err(&pdev->dev, "Number of wq pages must be power of 2\n");
		return -EINVAL;
	}

	wq->num_q_pages = num_q_pages;

	err = alloc_wqes_shadow(wq);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate wqe shadow\n");
		return err;
	}

	for (i = 0; i < num_q_pages; i++) {
		void **vaddr = &wq->shadow_block_vaddr[i];
		u64 *paddr = &wq->block_vaddr[i];
		dma_addr_t dma_addr;

		*vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size,
					    &dma_addr, GFP_KERNEL);
		if (!*vaddr) {
			dev_err(&pdev->dev, "Failed to allocate wq page\n");
			goto err_alloc_wq_pages;
		}

		/* HW uses Big Endian Format */
		*paddr = cpu_to_be64(dma_addr);
	}

	return 0;

err_alloc_wq_pages:
	free_wq_pages(wq, hwif, i);
	return -ENOMEM;
}

/**
 * hinic_wq_allocate - Allocate the WQ resources from the WQS
 * @wqs: WQ set from which to allocate the WQ resources
 * @wq: WQ to allocate resources for from the WQ set
 * @wqebb_size: Work Queue Block Byte Size
 * @wq_page_size: the page size in the Work Queue
 * @q_depth: number of wqebbs in WQ
 * @max_wqe_size: maximum WQE size that will be used in the WQ
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
		      u16 wqebb_size, u32 wq_page_size, u16 q_depth,
		      u16 max_wqe_size)
{
	struct hinic_hwif *hwif = wqs->hwif;
	struct pci_dev *pdev = hwif->pdev;
	u16 num_wqebbs_per_page;
	u16 wqebb_size_shift;
	int err;

	if (!is_power_of_2(wqebb_size)) {
		dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
		return -EINVAL;
	}

	if (wq_page_size == 0) {
		dev_err(&pdev->dev, "wq_page_size must be > 0\n");
		return -EINVAL;
	}

	if (q_depth & (q_depth - 1)) {
		dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
		return -EINVAL;
	}

	wqebb_size_shift = ilog2(wqebb_size);
	num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
				>> wqebb_size_shift;

	if (!is_power_of_2(num_wqebbs_per_page)) {
		dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
		return -EINVAL;
	}

	wq->hwif = hwif;

	err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx);
	if (err) {
		dev_err(&pdev->dev, "Failed to get free wqs next block\n");
		return err;
	}

	wq->wqebb_size = wqebb_size;
	wq->wq_page_size = wq_page_size;
	wq->q_depth = q_depth;
	wq->max_wqe_size = max_wqe_size;
	wq->num_wqebbs_per_page = num_wqebbs_per_page;
	wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
	wq->wqebb_size_shift = wqebb_size_shift;
	wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
	wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
	wq->block_paddr = WQ_BASE_PADDR(wqs, wq);

	err = alloc_wq_pages(wq, wqs->hwif, WQ_MAX_PAGES);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate wq pages\n");
		goto err_alloc_wq_pages;
	}

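	/* prod_idx/cons_idx count WQEBBs; delta is the number of free WQEBBs,
	 * consumed by hinic_get_wqe() and replenished by hinic_put_wqe()
	 */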
	atomic_set(&wq->cons_idx, 0);
	atomic_set(&wq->prod_idx, 0);
	atomic_set(&wq->delta, q_depth);
	wq->mask = q_depth - 1;

	return 0;

err_alloc_wq_pages:
	wqs_return_block(wqs, wq->page_idx, wq->block_idx);
	return err;
}
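
/*
 * Expected call flow (a sketch inferred from the APIs in this file, not from
 * this driver's callers): hinic_wqs_alloc() once per WQ set,
 * hinic_wq_allocate() per queue, then hinic_get_wqe()/hinic_write_wqe() on
 * the producer side and hinic_read_wqe()/hinic_put_wqe() on the consumer
 * side, and finally hinic_wq_free() followed by hinic_wqs_free().
 */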

/**
 * hinic_wq_free - Free the WQ resources to the WQS
 * @wqs: WQ set to return the WQ resources to
 * @wq: WQ whose resources are returned to the WQ set
 **/
void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
{
	free_wq_pages(wq, wqs->hwif, wq->num_q_pages);

	wqs_return_block(wqs, wq->page_idx, wq->block_idx);
}

/**
 * hinic_wqs_cmdq_alloc - Allocate wqs for cmdqs
 * @cmdq_pages: will hold the pages of the cmdq
 * @wq: returned wqs
 * @hwif: HW interface
 * @cmdq_blocks: number of cmdq blocks/wq to allocate
 * @wqebb_size: Work Queue Block Byte Size
 * @wq_page_size: the page size in the Work Queue
 * @q_depth: number of wqebbs in WQ
 * @max_wqe_size: maximum WQE size that will be used in the WQ
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
			 struct hinic_wq *wq, struct hinic_hwif *hwif,
			 int cmdq_blocks, u16 wqebb_size, u32 wq_page_size,
			 u16 q_depth, u16 max_wqe_size)
{
	struct pci_dev *pdev = hwif->pdev;
	u16 num_wqebbs_per_page_shift;
	u16 num_wqebbs_per_page;
	u16 wqebb_size_shift;
	int i, j, err = -ENOMEM;

	if (!is_power_of_2(wqebb_size)) {
		dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
		return -EINVAL;
	}

	if (wq_page_size == 0) {
		dev_err(&pdev->dev, "wq_page_size must be > 0\n");
		return -EINVAL;
	}

	if (q_depth & (q_depth - 1)) {
		dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
		return -EINVAL;
	}

	wqebb_size_shift = ilog2(wqebb_size);
	num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
				>> wqebb_size_shift;

	if (!is_power_of_2(num_wqebbs_per_page)) {
		dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
		return -EINVAL;
	}

	cmdq_pages->hwif = hwif;

	err = cmdq_allocate_page(cmdq_pages);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
		return err;
	}
	num_wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);

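	/* all cmdq WQs live in the single cmdq page; WQ i takes block i
	 * (CMDQ_BLOCK_SIZE bytes) within it
	 */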
	for (i = 0; i < cmdq_blocks; i++) {
		wq[i].hwif = hwif;
		wq[i].page_idx = 0;
		wq[i].block_idx = i;

		wq[i].wqebb_size = wqebb_size;
		wq[i].wq_page_size = wq_page_size;
		wq[i].q_depth = q_depth;
		wq[i].max_wqe_size = max_wqe_size;
		wq[i].num_wqebbs_per_page = num_wqebbs_per_page;
		wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift;
		wq[i].wqebb_size_shift = wqebb_size_shift;
		wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
		wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
		wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);

		err = alloc_wq_pages(&wq[i], cmdq_pages->hwif,
				     CMDQ_WQ_MAX_PAGES);
		if (err) {
			dev_err(&pdev->dev, "Failed to alloc CMDQ blocks\n");
			goto err_cmdq_block;
		}

		atomic_set(&wq[i].cons_idx, 0);
		atomic_set(&wq[i].prod_idx, 0);
		atomic_set(&wq[i].delta, q_depth);
		wq[i].mask = q_depth - 1;
	}

	return 0;

err_cmdq_block:
	for (j = 0; j < i; j++)
		free_wq_pages(&wq[j], cmdq_pages->hwif, wq[j].num_q_pages);

	cmdq_free_page(cmdq_pages);
	return err;
}

/**
 * hinic_wqs_cmdq_free - Free wqs from cmdqs
 * @cmdq_pages: hold the pages of the cmdq
 * @wq: wqs to free
 * @cmdq_blocks: number of wqs to free
 **/
void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
			 struct hinic_wq *wq, int cmdq_blocks)
{
	int i;

	for (i = 0; i < cmdq_blocks; i++)
		free_wq_pages(&wq[i], cmdq_pages->hwif, wq[i].num_q_pages);

	cmdq_free_page(cmdq_pages);
}

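/*
 * Gather/scatter helpers for WQEs that cross a queue-page boundary: they copy
 * num_wqebbs WQEBBs, one at a time, between the queue pages (starting at WQE
 * index idx, wrapping with the queue mask) and a contiguous shadow buffer.
 */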
static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr,
			       int num_wqebbs, u16 idx)
{
	void *wqebb_addr;
	int i;

	for (i = 0; i < num_wqebbs; i++, idx++) {
		idx = MASKED_WQE_IDX(wq, idx);
		wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
			     WQE_PAGE_OFF(wq, idx);

		memcpy(shadow_addr, wqebb_addr, wq->wqebb_size);

		shadow_addr += wq->wqebb_size;
	}
}

static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr,
				 int num_wqebbs, u16 idx)
{
	void *wqebb_addr;
	int i;

	for (i = 0; i < num_wqebbs; i++, idx++) {
		idx = MASKED_WQE_IDX(wq, idx);
		wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
			     WQE_PAGE_OFF(wq, idx);

		memcpy(wqebb_addr, shadow_addr, wq->wqebb_size);
		shadow_addr += wq->wqebb_size;
	}
}

/**
 * hinic_get_wqe - get wqe ptr in the current pi and update the pi
 * @wq: wq to get wqe from
 * @wqe_size: wqe size
 * @prod_idx: returned pi
 *
 * Return wqe pointer
 **/
struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
				   u16 *prod_idx)
{
	int curr_pg, end_pg, num_wqebbs;
	u16 curr_prod_idx, end_prod_idx;

	*prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));

	num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift;

	if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
		atomic_add(num_wqebbs, &wq->delta);
		return ERR_PTR(-EBUSY);
	}

	end_prod_idx = atomic_add_return(num_wqebbs, &wq->prod_idx);

	end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx);
	curr_prod_idx = end_prod_idx - num_wqebbs;
	curr_prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);

	/* end prod index points to the next wqebb, therefore minus 1 */
	end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx - 1);

	curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx);
	end_pg = WQE_PAGE_NUM(wq, end_prod_idx);

	*prod_idx = curr_prod_idx;

	/* Even if we only have one page, we still need to use the shadow
	 * wqe when the wqe rolls over the page boundary
	 */
	if (curr_pg != end_pg || end_prod_idx < *prod_idx) {
		void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];

		copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);

		wq->shadow_idx[curr_pg] = *prod_idx;
		return shadow_addr;
	}

	return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx);
}

/**
 * hinic_return_wqe - return the wqe when the transmit fails
 * @wq: wq to return wqe
 * @wqe_size: wqe size
 **/
void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size)
{
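	/* undo the producer-side reservation made by hinic_get_wqe() */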
	int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;

	atomic_sub(num_wqebbs, &wq->prod_idx);

	atomic_add(num_wqebbs, &wq->delta);
}

/**
 * hinic_put_wqe - release the wqe space so it can be used by a new wqe
 * @wq: wq to return wqe
 * @wqe_size: wqe size
 **/
void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
{
	int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
			>> wq->wqebb_size_shift;

	atomic_add(num_wqebbs, &wq->cons_idx);

	atomic_add(num_wqebbs, &wq->delta);
}

/**
 * hinic_read_wqe - read wqe ptr in the current ci
 * @wq: wq to read from
 * @wqe_size: wqe size
 * @cons_idx: returned ci
 *
 * Return wqe pointer
 **/
struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
				    u16 *cons_idx)
{
	int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
			>> wq->wqebb_size_shift;
	u16 curr_cons_idx, end_cons_idx;
	int curr_pg, end_pg;

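	/* delta is the number of free wqebbs; if fewer than num_wqebbs
	 * wqebbs are outstanding there is nothing to read yet
	 */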
	if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth)
		return ERR_PTR(-EBUSY);

	curr_cons_idx = atomic_read(&wq->cons_idx);

	curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);
	end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1);

	curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx);
	end_pg = WQE_PAGE_NUM(wq, end_cons_idx);

	*cons_idx = curr_cons_idx;

	/* Even if we only have one page, we still need to use the shadow
	 * wqe when the wqe rolls over the page boundary
	 */
	if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) {
		void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];

		copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
		return shadow_addr;
	}

	return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx);
}

/**
 * hinic_read_wqe_direct - read wqe directly from ci position
 * @wq: wq
 * @cons_idx: ci position
 *
 * Return wqe
 **/
struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx)
{
	return WQ_PAGE_ADDR(wq, cons_idx) + WQE_PAGE_OFF(wq, cons_idx);
}

/**
 * wqe_shadow - check if a wqe is a shadow wqe
 * @wq: wq of the wqe
 * @wqe: the wqe for shadow checking
 *
 * Return true - shadow, false - Not shadow
 **/
static inline bool wqe_shadow(struct hinic_wq *wq, struct hinic_hw_wqe *wqe)
{
	size_t wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size;

	return WQE_IN_RANGE(wqe, wq->shadow_wqe,
			    &wq->shadow_wqe[wqe_shadow_size]);
}

/**
 * hinic_write_wqe - write the wqe to the wq
 * @wq: wq to write wqe to
 * @wqe: wqe to write
 * @wqe_size: wqe size
 **/
void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe,
		     unsigned int wqe_size)
{
	int curr_pg, num_wqebbs;
	void *shadow_addr;
	u16 prod_idx;

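	/* a WQE built in the shadow buffer must be copied back into the real
	 * queue pages; a WQE obtained directly was written in place already
	 */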
	if (wqe_shadow(wq, wqe)) {
		curr_pg = WQE_SHADOW_PAGE(wq, wqe);

		prod_idx = wq->shadow_idx[curr_pg];
		num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
		shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];

		copy_wqe_from_shadow(wq, shadow_addr, num_wqebbs, prod_idx);
	}
}