// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2022 Marvell International Ltd.
 */

#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>

#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>

#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>

#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>

static const int debug;

#define CVMX_DUMP_REGX(reg)						\
	if (debug)							\
		debug("%s=%#llx\n", #reg, (long long)csr_rd_node(node, reg))

static int cvmx_pko_setup_macs(int node);

/*
 * PKO descriptor queue operation error string
 *
 * @param dqstatus is the enumeration returned from hardware,
 *	  PKO_QUERY_RTN_S[DQSTATUS].
 *
 * @return static constant string error description
 */
const char *pko_dqstatus_error(pko_query_dqstatus_t dqstatus)
{
	char *str = "PKO Undefined error";

	switch (dqstatus) {
	case PKO_DQSTATUS_PASS:
		str = "No error";
		break;
	case PKO_DQSTATUS_BADSTATE:
		str = "PKO queue not ready";
		break;
	case PKO_DQSTATUS_NOFPABUF:
		str = "PKO failed to allocate buffer from FPA";
		break;
	case PKO_DQSTATUS_NOPKOBUF:
		str = "PKO out of buffers";
		break;
	case PKO_DQSTATUS_FAILRTNPTR:
		str = "PKO failed to return buffer to FPA";
		break;
	case PKO_DQSTATUS_ALREADY:
		str = "PKO queue already opened";
		break;
	case PKO_DQSTATUS_NOTCREATED:
		str = "PKO queue has not been created";
		break;
	case PKO_DQSTATUS_NOTEMPTY:
		str = "PKO queue is not empty";
		break;
	case PKO_DQSTATUS_SENDPKTDROP:
		str = "Illegal PKO command construct";
		break;
	}
	return str;
}

/*
 * PKO global initialization for 78XX.
 *
 * @param node is the node on which PKO block is initialized.
 * @param aura is the FPA aura (node and local aura number) used by the
 *	  PKO DPFI block.
 * @return 0 on success, -1 on failure.
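 *
 * Illustrative bring-up call, assuming 'aura' holds the FPA aura that was
 * set aside for PKO on this node:
 *
 *	if (cvmx_pko3_hw_init_global(node, aura) != 0)
 *		return -1;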
 */
int cvmx_pko3_hw_init_global(int node, uint16_t aura)
{
	cvmx_pko_dpfi_flush_t pko_flush;
	cvmx_pko_dpfi_fpa_aura_t pko_aura;
	cvmx_pko_dpfi_ena_t dpfi_enable;
	cvmx_pko_ptf_iobp_cfg_t ptf_iobp_cfg;
	cvmx_pko_pdm_cfg_t pko_pdm_cfg;
	cvmx_pko_enable_t pko_enable;
	cvmx_pko_dpfi_status_t dpfi_status;
	cvmx_pko_status_t pko_status;
	cvmx_pko_shaper_cfg_t shaper_cfg;
	u64 cycles;
	const unsigned int timeout = 100; /* 100 milliseconds */

	if (node != (aura >> 10))
		cvmx_printf("WARNING: AURA vs PKO node mismatch\n");

	pko_enable.u64 = csr_rd_node(node, CVMX_PKO_ENABLE);
	if (pko_enable.s.enable) {
		cvmx_printf("WARNING: %s: PKO already enabled on node %u\n",
			    __func__, node);
		return 0;
	}
	/* Enable color awareness. */
	shaper_cfg.u64 = csr_rd_node(node, CVMX_PKO_SHAPER_CFG);
	shaper_cfg.s.color_aware = 1;
	csr_wr_node(node, CVMX_PKO_SHAPER_CFG, shaper_cfg.u64);

	/* Clear FLUSH command to be sure */
	pko_flush.u64 = 0;
	pko_flush.s.flush_en = 0;
	csr_wr_node(node, CVMX_PKO_DPFI_FLUSH, pko_flush.u64);

	/* set the aura number in pko, use aura node from parameter */
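	/*
	 * The global AURA handle appears to carry the node number in the
	 * bits above the 10-bit local aura, hence the '>> 10' extraction
	 * here and in the sanity check above.
	 */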
	pko_aura.u64 = 0;
	pko_aura.s.node = aura >> 10;
	pko_aura.s.laura = aura;
	csr_wr_node(node, CVMX_PKO_DPFI_FPA_AURA, pko_aura.u64);

	CVMX_DUMP_REGX(CVMX_PKO_DPFI_FPA_AURA);

	dpfi_enable.u64 = 0;
	dpfi_enable.s.enable = 1;
	csr_wr_node(node, CVMX_PKO_DPFI_ENA, dpfi_enable.u64);

	/* Prepare timeout */
	cycles = get_timer(0);

	/* Wait until all pointers have been returned */
	do {
		pko_status.u64 = csr_rd_node(node, CVMX_PKO_STATUS);
		if (get_timer(cycles) > timeout)
			break;
	} while (!pko_status.s.pko_rdy);

	if (!pko_status.s.pko_rdy) {
		dpfi_status.u64 = csr_rd_node(node, CVMX_PKO_DPFI_STATUS);
		cvmx_printf("ERROR: %s: PKO DPFI failed, PKO_STATUS=%#llx DPFI_STATUS=%#llx\n",
			    __func__, (unsigned long long)pko_status.u64,
			    (unsigned long long)dpfi_status.u64);
		return -1;
	}

	/* Set max outstanding requests in IOBP for any FIFO.*/
	ptf_iobp_cfg.u64 = csr_rd_node(node, CVMX_PKO_PTF_IOBP_CFG);
	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		ptf_iobp_cfg.s.max_read_size = 0x10; /* Recommended by HRM.*/
	else
		/* Reduce the value from recommended 0x10 to avoid
		 * getting "underflow" condition in the BGX TX FIFO.
		 */
		ptf_iobp_cfg.s.max_read_size = 3;
	csr_wr_node(node, CVMX_PKO_PTF_IOBP_CFG, ptf_iobp_cfg.u64);

	/* Set minimum packet size per Ethernet standard */
	pko_pdm_cfg.u64 = 0;
	pko_pdm_cfg.s.pko_pad_minlen = 0x3c; /* 60 bytes before FCS */
	csr_wr_node(node, CVMX_PKO_PDM_CFG, pko_pdm_cfg.u64);

	/* Initialize MACs and FIFOs */
	cvmx_pko_setup_macs(node);

	/* enable PKO, although interfaces and queues are not up yet */
	pko_enable.u64 = 0;
	pko_enable.s.enable = 1;
	csr_wr_node(node, CVMX_PKO_ENABLE, pko_enable.u64);

	/* PKO_RDY set indicates successful initialization */
	pko_status.u64 = csr_rd_node(node, CVMX_PKO_STATUS);
	if (pko_status.s.pko_rdy)
		return 0;

	cvmx_printf("ERROR: %s: failed, PKO_STATUS=%#llx\n", __func__,
		    (unsigned long long)pko_status.u64);
	return -1;
}

/*
 * Configure Channel credit level in PKO.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param level specifies the level at which PKO channel queues are configured.
 * @return returns 0 if successful and -1 on failure.
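 *
 * Illustrative use: when the channel queues sit at level 2 of the PKO
 * scheduler hierarchy, the matching call would be
 *
 *	cvmx_pko3_channel_credit_level(node, CVMX_PKO_L2_QUEUES);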
 */
int cvmx_pko3_channel_credit_level(int node, enum cvmx_pko3_level_e level)
{
	union cvmx_pko_channel_level channel_level;

	channel_level.u64 = 0;

	if (level == CVMX_PKO_L2_QUEUES)
		channel_level.s.cc_level = 0;
	else if (level == CVMX_PKO_L3_QUEUES)
		channel_level.s.cc_level = 1;
	else
		return -1;

	csr_wr_node(node, CVMX_PKO_CHANNEL_LEVEL, channel_level.u64);

	return 0;
}

/** Open configured descriptor queues before queueing packets into them.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param dq is the descriptor queue number to be opened.
 * @return returns 0 on success or -1 on failure.
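 *
 * Illustrative bring-up loop, assuming dq_base and dq_count describe the
 * descriptor queues assigned to a port:
 *
 *	for (dq = dq_base; dq < dq_base + dq_count; dq++)
 *		if (cvmx_pko_dq_open(node, dq) < 0)
 *			return -1;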
 */
int cvmx_pko_dq_open(int node, int dq)
{
	cvmx_pko_query_rtn_t pko_status;
	pko_query_dqstatus_t dqstatus;
	cvmx_pko3_dq_params_t *p_param;

	if (debug)
		debug("%s: DEBUG: dq %u\n", __func__, dq);

	__cvmx_pko3_dq_param_setup(node);

	pko_status = __cvmx_pko3_do_dma(node, dq, NULL, 0, CVMX_PKO_DQ_OPEN);

	dqstatus = pko_status.s.dqstatus;

	if (dqstatus == PKO_DQSTATUS_ALREADY)
		return 0;
	if (dqstatus != PKO_DQSTATUS_PASS) {
		cvmx_printf("%s: ERROR: Failed to open dq :%u: %s\n", __func__,
			    dq, pko_dqstatus_error(dqstatus));
		return -1;
	}

	/* Setup the descriptor queue software parameters */
	p_param = cvmx_pko3_dq_parameters(node, dq);
	if (p_param) {
		p_param->depth = pko_status.s.depth;
		if (p_param->limit == 0)
			p_param->limit = 1024; /* last-resort default */
	}

	return 0;
}

/*
 * PKO initialization of MACs and FIFOs
 *
 * All MACs are configured and assigned a specific FIFO,
 * and each FIFO is sized for the best utilization of the
 * available FIFO resources.
 *
 * @param node specifies which node's PKO block to set up.
 * @return returns 0 if successful and -1 on failure.
 *
 * Note: This function contains model-specific code.
 */
static int cvmx_pko_setup_macs(int node)
{
	unsigned int interface;
	unsigned int port, num_ports;
	unsigned int mac_num, fifo, pri, cnt;
	cvmx_helper_interface_mode_t mode;
	const unsigned int num_interfaces =
		cvmx_helper_get_number_of_interfaces();
	u8 fifo_group_cfg[8];
	u8 fifo_group_spd[8];
	unsigned int fifo_count = 0;
	unsigned int max_fifos = 0, fifo_groups = 0;
	struct {
		u8 fifo_cnt;
		u8 fifo_id;
		u8 pri;
		u8 spd;
		u8 mac_fifo_cnt;
	} cvmx_pko3_mac_table[32];

	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		max_fifos = 28;	 /* exclusive of NULL FIFO */
		fifo_groups = 8; /* inclusive of NULL PTGF */
	}
	if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		max_fifos = 16;
		fifo_groups = 5;
	}

	/* Initialize FIFO allocation table */
	memset(&fifo_group_cfg, 0, sizeof(fifo_group_cfg));
	memset(&fifo_group_spd, 0, sizeof(fifo_group_spd));
	memset(cvmx_pko3_mac_table, 0, sizeof(cvmx_pko3_mac_table));

	/* Initialize all MACs as disabled */
	for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
		cvmx_pko3_mac_table[mac_num].pri = 0;
		cvmx_pko3_mac_table[mac_num].fifo_cnt = 0;
		cvmx_pko3_mac_table[mac_num].fifo_id = 0x1f;
	}

	for (interface = 0; interface < num_interfaces; interface++) {
		int xiface =
			cvmx_helper_node_interface_to_xiface(node, interface);
		/* Interface type for ALL interfaces */
		mode = cvmx_helper_interface_get_mode(xiface);
		num_ports = cvmx_helper_interface_enumerate(xiface);

		if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
			continue;
		/*
		 * Non-BGX interfaces:
		 * Each of these interfaces has a single MAC really.
		 */
		if (mode == CVMX_HELPER_INTERFACE_MODE_ILK ||
		    mode == CVMX_HELPER_INTERFACE_MODE_NPI ||
		    mode == CVMX_HELPER_INTERFACE_MODE_LOOP)
			num_ports = 1;

		for (port = 0; port < num_ports; port++) {
			int i;

			/* Get the per-port mode for BGX-interfaces */
			if (interface < CVMX_HELPER_MAX_GMX)
				mode = cvmx_helper_bgx_get_mode(xiface, port);
			/* In MIXED mode, LMACs can run different protocols */

			/* convert interface/port to mac number */
			i = __cvmx_pko3_get_mac_num(xiface, port);
			if (i < 0 || i >= (int)__cvmx_pko3_num_macs()) {
				cvmx_printf("%s: ERROR: interface %d:%u port %d has no MAC %d/%d\n",
					    __func__, node, interface, port, i,
					    __cvmx_pko3_num_macs());
				continue;
			}

			if (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI) {
				unsigned int bgx_fifo_size =
					__cvmx_helper_bgx_fifo_size(xiface,
								    port);

				cvmx_pko3_mac_table[i].mac_fifo_cnt =
					bgx_fifo_size /
					(CVMX_BGX_TX_FIFO_SIZE / 4);
				cvmx_pko3_mac_table[i].pri = 2;
				cvmx_pko3_mac_table[i].spd = 10;
				cvmx_pko3_mac_table[i].fifo_cnt = 2;
			} else if (mode == CVMX_HELPER_INTERFACE_MODE_XLAUI) {
				unsigned int bgx_fifo_size =
					__cvmx_helper_bgx_fifo_size(xiface,
								    port);

				cvmx_pko3_mac_table[i].mac_fifo_cnt =
					bgx_fifo_size /
					(CVMX_BGX_TX_FIFO_SIZE / 4);
				cvmx_pko3_mac_table[i].pri = 4;
				cvmx_pko3_mac_table[i].spd = 40;
				cvmx_pko3_mac_table[i].fifo_cnt = 4;
			} else if (mode == CVMX_HELPER_INTERFACE_MODE_XAUI) {
				unsigned int bgx_fifo_size =
					__cvmx_helper_bgx_fifo_size(xiface,
								    port);

				cvmx_pko3_mac_table[i].mac_fifo_cnt =
					bgx_fifo_size /
					(CVMX_BGX_TX_FIFO_SIZE / 4);
				cvmx_pko3_mac_table[i].pri = 3;
				cvmx_pko3_mac_table[i].fifo_cnt = 4;
				/* DXAUI at 20G, or XAUI at 10G */
				cvmx_pko3_mac_table[i].spd = 20;
			} else if (mode == CVMX_HELPER_INTERFACE_MODE_XFI) {
				unsigned int bgx_fifo_size =
					__cvmx_helper_bgx_fifo_size(xiface,
								    port);

				cvmx_pko3_mac_table[i].mac_fifo_cnt =
					bgx_fifo_size /
					(CVMX_BGX_TX_FIFO_SIZE / 4);
				cvmx_pko3_mac_table[i].pri = 3;
				cvmx_pko3_mac_table[i].fifo_cnt = 4;
				cvmx_pko3_mac_table[i].spd = 10;
			} else if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) {
				cvmx_pko3_mac_table[i].fifo_cnt = 1;
				cvmx_pko3_mac_table[i].pri = 1;
				cvmx_pko3_mac_table[i].spd = 1;
				cvmx_pko3_mac_table[i].mac_fifo_cnt = 1;
			} else if (mode == CVMX_HELPER_INTERFACE_MODE_ILK ||
				   mode == CVMX_HELPER_INTERFACE_MODE_SRIO) {
				cvmx_pko3_mac_table[i].fifo_cnt = 4;
				cvmx_pko3_mac_table[i].pri = 3;
				/* ILK/SRIO: speed depends on lane count */
				cvmx_pko3_mac_table[i].spd = 40;
				cvmx_pko3_mac_table[i].mac_fifo_cnt = 4;
			} else if (mode == CVMX_HELPER_INTERFACE_MODE_NPI) {
				cvmx_pko3_mac_table[i].fifo_cnt = 4;
				cvmx_pko3_mac_table[i].pri = 2;
				/* Actual speed depends on PCIe lanes/mode */
				cvmx_pko3_mac_table[i].spd = 50;
				/* SLI Tx FIFO size to be revisited */
				cvmx_pko3_mac_table[i].mac_fifo_cnt = 1;
			} else {
				/* Other BGX interface modes: SGMII/RGMII */
				unsigned int bgx_fifo_size =
					__cvmx_helper_bgx_fifo_size(xiface,
								    port);

				cvmx_pko3_mac_table[i].mac_fifo_cnt =
					bgx_fifo_size /
					(CVMX_BGX_TX_FIFO_SIZE / 4);
				cvmx_pko3_mac_table[i].fifo_cnt = 1;
				cvmx_pko3_mac_table[i].pri = 1;
				cvmx_pko3_mac_table[i].spd = 1;
			}

			if (debug)
				debug("%s: intf %d:%u port %u %s mac %02u cnt %u macfifo %uk spd %u\n",
				      __func__, node, interface, port,
				      cvmx_helper_interface_mode_to_string(mode),
				      i, cvmx_pko3_mac_table[i].fifo_cnt,
				      cvmx_pko3_mac_table[i].mac_fifo_cnt * 8,
				      cvmx_pko3_mac_table[i].spd);

		} /* for port */
	}	  /* for interface */

	/* Count the number of requested FIFOs */
	for (fifo_count = mac_num = 0; mac_num < __cvmx_pko3_num_macs();
	     mac_num++)
		fifo_count += cvmx_pko3_mac_table[mac_num].fifo_cnt;

	if (debug)
		debug("%s: initially requested FIFO count %u\n", __func__,
		      fifo_count);

	/* Heuristically trim FIFO count to fit in available number */
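	/*
	 * Starting with the lowest-priority MACs, halve the allocation of
	 * any MAC currently holding 'cnt' FIFOs, widening the priority
	 * threshold and then shrinking 'cnt' on each pass, until the total
	 * request fits into the FIFOs the hardware provides.
	 */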
	pri = 1;
	cnt = 4;
	while (fifo_count > max_fifos) {
		for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
			if (cvmx_pko3_mac_table[mac_num].fifo_cnt == cnt &&
			    cvmx_pko3_mac_table[mac_num].pri <= pri) {
				cvmx_pko3_mac_table[mac_num].fifo_cnt >>= 1;
				fifo_count -=
					cvmx_pko3_mac_table[mac_num].fifo_cnt;
			}
			if (fifo_count <= max_fifos)
				break;
		}
		if (pri >= 4) {
			pri = 1;
			cnt >>= 1;
		} else {
			pri++;
		}
		if (cnt == 0)
			break;
	}

	if (debug)
		debug("%s: adjusted FIFO count %u\n", __func__, fifo_count);

	/* Special case for NULL Virtual FIFO */
	fifo_group_cfg[fifo_groups - 1] = 0;
	/* there is no MAC connected to NULL FIFO */

	/* Configure MAC units, and attach a FIFO to each */
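	/*
	 * Each PTGF group covers four consecutive FIFO numbers sharing a
	 * 10 KiB buffer; the size code chosen below selects how that buffer
	 * is split among them (one 10 KiB FIFO, two 5 KiB FIFOs, or four
	 * 2.5 KiB FIFOs), as noted next to each assignment.
	 */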
	for (fifo = 0, cnt = 4; cnt > 0; cnt >>= 1) {
		unsigned int g;

		for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
			if (cvmx_pko3_mac_table[mac_num].fifo_cnt < cnt ||
			    cvmx_pko3_mac_table[mac_num].fifo_id != 0x1f)
				continue;

			/* Attach FIFO to MAC */
			cvmx_pko3_mac_table[mac_num].fifo_id = fifo;
			g = fifo >> 2;
			/* Sum speed for FIFO group */
			fifo_group_spd[g] += cvmx_pko3_mac_table[mac_num].spd;

			if (cnt == 4)
				fifo_group_cfg[g] = 4; /* 10k,0,0,0 */
			else if (cnt == 2 && (fifo & 0x3) == 0)
				fifo_group_cfg[g] = 3; /* 5k,0,5k,0 */
			else if (cnt == 2 && fifo_group_cfg[g] == 3)
				/* no change */;
			else if (cnt == 1 && (fifo & 0x2) &&
				 fifo_group_cfg[g] == 3)
				fifo_group_cfg[g] = 1; /* 5k,0,2.5k 2.5k*/
			else if (cnt == 1 && (fifo & 0x3) == 0x3)
				/* no change */;
			else if (cnt == 1)
				fifo_group_cfg[g] = 0; /* 2.5k x 4 */
			else
				cvmx_printf("ERROR: %s: internal error\n",
					    __func__);

			fifo += cnt;
		}
	}

	/* Check if there was no error in FIFO allocation */
	if (fifo > max_fifos) {
		cvmx_printf("ERROR: %s: Internal error FIFO %u\n", __func__,
			    fifo);
		return -1;
	}

	if (debug)
		debug("%s: used %u of FIFOs\n", __func__, fifo);

	/* Now configure all FIFO groups */
	for (fifo = 0; fifo < fifo_groups; fifo++) {
		cvmx_pko_ptgfx_cfg_t pko_ptgfx_cfg;

		pko_ptgfx_cfg.u64 = csr_rd_node(node, CVMX_PKO_PTGFX_CFG(fifo));
		if (pko_ptgfx_cfg.s.size != fifo_group_cfg[fifo])
			pko_ptgfx_cfg.s.reset = 1;
		pko_ptgfx_cfg.s.size = fifo_group_cfg[fifo];
		if (fifo_group_spd[fifo] >= 40) {
			if (pko_ptgfx_cfg.s.size >= 3)
				pko_ptgfx_cfg.s.rate = 3; /* 50 Gbps */
			else
				pko_ptgfx_cfg.s.rate = 2; /* 25 Gbps */
		} else if (fifo_group_spd[fifo] >= 20) {
			pko_ptgfx_cfg.s.rate = 2; /* 25 Gbps */
		} else if (fifo_group_spd[fifo] >= 10) {
			pko_ptgfx_cfg.s.rate = 1; /* 12.5 Gbps */
		} else {
			pko_ptgfx_cfg.s.rate = 0; /* 6.25 Gbps */
		}

		if (debug)
			debug("%s: FIFO %#x-%#x size=%u speed=%d rate=%d\n",
			      __func__, fifo * 4, fifo * 4 + 3,
			      pko_ptgfx_cfg.s.size, fifo_group_spd[fifo],
			      pko_ptgfx_cfg.s.rate);

		csr_wr_node(node, CVMX_PKO_PTGFX_CFG(fifo), pko_ptgfx_cfg.u64);
		pko_ptgfx_cfg.s.reset = 0;
		csr_wr_node(node, CVMX_PKO_PTGFX_CFG(fifo), pko_ptgfx_cfg.u64);
	}

	/* Configure all MACs with their assigned FIFO numbers */
	for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
		cvmx_pko_macx_cfg_t pko_mac_cfg;

		if (debug)
			debug("%s: mac#%02u: fifo=%#x cnt=%u speed=%d\n",
			      __func__, mac_num,
			      cvmx_pko3_mac_table[mac_num].fifo_id,
			      cvmx_pko3_mac_table[mac_num].fifo_cnt,
			      cvmx_pko3_mac_table[mac_num].spd);

		pko_mac_cfg.u64 = csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
		pko_mac_cfg.s.fifo_num = cvmx_pko3_mac_table[mac_num].fifo_id;
		csr_wr_node(node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
	}

	/* Setup PKO MCI0/MCI1/SKID credits */
	for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
		cvmx_pko_mci0_max_credx_t pko_mci0_max_cred;
		cvmx_pko_mci1_max_credx_t pko_mci1_max_cred;
		cvmx_pko_macx_cfg_t pko_mac_cfg;
		unsigned int fifo_credit, mac_credit, skid_credit;
		unsigned int pko_fifo_cnt, fifo_size;
		unsigned int mac_fifo_cnt;
		unsigned int tmp;
		int saved_fifo_num;

		pko_fifo_cnt = cvmx_pko3_mac_table[mac_num].fifo_cnt;
		mac_fifo_cnt = cvmx_pko3_mac_table[mac_num].mac_fifo_cnt;

		/* Skip unused MACs */
		if (pko_fifo_cnt == 0)
			continue;

		/* Check for sanity */
		if (pko_fifo_cnt > 4)
			pko_fifo_cnt = 1;

		fifo_size = (2 * 1024) + (1024 / 2); /* 2.5KiB */
		fifo_credit = pko_fifo_cnt * fifo_size;

		if (mac_num == 0) {
			/* loopback */
			mac_credit = 4096; /* From HRM Sec 13.0 */
			skid_credit = 0;
		} else if (mac_num == 1) {
			/* DPI */
			mac_credit = 2 * 1024;
			skid_credit = 0;
		} else if (octeon_has_feature(OCTEON_FEATURE_ILK) &&
			   (mac_num & 0xfe) == 2) {
			/* ILK0, ILK1: MAC 2,3 */
			mac_credit = 4 * 1024; /* 4KB fifo */
			skid_credit = 0;
		} else if (octeon_has_feature(OCTEON_FEATURE_SRIO) &&
			   (mac_num >= 6) && (mac_num <= 9)) {
			/* SRIO0, SRIO1: MAC 6..9 */
			mac_credit = 1024 / 2;
			skid_credit = 0;
		} else {
			/* BGX */
			mac_credit = mac_fifo_cnt * 8 * 1024;
			skid_credit = mac_fifo_cnt * 256;
		}

		if (debug)
			debug("%s: mac %u pko_fifo_credit=%u mac_credit=%u\n",
			      __func__, mac_num, fifo_credit, mac_credit);

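		/*
		 * The MCI credit limits appear to be expressed in 16-byte
		 * units, hence the division by 16 below.
		 */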
		tmp = (fifo_credit + mac_credit) / 16;
		pko_mci0_max_cred.u64 = 0;
		pko_mci0_max_cred.s.max_cred_lim = tmp;

		/* Check for overflow */
		if (pko_mci0_max_cred.s.max_cred_lim != tmp) {
			cvmx_printf("WARNING: %s: MCI0 credit overflow\n",
				    __func__);
			pko_mci0_max_cred.s.max_cred_lim = 0xfff;
		}

		/* Pass 2 PKO hardware does not use the MCI0 credits */
		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
			csr_wr_node(node, CVMX_PKO_MCI0_MAX_CREDX(mac_num),
				    pko_mci0_max_cred.u64);

		/* The original CSR formula is the correct one after all */
		tmp = (mac_credit) / 16;
		pko_mci1_max_cred.u64 = 0;
		pko_mci1_max_cred.s.max_cred_lim = tmp;

		/* Check for overflow */
		if (pko_mci1_max_cred.s.max_cred_lim != tmp) {
			cvmx_printf("WARNING: %s: MCI1 credit overflow\n",
				    __func__);
			pko_mci1_max_cred.s.max_cred_lim = 0xfff;
		}

		csr_wr_node(node, CVMX_PKO_MCI1_MAX_CREDX(mac_num),
			    pko_mci1_max_cred.u64);

		tmp = (skid_credit / 256) >> 1; /* valid 0,1,2 */
		pko_mac_cfg.u64 = csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));

		/* The PKO_MACX_CFG bits cannot be changed unless FIFO_NUM=0x1f (unused fifo) */
		saved_fifo_num = pko_mac_cfg.s.fifo_num;
		pko_mac_cfg.s.fifo_num = 0x1f;
		pko_mac_cfg.s.skid_max_cnt = tmp;
		csr_wr_node(node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);

		pko_mac_cfg.u64 = csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
		pko_mac_cfg.s.fifo_num = saved_fifo_num;
		csr_wr_node(node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);

		if (debug) {
			pko_mci0_max_cred.u64 =
				csr_rd_node(node, CVMX_PKO_MCI0_MAX_CREDX(mac_num));
			pko_mci1_max_cred.u64 =
				csr_rd_node(node, CVMX_PKO_MCI1_MAX_CREDX(mac_num));
			pko_mac_cfg.u64 =
				csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
			debug("%s: mac %u PKO_MCI0_MAX_CREDX=%u PKO_MCI1_MAX_CREDX=%u PKO_MACX_CFG[SKID_MAX_CNT]=%u\n",
			      __func__, mac_num,
			      pko_mci0_max_cred.s.max_cred_lim,
			      pko_mci1_max_cred.s.max_cred_lim,
			      pko_mac_cfg.s.skid_max_cnt);
		}
	} /* for mac_num */

	return 0;
}

/** Set MAC options
 *
 * The options supported are the parameters below:
 *
 * @param xiface The physical interface number
 * @param index The physical sub-interface port
 * @param fcs_enable Enable FCS generation
 * @param pad_enable Enable padding to minimum packet size
 * @param fcs_sop_off Number of bytes at start of packet to exclude from FCS
 *
 * The typical use for `fcs_sop_off` is when the interface is configured
 * to precede every Ethernet packet with a header such as HighGig;
 * such a header usually does not take part in the CRC32 computation
 * stream, so its size must be set with this parameter.
 *
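 * Illustrative call for a plain Ethernet port, enabling FCS and padding
 * with no start-of-packet offset:
 *
 *	cvmx_pko3_interface_options(xiface, index, true, true, 0);
 *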
 * @return Returns 0 on success, -1 if interface/port is invalid.
 */
int cvmx_pko3_interface_options(int xiface, int index, bool fcs_enable,
				bool pad_enable, unsigned int fcs_sop_off)
{
	int mac_num;
	cvmx_pko_macx_cfg_t pko_mac_cfg;
	unsigned int fifo_num;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (debug)
		debug("%s: intf %u:%u/%u fcs=%d pad=%d\n", __func__, xi.node,
		      xi.interface, index, fcs_enable, pad_enable);

	mac_num = __cvmx_pko3_get_mac_num(xiface, index);
	if (mac_num < 0) {
		cvmx_printf("ERROR: %s: invalid interface %u:%u/%u\n", __func__,
			    xi.node, xi.interface, index);
		return -1;
	}

	pko_mac_cfg.u64 = csr_rd_node(xi.node, CVMX_PKO_MACX_CFG(mac_num));

	/* If MAC is not assigned, return an error */
	if (pko_mac_cfg.s.fifo_num == 0x1f) {
		cvmx_printf("ERROR: %s: unused interface %u:%u/%u\n", __func__,
			    xi.node, xi.interface, index);
		return -1;
	}

	if (pko_mac_cfg.s.min_pad_ena == pad_enable &&
	    pko_mac_cfg.s.fcs_ena == fcs_enable) {
		if (debug)
			debug("%s: mac %#x unchanged\n", __func__, mac_num);
		return 0;
	}

	/* WORKAROUND: Pass1 won't allow changing any bits unless FIFO_NUM=0x1f */
	fifo_num = pko_mac_cfg.s.fifo_num;
	pko_mac_cfg.s.fifo_num = 0x1f;

	pko_mac_cfg.s.min_pad_ena = pad_enable;
	pko_mac_cfg.s.fcs_ena = fcs_enable;
	pko_mac_cfg.s.fcs_sop_off = fcs_sop_off;

	csr_wr_node(xi.node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);

	pko_mac_cfg.s.fifo_num = fifo_num;
	csr_wr_node(xi.node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);

	if (debug)
		debug("%s: PKO_MAC[%u]CFG=%#llx\n", __func__, mac_num,
		      (unsigned long long)csr_rd_node(xi.node, CVMX_PKO_MACX_CFG(mac_num)));

	return 0;
}

/** Set Descriptor Queue options
 *
 * The `min_pad` parameter must be in agreement with the interface-level
 * padding option for all descriptor queues assigned to that particular
 * interface/port.
 *
 * @param node on which to operate
 * @param dq descriptor queue to set
 * @param min_pad minimum padding to set for dq
 */
void cvmx_pko3_dq_options(unsigned int node, unsigned int dq, bool min_pad)
{
	cvmx_pko_pdm_dqx_minpad_t reg;

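	/* DQ numbers are 10 bits wide, so mask off any higher-order (node) bits */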
	dq &= (1 << 10) - 1;
	reg.u64 = csr_rd_node(node, CVMX_PKO_PDM_DQX_MINPAD(dq));
	reg.s.minpad = min_pad;
	csr_wr_node(node, CVMX_PKO_PDM_DQX_MINPAD(dq), reg.u64);
}
