1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2018-2022 Marvell International Ltd.
4 *
5 * PKOv3 helper file
6 */
7
8#include <errno.h>
9#include <log.h>
10#include <time.h>
11#include <linux/delay.h>
12
13#include <mach/cvmx-regs.h>
14#include <mach/cvmx-csr.h>
15#include <mach/cvmx-bootmem.h>
16#include <mach/octeon-model.h>
17#include <mach/cvmx-fuse.h>
18#include <mach/octeon-feature.h>
19#include <mach/cvmx-qlm.h>
20#include <mach/octeon_qlm.h>
21#include <mach/cvmx-pcie.h>
22#include <mach/cvmx-coremask.h>
23#include <mach/cvmx-range.h>
24#include <mach/cvmx-global-resources.h>
25
26#include <mach/cvmx-agl-defs.h>
27#include <mach/cvmx-bgxx-defs.h>
28#include <mach/cvmx-ciu-defs.h>
29#include <mach/cvmx-gmxx-defs.h>
30#include <mach/cvmx-gserx-defs.h>
31#include <mach/cvmx-ilk-defs.h>
32#include <mach/cvmx-ipd-defs.h>
33#include <mach/cvmx-pcsx-defs.h>
34#include <mach/cvmx-pcsxx-defs.h>
35#include <mach/cvmx-pki-defs.h>
36#include <mach/cvmx-pko-defs.h>
37#include <mach/cvmx-xcv-defs.h>
38
39#include <mach/cvmx-hwpko.h>
40#include <mach/cvmx-ilk.h>
41#include <mach/cvmx-ipd.h>
42#include <mach/cvmx-pki.h>
43#include <mach/cvmx-pko3.h>
44#include <mach/cvmx-pko3-queue.h>
45#include <mach/cvmx-pko3-resources.h>
46
47#include <mach/cvmx-helper.h>
48#include <mach/cvmx-helper-board.h>
49#include <mach/cvmx-helper-cfg.h>
50
51#include <mach/cvmx-helper-bgx.h>
52#include <mach/cvmx-helper-cfg.h>
53#include <mach/cvmx-helper-util.h>
54#include <mach/cvmx-helper-pki.h>
55
/* channels are present at L2 queue level by default */
static const enum cvmx_pko3_level_e cvmx_pko_default_channel_level =
	CVMX_PKO_L2_QUEUES;

/* Compile-time debug switch: constant zero, so the debug() calls guarded
 * by it compile out; set non-zero locally when diagnosing queue setup.
 */
static const int debug;

/* Global packet budget across all interfaces and per-interface quota,
 * both derived from the AURA buffer count in __cvmx_pko3_config_memory()
 * and consumed in __cvmx_pko3_helper_dqs_activate().
 */
static int __pko_pkt_budget, __pko_pkt_quota;

/* These global variables are relevant for boot CPU only */
static cvmx_fpa3_gaura_t __cvmx_pko3_aura[CVMX_MAX_NODES];

/* This constant can not be modified, defined here for clarity only */
#define CVMX_PKO3_POOL_BUFFER_SIZE 4096 /* 78XX PKO requires 4KB */
69
70/**
71 * @INTERNAL
72 *
73 * Build an owner tag based on interface/port
74 */
static int __cvmx_helper_pko3_res_owner(int ipd_port)
{
	/* Owner tag layout: fixed 0x19d0 prefix in the upper bits,
	 * the IPD port (local CHAN_E value + node) masked to the low
	 * 14 bits in the remainder.
	 */
	const int prefix = 0x19d0 << 14;
	const int port_bits = ipd_port & 0x3fff;

	return prefix | port_bits;
}
86
87/**
88 * Configure an AURA/POOL designated for PKO internal use.
89 *
90 * This pool is used for (a) memory buffers that store PKO descriptor queues,
91 * (b) buffers for use with PKO_SEND_JUMP_S sub-header.
92 *
93 * The buffers of type (a) are never accessed by software, and their number
94 * should be at least equal to 4 times the number of descriptor queues
95 * in use.
96 *
97 * Type (b) buffers are consumed by PKO3 command-composition code,
98 * and are released by the hardware upon completion of transmission.
99 *
100 * @returns -1 if the pool could not be established or 12-bit AURA
101 * that includes the node number for use in PKO3 initialization call.
102 *
103 * NOTE: Linux kernel should pass its own aura to PKO3 initialization
104 * function so that the buffers can be mapped into kernel space
 * for when software needs to access their contents.
106 *
107 */
static int __cvmx_pko3_config_memory(unsigned int node)
{
	cvmx_fpa3_gaura_t aura;
	int aura_num;
	unsigned int buf_count;
	bool small_mem;
	int i, num_intf = 0;
	/* Packets per 4KB buffer: each descriptor takes 16 64-bit words,
	 * so 4096 / 8 / 16 = 32 packets per buffer.
	 */
	const unsigned int pkt_per_buf =
		(CVMX_PKO3_POOL_BUFFER_SIZE / sizeof(u64) / 16);
	const unsigned int base_buf_count = 1024 * 4;

	/* Simulator has limited memory, but uses one interface at a time */
	//	small_mem = cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM;
	small_mem = false;

	/* Count the number of live interfaces */
	for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
		int xiface = cvmx_helper_node_interface_to_xiface(node, i);

		if (CVMX_HELPER_INTERFACE_MODE_DISABLED !=
		    cvmx_helper_interface_get_mode(xiface))
			num_intf++;
	}

	/* Fixed buffer count; per-interface quota and the global budget
	 * used by __cvmx_pko3_helper_dqs_activate() are derived from it.
	 */
	buf_count = 1024;
	__pko_pkt_quota = buf_count * pkt_per_buf;
	__pko_pkt_budget = __pko_pkt_quota * num_intf;
	/* Suppress unused-variable warnings; retained for reference */
	(void)small_mem;
	(void)base_buf_count;

	if (debug)
		debug("%s: Creating AURA with %u buffers for up to %d total packets, %d packets per interface\n",
		      __func__, buf_count, __pko_pkt_budget, __pko_pkt_quota);

	aura = cvmx_fpa3_setup_aura_and_pool(node, -1, "PKO3 AURA", NULL,
					     CVMX_PKO3_POOL_BUFFER_SIZE,
					     buf_count);

	if (!__cvmx_fpa3_aura_valid(aura)) {
		printf("ERROR: %s AURA create failed\n", __func__);
		return -1;
	}

	/* 12-bit AURA handle: node in bits [11:10], local AURA in [9:0] */
	aura_num = aura.node << 10 | aura.laura;

	/* Store handle for destruction */
	__cvmx_pko3_aura[node] = aura;

	return aura_num;
}
158
159/** Initialize a channelized port
160 * This is intended for LOOP, ILK and NPI interfaces which have one MAC
161 * per interface and need a channel per subinterface (e.g. ring).
162 * Each channel then may have 'num_queues' descriptor queues
163 * attached to it, which can also be prioritized or fair.
164 */
165static int __cvmx_pko3_config_chan_interface(int xiface, unsigned int num_chans,
166					     u8 num_queues, bool prioritized)
167{
168	int l1_q_num;
169	int l2_q_base;
170	enum cvmx_pko3_level_e level;
171	int res;
172	int parent_q, child_q;
173	unsigned int chan, dq;
174	int pko_mac_num;
175	u16 ipd_port;
176	int res_owner, prio;
177	unsigned int i;
178	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
179	unsigned int node = xi.node;
180	char b1[12];
181
182	if (num_queues == 0)
183		num_queues = 1;
184	if ((cvmx_pko3_num_level_queues(CVMX_PKO_DESCR_QUEUES) / num_chans) < 3)
185		num_queues = 1;
186
187	if (prioritized && num_queues > 1)
188		prio = num_queues;
189	else
190		prio = -1;
191
192	if (debug)
193		debug("%s: configuring xiface %u:%u with %u chans %u queues each\n",
194		      __func__, xi.node, xi.interface, num_chans, num_queues);
195
196	/* all channels all go to the same mac */
197	pko_mac_num = __cvmx_pko3_get_mac_num(xiface, 0);
198	if (pko_mac_num < 0) {
199		printf("ERROR: %s: Invalid interface\n", __func__);
200		return -1;
201	}
202
203	/* Resources of all channels on this port have common owner */
204	ipd_port = cvmx_helper_get_ipd_port(xiface, 0);
205
206	/* Build an identifiable owner */
207	res_owner = __cvmx_helper_pko3_res_owner(ipd_port);
208
209	/* Start configuration at L1/PQ */
210	level = CVMX_PKO_PORT_QUEUES;
211
212	/* Reserve port queue to make sure the MAC is not already configured */
213	l1_q_num = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);
214
215	if (l1_q_num < 0) {
216		printf("ERROR: %s: Reserving L1 PQ\n", __func__);
217		return -1;
218	}
219
220	res = cvmx_pko3_pq_config(node, pko_mac_num, l1_q_num);
221	if (res < 0) {
222		printf("ERROR: %s: Configuring L1 PQ\n", __func__);
223		return -1;
224	}
225
226	/* next queue level = L2/SQ */
227	level = __cvmx_pko3_sq_lvl_next(level);
228
229	/* allocate level 2 queues, one per channel */
230	l2_q_base =
231		cvmx_pko_alloc_queues(node, level, res_owner, -1, num_chans);
232	if (l2_q_base < 0) {
233		printf("ERROR: %s: allocation L2 SQ\n", __func__);
234		return -1;
235	}
236
237	/* Configre <num_chans> L2 children for PQ, non-prioritized */
238	res = cvmx_pko3_sq_config_children(node, level, l1_q_num, l2_q_base,
239					   num_chans, -1);
240
241	if (res < 0) {
242		printf("ERROR: %s: Failed channel queues\n", __func__);
243		return -1;
244	}
245
246	/* map channels to l2 queues */
247	for (chan = 0; chan < num_chans; chan++) {
248		ipd_port = cvmx_helper_get_ipd_port(xiface, chan);
249		cvmx_pko3_map_channel(node, l1_q_num, l2_q_base + chan,
250				      ipd_port);
251	}
252
253	/* next queue level = L3/SQ */
254	level = __cvmx_pko3_sq_lvl_next(level);
255	parent_q = l2_q_base;
256
257	do {
258		child_q = cvmx_pko_alloc_queues(node, level, res_owner, -1,
259						num_chans);
260
261		if (child_q < 0) {
262			printf("ERROR: %s: allocating %s\n", __func__,
263			       __cvmx_pko3_sq_str(b1, level, child_q));
264			return -1;
265		}
266
267		for (i = 0; i < num_chans; i++) {
268			res = cvmx_pko3_sq_config_children(
269				node, level, parent_q + i, child_q + i, 1, 1);
270
271			if (res < 0) {
272				printf("ERROR: %s: configuring %s\n", __func__,
273				       __cvmx_pko3_sq_str(b1, level, child_q));
274				return -1;
275			}
276
277		} /* for i */
278
279		parent_q = child_q;
280		level = __cvmx_pko3_sq_lvl_next(level);
281
282		/* Terminate loop on DQ level, it has special handling */
283	} while (level != CVMX_PKO_DESCR_QUEUES &&
284		 level != CVMX_PKO_LEVEL_INVAL);
285
286	if (level != CVMX_PKO_DESCR_QUEUES) {
287		printf("ERROR: %s: level sequence error\n", __func__);
288		return -1;
289	}
290
291	/* Configure DQs, num_dqs per chan */
292	for (chan = 0; chan < num_chans; chan++) {
293		res = cvmx_pko_alloc_queues(node, level, res_owner, -1,
294					    num_queues);
295
296		if (res < 0)
297			goto _fail;
298		dq = res;
299
300		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0) && (dq & 7))
301			debug("WARNING: %s: DQ# %u not integral of 8\n",
302			      __func__, dq);
303
304		res = cvmx_pko3_sq_config_children(node, level, parent_q + chan,
305						   dq, num_queues, prio);
306		if (res < 0)
307			goto _fail;
308
309		/* register DQ range with the translation table */
310		res = __cvmx_pko3_ipd_dq_register(xiface, chan, dq, num_queues);
311		if (res < 0)
312			goto _fail;
313	}
314
315	return 0;
316_fail:
317	debug("ERROR: %s: configuring queues for xiface %u:%u chan %u\n",
318	      __func__, xi.node, xi.interface, i);
319	return -1;
320}
321
322/** Initialize a single Ethernet port with PFC-style channels
323 *
324 * One interface can contain multiple ports, this function is per-port
325 * Here, a physical port is allocated 8 logical channel, one per VLAN
326 * tag priority, one DQ is assigned to each channel, and all 8 DQs
327 * are registered for that IPD port.
 * Note that the DQs are arranged such that the Ethernet QoS/PCP field
329 * can be used as an offset to the value returned by cvmx_pko_base_queue_get().
330 *
331 * For HighGig2 mode, 16 channels may be desired, instead of 8,
332 * but this function does not support that.
333 */
static int __cvmx_pko3_config_pfc_interface(int xiface, unsigned int port)
{
	enum cvmx_pko3_level_e level;
	int pko_mac_num;
	int l1_q_num, l2_q_base;
	int child_q, parent_q;
	int dq_base;
	int res;
	/* One channel per VLAN PCP value (HighGig2's 16 channels are not
	 * supported here, per the function header comment)
	 */
	const unsigned int num_chans = 8;
	cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
	unsigned int node = xi.node;
	u16 ipd_port;
	int res_owner;
	char b1[12];
	unsigned int i;

	if (debug)
		debug("%s: configuring xiface %u:%u port %u with %u PFC channels\n",
		      __func__, node, xi.interface, port, num_chans);

	/* Get MAC number for the iface/port */
	pko_mac_num = __cvmx_pko3_get_mac_num(xiface, port);
	if (pko_mac_num < 0) {
		printf("ERROR: %s: Invalid interface\n", __func__);
		return -1;
	}

	ipd_port = cvmx_helper_get_ipd_port(xiface, port);

	/* Build an identifiable owner identifier */
	res_owner = __cvmx_helper_pko3_res_owner(ipd_port);

	level = CVMX_PKO_PORT_QUEUES;

	/* Allocate port queue to make sure the MAC is not already configured */
	l1_q_num = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);

	if (l1_q_num < 0) {
		printf("ERROR: %s: allocation L1 PQ\n", __func__);
		return -1;
	}

	res = cvmx_pko3_pq_config(xi.node, pko_mac_num, l1_q_num);
	if (res < 0) {
		printf("ERROR: %s: Configuring %s\n", __func__,
		       __cvmx_pko3_sq_str(b1, level, l1_q_num));
		return -1;
	}

	/* Determine the next queue level */
	level = __cvmx_pko3_sq_lvl_next(level);

	/* Allocate 'num_chans' L2 queues, one per channel */
	l2_q_base =
		cvmx_pko_alloc_queues(node, level, res_owner, -1, num_chans);
	if (l2_q_base < 0) {
		printf("ERROR: %s: allocation L2 SQ\n", __func__);
		return -1;
	}

	/* Configure <num_chans> L2 children for PQ, with static priority */
	res = cvmx_pko3_sq_config_children(node, level, l1_q_num, l2_q_base,
					   num_chans, num_chans);

	if (res < 0) {
		printf("ERROR: %s: Configuring %s for PFC\n", __func__,
		       __cvmx_pko3_sq_str(b1, level, l1_q_num));
		return -1;
	}

	/* Map each of the allocated channels */
	for (i = 0; i < num_chans; i++) {
		u16 chan;

		/* Get CHAN_E value for this PFC channel, PCP in low 3 bits */
		chan = ipd_port | cvmx_helper_prio2qos(i);

		cvmx_pko3_map_channel(node, l1_q_num, l2_q_base + i, chan);
	}

	/* Iterate through the levels until DQ and allocate 'num_chans'
	 * consecutive queues at each level and hook them up
	 * one-to-one with the parent level queues
	 */

	parent_q = l2_q_base;
	level = __cvmx_pko3_sq_lvl_next(level);

	do {
		child_q = cvmx_pko_alloc_queues(node, level, res_owner, -1,
						num_chans);

		if (child_q < 0) {
			printf("ERROR: %s: allocating %s\n", __func__,
			       __cvmx_pko3_sq_str(b1, level, child_q));
			return -1;
		}

		for (i = 0; i < num_chans; i++) {
			res = cvmx_pko3_sq_config_children(
				node, level, parent_q + i, child_q + i, 1, 1);

			if (res < 0) {
				printf("ERROR: %s: configuring %s\n", __func__,
				       __cvmx_pko3_sq_str(b1, level, child_q));
				return -1;
			}

		} /* for i */

		parent_q = child_q;
		level = __cvmx_pko3_sq_lvl_next(level);

		/* Terminate loop on DQ level, it has special handling */
	} while (level != CVMX_PKO_DESCR_QUEUES &&
		 level != CVMX_PKO_LEVEL_INVAL);

	if (level != CVMX_PKO_DESCR_QUEUES) {
		printf("ERROR: %s: level sequence error\n", __func__);
		return -1;
	}

	/* Allocate one DQ per channel at the DQ level */
	dq_base = cvmx_pko_alloc_queues(node, level, res_owner, -1, num_chans);
	if (dq_base < 0) {
		printf("ERROR: %s: allocating %s\n", __func__,
		       __cvmx_pko3_sq_str(b1, level, dq_base));
		return -1;
	}

	/* Configure DQs in QoS order, so that QoS/PCP can be index */
	for (i = 0; i < num_chans; i++) {
		int dq_num = dq_base + cvmx_helper_prio2qos(i);

		res = cvmx_pko3_sq_config_children(node, level, parent_q + i,
						   dq_num, 1, 1);
		if (res < 0) {
			printf("ERROR: %s: configuring %s\n", __func__,
			       __cvmx_pko3_sq_str(b1, level, dq_num));
			return -1;
		}
	}

	/* register entire DQ range with the IPD translation table */
	__cvmx_pko3_ipd_dq_register(xiface, port, dq_base, num_chans);

	return 0;
}
481
482/**
 * Initialize a simple interface with a given number of
484 * fair or prioritized queues.
485 * This function will assign one channel per sub-interface.
486 */
int __cvmx_pko3_config_gen_interface(int xiface, uint8_t subif, u8 num_queues,
				     bool prioritized)
{
	cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);
	u8 node = xi.node;
	int l1_q_num;
	int parent_q, child_q;
	int dq;
	int res, res_owner;
	int pko_mac_num;
	enum cvmx_pko3_level_e level;
	u16 ipd_port;
	int static_pri;
	char b1[12];

	/* NOTE(review): num_queues is unconditionally forced to 1 here,
	 * overriding the caller's value and making the zero-check and the
	 * static_pri selection below effectively dead code -- confirm
	 * whether this override is intentional before removing it.
	 */
	num_queues = 1;

	if (num_queues == 0) {
		num_queues = 1;
		printf("WARNING: %s: xiface %#x misconfigured\n", __func__,
		       xiface);
	}

	/* Configure DQs relative priority (a.k.a. scheduling) */
	if (prioritized) {
		/* With 8 queues or fewer, use static priority, else WRR */
		static_pri = (num_queues < 9) ? num_queues : 0;
	} else {
		/* Set equal-RR scheduling among queues */
		static_pri = -1;
	}

	if (debug)
		debug("%s: configuring xiface %u:%u/%u nq=%u %s\n", __func__,
		      xi.node, xi.interface, subif, num_queues,
		      (prioritized) ? "qos" : "fair");

	/* Get MAC number for the iface/port */
	pko_mac_num = __cvmx_pko3_get_mac_num(xiface, subif);
	if (pko_mac_num < 0) {
		printf("ERROR: %s: Invalid interface %u:%u\n", __func__,
		       xi.node, xi.interface);
		return -1;
	}

	ipd_port = cvmx_helper_get_ipd_port(xiface, subif);

	if (debug)
		debug("%s: xiface %u:%u/%u ipd_port=%#03x\n", __func__, xi.node,
		      xi.interface, subif, ipd_port);

	/* Build an identifiable owner identifier */
	res_owner = __cvmx_helper_pko3_res_owner(ipd_port);

	level = CVMX_PKO_PORT_QUEUES;

	/* Reserve port queue to make sure the MAC is not already configured */
	l1_q_num = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);

	if (l1_q_num < 0) {
		printf("ERROR %s: xiface %u:%u/%u failed allocation L1 PQ\n",
		       __func__, xi.node, xi.interface, subif);
		return -1;
	}

	res = cvmx_pko3_pq_config(node, pko_mac_num, l1_q_num);
	if (res < 0) {
		printf("ERROR %s: Configuring L1 PQ\n", __func__);
		return -1;
	}

	parent_q = l1_q_num;

	/* Determine the next queue level */
	level = __cvmx_pko3_sq_lvl_next(level);

	/* Simply chain queues 1-to-1 from L2 to one before DQ level */
	do {
		/* allocate next level queue */
		child_q = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);

		if (child_q < 0) {
			printf("ERROR: %s: allocating %s\n", __func__,
			       __cvmx_pko3_sq_str(b1, level, child_q));
			return -1;
		}

		/* Configure newly allocated queue */
		res = cvmx_pko3_sq_config_children(node, level, parent_q,
						   child_q, 1, 1);

		if (res < 0) {
			printf("ERROR: %s: configuring %s\n", __func__,
			       __cvmx_pko3_sq_str(b1, level, child_q));
			return -1;
		}

		/* map IPD/channel to L2/L3 queues */
		if (level == cvmx_pko_default_channel_level)
			cvmx_pko3_map_channel(node, l1_q_num, child_q,
					      ipd_port);

		/* Prepare for next level */
		level = __cvmx_pko3_sq_lvl_next(level);
		parent_q = child_q;

		/* Terminate loop on DQ level, it has special handling */
	} while (level != CVMX_PKO_DESCR_QUEUES &&
		 level != CVMX_PKO_LEVEL_INVAL);

	if (level != CVMX_PKO_DESCR_QUEUES) {
		printf("ERROR: %s: level sequence error\n", __func__);
		return -1;
	}

	/* Allocate descriptor queues for the port */
	dq = cvmx_pko_alloc_queues(node, level, res_owner, -1, num_queues);
	if (dq < 0) {
		printf("ERROR: %s: could not reserve DQs\n", __func__);
		return -1;
	}

	res = cvmx_pko3_sq_config_children(node, level, parent_q, dq,
					   num_queues, static_pri);
	if (res < 0) {
		printf("ERROR: %s: configuring %s\n", __func__,
		       __cvmx_pko3_sq_str(b1, level, dq));
		return -1;
	}

	/* register DQ/IPD translation */
	__cvmx_pko3_ipd_dq_register(xiface, subif, dq, num_queues);

	if (debug)
		debug("%s: xiface %u:%u/%u qs %u-%u\n", __func__, xi.node,
		      xi.interface, subif, dq, dq + num_queues - 1);
	return 0;
}
625
626/** Initialize the NULL interface
627 *
628 * A NULL interface is a special case in that it is not
629 * one of the enumerated interfaces in the system, and does
630 * not apply to input either. Still, it can be very handy
631 * for dealing with packets that should be discarded in
632 * a generic, streamlined way.
633 *
634 * The Descriptor Queue 0 will be reserved for the NULL interface
635 * and the normalized (i.e. IPD) port number has the all-ones value.
636 */
static int __cvmx_pko3_config_null_interface(unsigned int node)
{
	int l1_q_num;
	int parent_q, child_q;
	enum cvmx_pko3_level_e level;
	int i, res, res_owner;
	int xiface, ipd_port;
	int num_dq = 1;	  /* # of DQs for NULL */
	const int dq = 0; /* Reserve DQ#0 for NULL */
	char pko_mac_num;
	char b1[12];

	/* Pick the model-specific virtual MAC number used for NULL */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		pko_mac_num = 0x1C; /* MAC# 28 virtual MAC for NULL */
	else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		pko_mac_num = 0x0F; /* MAC# 15 (0x0f) virtual MAC for NULL;
				     * original comment said 16 -- verify
				     * against the HRM
				     */
	else if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		pko_mac_num = 0x0A; /* MAC# 10 virtual MAC for NULL */
	else
		return -1;

	/* Pass 1.0 silicon needs 8 DQs (see the DQ alignment warning in
	 * __cvmx_pko3_config_chan_interface)
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
		num_dq = 8;

	if (debug)
		debug("%s: null iface dq=%u-%u\n", __func__, dq,
		      dq + num_dq - 1);

	ipd_port = cvmx_helper_node_to_ipd_port(node, CVMX_PKO3_IPD_PORT_NULL);

	/* Build an identifiable owner identifier by MAC# for easy release */
	res_owner = __cvmx_helper_pko3_res_owner(ipd_port);
	if (res_owner < 0) {
		debug("%s: ERROR Invalid interface\n", __func__);
		return -1;
	}

	level = CVMX_PKO_PORT_QUEUES;

	/* Allocate a port queue */
	l1_q_num = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);

	if (l1_q_num < 0) {
		debug("%s: ERROR reserving L1 SQ\n", __func__);
		return -1;
	}

	res = cvmx_pko3_pq_config(node, pko_mac_num, l1_q_num);
	if (res < 0) {
		printf("ERROR: %s: PQ/L1 queue configuration\n", __func__);
		return -1;
	}

	parent_q = l1_q_num;

	/* Determine the next queue level */
	level = __cvmx_pko3_sq_lvl_next(level);

	/* Simply chain queues 1-to-1 from L2 to one before DQ level */
	do {
		/* allocate next level queue */
		child_q = cvmx_pko_alloc_queues(node, level, res_owner, -1, 1);

		if (child_q < 0) {
			printf("ERROR: %s: allocating %s\n", __func__,
			       __cvmx_pko3_sq_str(b1, level, child_q));
			return -1;
		}

		/* Configure newly allocated queue */
		res = cvmx_pko3_sq_config_children(node, level, parent_q,
						   child_q, 1, 1);

		if (res < 0) {
			printf("ERROR: %s: configuring %s\n", __func__,
			       __cvmx_pko3_sq_str(b1, level, child_q));
			return -1;
		}

		/* Prepare for next level */
		level = __cvmx_pko3_sq_lvl_next(level);
		parent_q = child_q;

		/* Terminate loop on DQ level, it has special handling */
	} while (level != CVMX_PKO_DESCR_QUEUES &&
		 level != CVMX_PKO_LEVEL_INVAL);

	if (level != CVMX_PKO_DESCR_QUEUES) {
		printf("ERROR: %s: level sequence error\n", __func__);
		return -1;
	}

	/* Reserve 'num_dq' DQ's at 0 by convention */
	res = cvmx_pko_alloc_queues(node, level, res_owner, dq, num_dq);
	if (dq != res) {
		debug("%s: ERROR: could not reserve DQs\n", __func__);
		return -1;
	}

	res = cvmx_pko3_sq_config_children(node, level, parent_q, dq, num_dq,
					   num_dq);
	if (res < 0) {
		printf("ERROR: %s: configuring %s\n", __func__,
		       __cvmx_pko3_sq_str(b1, level, dq));
		return -1;
	}

	/* NULL interface does not need to map to a CHAN_E */

	/* register DQ/IPD translation */
	xiface = cvmx_helper_node_interface_to_xiface(node, __CVMX_XIFACE_NULL);
	__cvmx_pko3_ipd_dq_register(xiface, 0, dq, num_dq);

	/* open the null DQs here */
	for (i = 0; i < num_dq; i++) {
		unsigned int limit = 128; /* NULL never really uses much */

		cvmx_pko_dq_open(node, dq + i);
		cvmx_pko3_dq_set_limit(node, dq + i, limit);
	}

	return 0;
}
760
761/** Open all descriptor queues belonging to an interface/port
762 * @INTERNAL
763 */
int __cvmx_pko3_helper_dqs_activate(int xiface, int index, bool min_pad)
{
	int ipd_port, dq_base, dq_count, i;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	unsigned int limit;

	/* Get local IPD port for the interface */
	ipd_port = cvmx_helper_get_ipd_port(xiface, index);
	if (ipd_port < 0) {
		printf("ERROR: %s: No IPD port for interface %d port %d\n",
		       __func__, xiface, index);
		return -1;
	}

	/* Get DQ# range for the IPD port */
	dq_base = cvmx_pko3_get_queue_base(ipd_port);
	dq_count = cvmx_pko3_get_queue_num(ipd_port);
	if (dq_base < 0 || dq_count <= 0) {
		printf("ERROR: %s: No descriptor queues for interface %d port %d\n",
		       __func__, xiface, index);
		return -1;
	}

	/* Mask out node from global DQ# */
	dq_base &= (1 << 10) - 1;

	/* Split the interface's packet quota evenly across its DQs and
	 * ports; quota was set up in __cvmx_pko3_config_memory()
	 */
	limit = __pko_pkt_quota / dq_count /
		cvmx_helper_interface_enumerate(xiface);

	for (i = 0; i < dq_count; i++) {
		/* FIXME: 2ms at 1Gbps max packet rate, make speed dependent */
		cvmx_pko_dq_open(xi.node, dq_base + i);
		cvmx_pko3_dq_options(xi.node, dq_base + i, min_pad);

		if (debug)
			debug("%s: DQ%u limit %d\n", __func__, dq_base + i,
			      limit);

		cvmx_pko3_dq_set_limit(xi.node, dq_base + i, limit);
		__pko_pkt_budget -= limit;
	}

	/* A negative budget means the AURA was sized for fewer packets
	 * than the DQ limits now admit in aggregate
	 */
	if (__pko_pkt_budget < 0)
		printf("WARNING: %s: PKO buffer deficit %d\n", __func__,
		       __pko_pkt_budget);
	else if (debug)
		debug("%s: PKO remaining packet budget: %d\n", __func__,
		      __pko_pkt_budget);

	/* Returns the number of DQs activated */
	return i;
}
815
816/** Configure and initialize PKO3 for an interface
817 *
818 * @param xiface is the interface number to configure
819 * @return 0 on success.
820 */
int cvmx_helper_pko3_init_interface(int xiface)
{
	cvmx_helper_interface_mode_t mode;
	int node, iface, subif, num_ports;
	bool fcs_enable, pad_enable, pad_enable_pko;
	u8 fcs_sof_off = 0;
	u8 num_queues = 1;
	bool qos = false, pfc = false;
	int res = -1;
	cvmx_xiface_t xi = cvmx_helper_xiface_to_node_interface(xiface);

	node = xi.node;
	iface = xi.interface;
	mode = cvmx_helper_interface_get_mode(xiface);
	num_ports = cvmx_helper_interface_enumerate(xiface);
	subif = 0;

	/* Pull per-interface queue/QoS/PFC settings from the static
	 * configuration table, when this interface has an entry
	 */
	if ((unsigned int)iface <
	    NUM_ELEMENTS(__cvmx_pko_queue_static_config[node].pknd.pko_cfg_iface)) {
		pfc = __cvmx_pko_queue_static_config[node]
			      .pknd.pko_cfg_iface[iface]
			      .pfc_enable;
		num_queues = __cvmx_pko_queue_static_config[node]
				     .pknd.pko_cfg_iface[iface]
				     .queues_per_port;
		qos = __cvmx_pko_queue_static_config[node]
			      .pknd.pko_cfg_iface[iface]
			      .qos_enable;
	}

	/* Force 8 DQs per port for pass 1.0 to circumvent limitations */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
		num_queues = 8;

	/* For ILK there is one IPD port per channel */
	if (mode == CVMX_HELPER_INTERFACE_MODE_ILK)
		num_ports = __cvmx_helper_ilk_enumerate(xiface);

	/* Skip non-existent interfaces */
	if (num_ports < 1) {
		debug("ERROR: %s: invalid iface %u:%u\n", __func__, node,
		      iface);
		return -1;
	}

	/* Dispatch on interface mode: LOOP/NPI/ILK are channelized (one MAC,
	 * one channel per subinterface); others are per-port PFC or generic
	 */
	if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) {
		num_queues = __cvmx_pko_queue_static_config[node]
				     .pknd.pko_cfg_loop.queues_per_port;
		qos = __cvmx_pko_queue_static_config[node]
			      .pknd.pko_cfg_loop.qos_enable;

		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
			num_queues = 8;

		res = __cvmx_pko3_config_chan_interface(xiface, num_ports,
							num_queues, qos);
		if (res < 0)
			goto __cfg_error;
	} else if (mode == CVMX_HELPER_INTERFACE_MODE_NPI) {
		num_queues = __cvmx_pko_queue_static_config[node]
				     .pknd.pko_cfg_npi.queues_per_port;
		qos = __cvmx_pko_queue_static_config[node]
			      .pknd.pko_cfg_npi.qos_enable;

		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
			num_queues = 8;

		res = __cvmx_pko3_config_chan_interface(xiface, num_ports,
							num_queues, qos);
		if (res < 0)
			goto __cfg_error;
	}
	/* ILK-specific queue configuration */
	else if (mode == CVMX_HELPER_INTERFACE_MODE_ILK) {
		unsigned int num_chans = __cvmx_helper_ilk_enumerate(xiface);

		num_queues = 8;
		qos = true;
		pfc = false;

		/* Scale queues-per-channel down as the channel count grows */
		if (num_chans >= 128)
			num_queues = 1;
		else if (num_chans >= 64)
			num_queues = 2;
		else if (num_chans >= 32)
			num_queues = 4;
		else
			num_queues = 8;

		res = __cvmx_pko3_config_chan_interface(xiface, num_chans,
							num_queues, qos);
	}
	/* Setup all ethernet configured for PFC */
	else if (pfc) {
		/* PFC interfaces have 8 prioritized queues */
		for (subif = 0; subif < num_ports; subif++) {
			res = __cvmx_pko3_config_pfc_interface(xiface, subif);
			if (res < 0)
				goto __cfg_error;

			/* Enable PFC/CBFC on BGX */
			__cvmx_helper_bgx_xaui_config_pfc(node, iface, subif,
							  true);
		}
	} else {
		/* All other interfaces follow static configuration */
		for (subif = 0; subif < num_ports; subif++) {
			res = __cvmx_pko3_config_gen_interface(xiface, subif,
							       num_queues, qos);
			if (res < 0)
				goto __cfg_error;
		}
	}

	fcs_enable = __cvmx_helper_get_has_fcs(xiface);
	pad_enable = __cvmx_helper_get_pko_padding(xiface);

	/* Do not use PKO PAD/FCS generation on o78p1.x on BGX interfaces */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
		pad_enable_pko = false;
	else
		pad_enable_pko = pad_enable;

	if (debug)
		debug("%s: iface %u:%u FCS=%d pad=%d pko=%d\n", __func__, node,
		      iface, fcs_enable, pad_enable, pad_enable_pko);

	/* Setup interface options */
	for (subif = 0; subif < num_ports; subif++) {
		/* Open interface/port DQs to allow transmission to begin */
		res = __cvmx_pko3_helper_dqs_activate(xiface, subif,
						      pad_enable_pko);

		if (res < 0)
			goto __cfg_error;

		/* ILK has only one MAC, subif == logical-channel */
		if (mode == CVMX_HELPER_INTERFACE_MODE_ILK && subif > 0)
			continue;

		/* LOOP has only one MAC, subif == logical-channel */
		if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP && subif > 0)
			continue;

		/* NPI has only one MAC, subif == 'ring' */
		if (mode == CVMX_HELPER_INTERFACE_MODE_NPI && subif > 0)
			continue;

		/* for sRIO there is 16 byte sRIO header, outside of FCS */
		if (mode == CVMX_HELPER_INTERFACE_MODE_SRIO)
			fcs_sof_off = 16;

		/* Decide whether PKO or BGX performs FCS/PAD insertion */
		if (iface >= CVMX_HELPER_MAX_GMX) {
			/* Non-BGX interface, use PKO for FCS/PAD */
			res = cvmx_pko3_interface_options(xiface, subif,
							  fcs_enable,
							  pad_enable_pko,
							  fcs_sof_off);
		} else if (pad_enable == pad_enable_pko) {
			/* BGX interface: FCS/PAD done by PKO */
			res = cvmx_pko3_interface_options(xiface, subif,
							  fcs_enable,
							  pad_enable,
							  fcs_sof_off);
			cvmx_helper_bgx_tx_options(node, iface, subif, false,
						   false);
		} else {
			/* BGX interface: FCS/PAD done by BGX */
			res = cvmx_pko3_interface_options(xiface, subif, false,
							  false, fcs_sof_off);
			cvmx_helper_bgx_tx_options(node, iface, subif,
						   fcs_enable, pad_enable);
		}

		if (res < 0)
			debug("WARNING: %s: option set failed on iface %u:%u/%u\n",
			      __func__, node, iface, subif);
		if (debug)
			debug("%s: face %u:%u/%u fifo size %d\n", __func__,
			      node, iface, subif,
			      cvmx_pko3_port_fifo_size(xiface, subif));
	}
	return 0;

__cfg_error:
	debug("ERROR: %s: failed on iface %u:%u/%u\n", __func__, node, iface,
	      subif);
	return -1;
}
1010
1011/**
1012 * Global initialization for PKO3
1013 *
1014 * Should only be called once on each node
1015 *
1016 * TBD: Resolve the kernel case.
1017 * When Linux eats up the entire memory, bootmem will be unable to
1018 * satisfy our request, and the memory needs to come from Linux free pages.
1019 */
1020int __cvmx_helper_pko3_init_global(unsigned int node, uint16_t gaura)
1021{
1022	int res;
1023
1024	res = cvmx_pko3_hw_init_global(node, gaura);
1025	if (res < 0) {
1026		debug("ERROR: %s:failed block initialization\n", __func__);
1027		return res;
1028	}
1029
1030	/* configure channel level */
1031	cvmx_pko3_channel_credit_level(node, cvmx_pko_default_channel_level);
1032
1033	/* add NULL MAC/DQ setup */
1034	res = __cvmx_pko3_config_null_interface(node);
1035	if (res < 0)
1036		debug("ERROR: %s: creating NULL interface\n", __func__);
1037
1038	return res;
1039}
1040
1041/**
1042 * Global initialization for PKO3
1043 *
1044 * Should only be called once on each node
1045 *
1046 * When Linux eats up the entire memory, bootmem will be unable to
1047 * satisfy our request, and the memory needs to come from Linux free pages.
1048 */
1049int cvmx_helper_pko3_init_global(unsigned int node)
1050{
1051	void *ptr;
1052	int res = -1;
1053	unsigned int aura_num = ~0;
1054	cvmx_fpa3_gaura_t aura;
1055
1056	/* Allocate memory required by PKO3 */
1057	res = __cvmx_pko3_config_memory(node);
1058	if (res < 0) {
1059		debug("ERROR: %s: PKO3 memory allocation error\n", __func__);
1060		return res;
1061	}
1062
1063	aura_num = res;
1064	aura = __cvmx_pko3_aura[node];
1065
1066	/* Exercise the FPA to make sure the AURA is functional */
1067	ptr = cvmx_fpa3_alloc(aura);
1068
1069	if (!ptr) {
1070		res = -1;
1071	} else {
1072		cvmx_fpa3_free_nosync(ptr, aura, 0);
1073		res = 0;
1074	}
1075
1076	if (res < 0) {
1077		debug("ERROR: %s: FPA failure AURA=%u:%d\n", __func__,
1078		      aura.node, aura.laura);
1079		return -1;
1080	}
1081
1082	res = __cvmx_helper_pko3_init_global(node, aura_num);
1083
1084	if (res < 0)
1085		debug("ERROR: %s: failed to start PPKO\n", __func__);
1086
1087	return res;
1088}
1089