/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2007-2017 QLogic Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifndef ECORE_INIT_H
#define ECORE_INIT_H

/* Init operation types and structures */
enum {
	OP_RD = 0x1,	/* read a single register */
	OP_WR,		/* write a single register */
	OP_SW,		/* copy a string to the device */
	OP_ZR,		/* clear memory */
	OP_ZP,		/* unzip then copy with DMAE */
	OP_WR_64,	/* write 64 bit pattern */
	OP_WB,		/* copy a string using DMAE */
#ifndef FW_ZIP_SUPPORT
	OP_FW,		/* copy an array from fw data (only used with unzipped FW) */
#endif
	OP_WB_ZR,	/* Clear a string using DMAE or indirect-wr */
	OP_IF_MODE_OR,  /* Skip the following ops if all init modes don't match */
	OP_IF_MODE_AND, /* Skip the following ops if any init modes don't match */
	OP_IF_PHASE,
	OP_RT,
	OP_DELAY,
	OP_VERIFY,
	OP_MAX
};

enum {
	STAGE_START,
	STAGE_END,
};

/* Returns the index of start or end of a specific block stage in ops array */
#define BLOCK_OPS_IDX(block, stage, end) \
	(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
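
/*
 * Illustrative note (not part of the original interface): the macro simply
 * flattens a (block, phase, start/end) triple into a linear index.  For
 * example, with NUM_OF_INIT_PHASES == 11 (see the phase enum below):
 *
 *	BLOCK_OPS_IDX(2, PHASE_PORT1, STAGE_END) = 2*((2*11) + 2) + 1 = 49
 *
 * i.e. consecutive pairs of entries hold the start and end offsets of each
 * (block, phase) combination in the generated ops array.
 */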


/* structs for the various opcodes */
struct raw_op {
	uint32_t op:8;
	uint32_t offset:24;
	uint32_t raw_data;
};

struct op_read {
	uint32_t op:8;
	uint32_t offset:24;
	uint32_t val;
};

struct op_write {
	uint32_t op:8;
	uint32_t offset:24;
	uint32_t val;
};

struct op_arr_write {
	uint32_t op:8;
	uint32_t offset:24;
#ifdef __BIG_ENDIAN
	uint16_t data_len;
	uint16_t data_off;
#else /* __LITTLE_ENDIAN */
	uint16_t data_off;
	uint16_t data_len;
#endif
};

struct op_zero {
	uint32_t op:8;
	uint32_t offset:24;
	uint32_t len;
};

struct op_if_mode {
	uint32_t op:8;
	uint32_t cmd_offset:24;
	uint32_t mode_bit_map;
};

struct op_if_phase {
	uint32_t op:8;
	uint32_t cmd_offset:24;
	uint32_t phase_bit_map;
};

struct op_delay {
	uint32_t op:8;
	uint32_t reserved:24;
	uint32_t delay;
};

union init_op {
	struct op_read		read;
	struct op_write		write;
	struct op_arr_write	arr_wr;
	struct op_zero		zero;
	struct raw_op		raw;
	struct op_if_mode	if_mode;
	struct op_if_phase	if_phase;
	struct op_delay		delay;
};
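
/*
 * A minimal sketch (intentionally not compiled, and not the driver's actual
 * init interpreter) of how an ops-array consumer might decode an entry
 * through this union: the 8-bit opcode selects the member and the 24-bit
 * offset field carries the register/command offset the operation applies to.
 */
#if 0
static void example_decode_op(const union init_op *op)
{
	switch (op->raw.op) {
	case OP_WR:
		/* write op->write.val to the register at op->write.offset */
		break;
	case OP_ZR:
		/* clear op->zero.len dwords starting at op->zero.offset */
		break;
	case OP_IF_MODE_AND:
		/* compare op->if_mode.mode_bit_map against the current init
		 * modes and, on mismatch, skip the following ops as directed
		 * by op->if_mode.cmd_offset
		 */
		break;
	default:
		/* the remaining opcodes are handled analogously */
		break;
	}
}
#endif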


/* Init Phases */
enum {
	PHASE_COMMON,
	PHASE_PORT0,
	PHASE_PORT1,
	PHASE_PF0,
	PHASE_PF1,
	PHASE_PF2,
	PHASE_PF3,
	PHASE_PF4,
	PHASE_PF5,
	PHASE_PF6,
	PHASE_PF7,
	NUM_OF_INIT_PHASES
};

/* Init Modes */
enum {
	MODE_ASIC                      = 0x00000001,
	MODE_FPGA                      = 0x00000002,
	MODE_EMUL                      = 0x00000004,
	MODE_E2                        = 0x00000008,
	MODE_E3                        = 0x00000010,
	MODE_PORT2                     = 0x00000020,
	MODE_PORT4                     = 0x00000040,
	MODE_SF                        = 0x00000080,
	MODE_MF                        = 0x00000100,
	MODE_MF_SD                     = 0x00000200,
	MODE_MF_SI                     = 0x00000400,
	MODE_MF_AFEX                   = 0x00000800,
	MODE_E3_A0                     = 0x00001000,
	MODE_E3_B0                     = 0x00002000,
	MODE_COS3                      = 0x00004000,
	MODE_COS6                      = 0x00008000,
	MODE_LITTLE_ENDIAN             = 0x00010000,
	MODE_BIG_ENDIAN                = 0x00020000,
};

/* Init Blocks */
enum {
	BLOCK_ATC,
	BLOCK_BRB1,
	BLOCK_CCM,
	BLOCK_CDU,
	BLOCK_CFC,
	BLOCK_CSDM,
	BLOCK_CSEM,
	BLOCK_DBG,
	BLOCK_DMAE,
	BLOCK_DORQ,
	BLOCK_HC,
	BLOCK_IGU,
	BLOCK_MISC,
	BLOCK_NIG,
	BLOCK_PBF,
	BLOCK_PGLUE_B,
	BLOCK_PRS,
	BLOCK_PXP2,
	BLOCK_PXP,
	BLOCK_QM,
	BLOCK_SRC,
	BLOCK_TCM,
	BLOCK_TM,
	BLOCK_TSDM,
	BLOCK_TSEM,
	BLOCK_UCM,
	BLOCK_UPB,
	BLOCK_USDM,
	BLOCK_USEM,
	BLOCK_XCM,
	BLOCK_XPB,
	BLOCK_XSDM,
	BLOCK_XSEM,
	BLOCK_MISC_AEU,
	NUM_OF_INIT_BLOCKS
};


/* Vnics per mode */
#define ECORE_PORT2_MODE_NUM_VNICS 4


/* QM queue numbers */
#define ECORE_ETH_Q		0
#define ECORE_TOE_Q		3
#define ECORE_TOE_ACK_Q		6
#define ECORE_ISCSI_Q		9
#define ECORE_ISCSI_ACK_Q	11
#define ECORE_FCOE_Q		10

/* Vnics per mode */
#define ECORE_PORT4_MODE_NUM_VNICS 2

/* COS offset for port1 in E3 B0 4port mode */
#define ECORE_E3B0_PORT1_COS_OFFSET 3

/* QM Register addresses */
#define ECORE_Q_VOQ_REG_ADDR(pf_q_num)\
	(QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
#define ECORE_VOQ_Q_REG_ADDR(cos, pf_q_num)\
	(QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
#define ECORE_Q_CMDQ_REG_ADDR(pf_q_num)\
	(QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4))

/* extracts the QM queue number for the specified port and vnic */
#define ECORE_PF_Q_NUM(q_num, port, vnic)\
	((((port) << 1) | (vnic)) * 16 + (q_num))
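
/*
 * Worked example of the macros above (arithmetic only, illustrative):
 *
 *	ECORE_PF_Q_NUM(3, 1, 0)       = ((1 << 1) | 0) * 16 + 3 = 35
 *	ECORE_Q_VOQ_REG_ADDR(35)      = QM_REG_QVOQIDX_0 + 4 * 35
 *	ECORE_VOQ_Q_REG_ADDR(cos, 35) = QM_REG_VOQQMASK_0_LSB +
 *					4 * ((cos) * 2 + 1)
 *
 * i.e. each PF owns a block of 16 queues, each COS/VOQ owns a pair of 32-bit
 * mask registers, and queue 35 corresponds to bit (35 & 0x1f) = 3 of the
 * second register of that pair (this is how ecore_map_q_cos() below uses
 * these macros).
 */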


/* Maps the specified queue to the specified COS */
static inline void ecore_map_q_cos(struct bxe_softc *sc, uint32_t q_num, uint32_t new_cos)
{
	/* find current COS mapping */
	uint32_t curr_cos = REG_RD(sc, QM_REG_QVOQIDX_0 + q_num * 4);

	/* check if queue->COS mapping has changed */
	if (curr_cos != new_cos) {
		uint32_t num_vnics = ECORE_PORT2_MODE_NUM_VNICS;
		uint32_t reg_addr, reg_bit_map, vnic;

		/* update parameters for 4port mode */
		if (INIT_MODE_FLAGS(sc) & MODE_PORT4) {
			num_vnics = ECORE_PORT4_MODE_NUM_VNICS;
			if (PORT_ID(sc)) {
				curr_cos += ECORE_E3B0_PORT1_COS_OFFSET;
				new_cos += ECORE_E3B0_PORT1_COS_OFFSET;
			}
		}

		/* change queue mapping for each VNIC */
		for (vnic = 0; vnic < num_vnics; vnic++) {
			uint32_t pf_q_num =
				ECORE_PF_Q_NUM(q_num, PORT_ID(sc), vnic);
			uint32_t q_bit_map = 1 << (pf_q_num & 0x1f);

			/* overwrite queue->VOQ mapping */
			REG_WR(sc, ECORE_Q_VOQ_REG_ADDR(pf_q_num), new_cos);

			/* clear queue bit from current COS bit map */
			reg_addr = ECORE_VOQ_Q_REG_ADDR(curr_cos, pf_q_num);
			reg_bit_map = REG_RD(sc, reg_addr);
			REG_WR(sc, reg_addr, reg_bit_map & (~q_bit_map));

			/* set queue bit in new COS bit map */
			reg_addr = ECORE_VOQ_Q_REG_ADDR(new_cos, pf_q_num);
			reg_bit_map = REG_RD(sc, reg_addr);
			REG_WR(sc, reg_addr, reg_bit_map | q_bit_map);

			/* set/clear queue bit in command-queue bit map
			 * (E2/E3A0 only, valid COS values are 0/1)
			 */
			if (!(INIT_MODE_FLAGS(sc) & MODE_E3_B0)) {
				reg_addr = ECORE_Q_CMDQ_REG_ADDR(pf_q_num);
				reg_bit_map = REG_RD(sc, reg_addr);
				q_bit_map = 1 << (2 * (pf_q_num & 0xf));
				reg_bit_map = new_cos ?
					      (reg_bit_map | q_bit_map) :
					      (reg_bit_map & (~q_bit_map));
				REG_WR(sc, reg_addr, reg_bit_map);
			}
		}
	}
}

/* Configures the QM according to the specified per-traffic-type COSes */
static inline void ecore_dcb_config_qm(struct bxe_softc *sc, enum cos_mode mode,
				       struct priority_cos *traffic_cos)
{
	ecore_map_q_cos(sc, ECORE_FCOE_Q,
			traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
	ecore_map_q_cos(sc, ECORE_ISCSI_Q,
			traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
	ecore_map_q_cos(sc, ECORE_ISCSI_ACK_Q,
			traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
	if (mode != STATIC_COS) {
		/* required only in OVERRIDE_COS mode */
		ecore_map_q_cos(sc, ECORE_ETH_Q,
				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
		ecore_map_q_cos(sc, ECORE_TOE_Q,
				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
		ecore_map_q_cos(sc, ECORE_TOE_ACK_Q,
				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
	}
}


/*
 * Congestion management port init API description
 *
 * The API works as follows: the driver passes a cmng_init_input struct to
 * the port_init function, which prepares the required internal RAM
 * structure and returns it to the driver (as cmng_init); the driver then
 * writes it into the internal RAM.
 *
 * IMPORTANT REMARKS:
 * 1. The cmng_init struct does not mirror the contiguous internal RAM
 *    layout. The driver must use the XSTORM_CMNG_PERPORT_VARS_OFFSET
 *    offset to write the port sub-struct and the PFID_FROM_PORT_AND_VNIC
 *    offset to write the vnic sub-structs (in other words - don't use a
 *    single memcpy!).
 * 2. Although the cmng_init struct is filled for the maximal possible
 *    number of vnics, the driver should only write the vnics that are
 *    valid for the current port mode into the internal RAM.
 *
 * See the illustrative sketch below.
 */
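/*
 * A minimal usage sketch of the flow described above.  It is illustrative
 * only and intentionally kept out of the build; everything other than
 * ecore_init_cmng() and the two structs is a placeholder.
 */
#if 0
	struct cmng_init_input input = { 0 };
	struct cmng_init ram;

	input.port_rate = 10000;	/* 10G link, rate in Mbps */
	/* fill input.vnic_max_rate[], input.vnic_min_rate[],
	 * input.cos_min_rate[] and input.flags here
	 */
	ecore_init_cmng(&input, &ram);

	/* Then copy ram.port to the XSTORM_CMNG_PERPORT_VARS_OFFSET offset
	 * and each valid ram.vnic entry to its PFID_FROM_PORT_AND_VNIC
	 * offset, sub-struct by sub-struct (remark 1 above - no single
	 * memcpy of the whole cmng_init struct).
	 */
#endif
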
#define BITS_TO_BYTES(x) ((x)/8)

/* CMNG constants, as derived from system spec calculations */

/* default MIN rate in case VNIC min rate is configured to zero - 100 Mbps */
#define DEF_MIN_RATE 100

/* resolution of the rate shaping timer - 400 usec */
#define RS_PERIODIC_TIMEOUT_USEC 400

/*
 * number of bytes in a single QM arbitration cycle -
 * coefficient for calculating the fairness timer
 */
#define QM_ARB_BYTES 160000

/* resolution of the Min algorithm - 1:100 */
#define MIN_RES 100

/*
 * number of bytes above the threshold used for
 * the minimal credit of the Min algorithm
 */
#define MIN_ABOVE_THRESH 32768

/*
 * Fairness algorithm integration time coefficient -
 * for calculating the actual Tfair
 */
#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)

/* Memory of the fairness algorithm - 2 cycles */
#define FAIR_MEM 2
#define SAFC_TIMEOUT_USEC 52

#define SDM_TICKS 4
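
/*
 * Worked example of the constants above (illustrative only): at a 10G port
 * rate, r_param = BITS_TO_BYTES(10000) = 1250 bytes per usec, so the rate
 * shaping threshold programmed by ecore_init_max() below becomes
 * (5 * 400 * 1250) / 4 = 625000 bytes, and one QM arbitration cycle of
 * 160000 bytes corresponds to a fairness timer resolution of
 * 160000 / 1250 = 128 usec in ecore_init_min().
 */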


static inline void ecore_init_max(const struct cmng_init_input *input_data,
				  uint32_t r_param, struct cmng_init *ram_data)
{
	uint32_t vnic;
	struct cmng_vnic *vdata = &ram_data->vnic;
	struct cmng_struct_per_port *pdata = &ram_data->port;

	/*
	 * rate shaping per-port variables:
	 * 100 microseconds in SDM ticks = 25, since each tick is 4 microseconds
	 */
	pdata->rs_vars.rs_periodic_timeout =
	RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS;

	/* this is the threshold below which no timer arming will occur.
	 * The 1.25 coefficient makes the threshold a little bigger than
	 * the real time, to compensate for timer inaccuracy.
	 */
	pdata->rs_vars.rs_threshold =
	(5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4;

	/* rate shaping per-vnic variables */
	for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) {
		/* global vnic counter - maximal Mbps for this vnic */
		vdata->vnic_max_rate[vnic].vn_counter.rate =
		input_data->vnic_max_rate[vnic];
		/*
		 * the quota in each timer period - number of bytes
		 * transmitted in this period
		 */
		vdata->vnic_max_rate[vnic].vn_counter.quota =
			RS_PERIODIC_TIMEOUT_USEC *
			(uint32_t)vdata->vnic_max_rate[vnic].vn_counter.rate / 8;
	}

}
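
/*
 * Example of the quota computed above (illustrative only): a vnic capped at
 * vnic_max_rate = 2500 Mbps gets a quota of 400 * 2500 / 8 = 125000 bytes
 * per 400 usec rate-shaping period, i.e. exactly 2.5 Gbps.
 */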

static inline void ecore_init_max_per_vn(uint16_t vnic_max_rate,
				  struct rate_shaping_vars_per_vn *ram_data)
{
	/* global vnic counter - maximal Mbps for this vnic */
	ram_data->vn_counter.rate = vnic_max_rate;

	/*
	 * the quota in each timer period - number of bytes
	 * transmitted in this period
	 */
	ram_data->vn_counter.quota =
		RS_PERIODIC_TIMEOUT_USEC * (uint32_t)vnic_max_rate / 8;
}

static inline void ecore_init_min(const struct cmng_init_input *input_data,
				  uint32_t r_param, struct cmng_init *ram_data)
{
	uint32_t vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair;
	struct cmng_vnic *vdata = &ram_data->vnic;
	struct cmng_struct_per_port *pdata = &ram_data->port;

	/* this is the resolution of the fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;

	/*
	 * fairness per-port variables
	 * for 10G it is 1000 usec, for 1G it is 10000 usec
	 */
	tFair = T_FAIR_COEF / input_data->port_rate;

	/* this is the threshold below which we won't arm the timer anymore */
	pdata->fair_vars.fair_threshold = QM_ARB_BYTES;

	/*
	 * we multiply by 1e3/8 to get bytes/msec. We don't want the credits
	 * to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution)
	 */
	pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM;

	/* since each tick is 4 microseconds */
	pdata->fair_vars.fairness_timeout =
				fair_periodic_timeout_usec / SDM_TICKS;

	/* calculate sum of weights */
	vnicWeightSum = 0;

	for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++)
		vnicWeightSum += input_data->vnic_min_rate[vnic];

	/* global vnic counter */
	if (vnicWeightSum > 0) {
		/* fairness per-vnic variables */
		for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) {
			/*
			 * this is the credit for each period of the fairness
			 * algorithm - number of bytes in T_FAIR (this vnic's
			 * share of the port rate)
			 */
			vdata->vnic_min_rate[vnic].vn_credit_delta =
				((uint32_t)(input_data->vnic_min_rate[vnic]) * 100 *
				(T_FAIR_COEF / (8 * 100 * vnicWeightSum)));
			if (vdata->vnic_min_rate[vnic].vn_credit_delta <
			    pdata->fair_vars.fair_threshold +
			    MIN_ABOVE_THRESH) {
				vdata->vnic_min_rate[vnic].vn_credit_delta =
					pdata->fair_vars.fair_threshold +
					MIN_ABOVE_THRESH;
			}
		}
	}
}

static inline void ecore_init_fw_wrr(const struct cmng_init_input *input_data,
				     uint32_t r_param, struct cmng_init *ram_data)
{
	uint32_t vnic, cos;
	uint32_t cosWeightSum = 0;
	struct cmng_vnic *vdata = &ram_data->vnic;
	struct cmng_struct_per_port *pdata = &ram_data->port;

	for (cos = 0; cos < MAX_COS_NUMBER; cos++)
		cosWeightSum += input_data->cos_min_rate[cos];

	if (cosWeightSum > 0) {

		for (vnic = 0; vnic < ECORE_PORT2_MODE_NUM_VNICS; vnic++) {
			/*
			 * Since COS and vnic fairness are not expected to be
			 * used together, the rate to divide between the COSes
			 * is the port rate.
			 */
			uint32_t *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta;
			for (cos = 0; cos < MAX_COS_NUMBER; cos++) {
				/*
				 * this is the credit for each period of
				 * the fairness algorithm - number of bytes
				 * in T_FAIR (this COS's share of the vnic rate)
				 */
				ccd[cos] =
				    ((uint32_t)input_data->cos_min_rate[cos] * 100 *
				    (T_FAIR_COEF / (8 * 100 * cosWeightSum)));
				if (ccd[cos] < pdata->fair_vars.fair_threshold
						+ MIN_ABOVE_THRESH) {
					ccd[cos] =
					    pdata->fair_vars.fair_threshold +
					    MIN_ABOVE_THRESH;
				}
			}
		}
	}
}

static inline void ecore_init_safc(const struct cmng_init_input *input_data,
				   struct cmng_init *ram_data)
{
	/* in microseconds */
	ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC;
}

/* Congestion management port init */
static inline void ecore_init_cmng(const struct cmng_init_input *input_data,
				   struct cmng_init *ram_data)
{
	uint32_t r_param;
	ECORE_MEMSET(ram_data, 0, sizeof(struct cmng_init));

	ram_data->port.flags = input_data->flags;

	/*
	 * number of bytes transmitted at a rate of 10Gbps
	 * in one usec = 1.25KB.
	 */
	r_param = BITS_TO_BYTES(input_data->port_rate);
	ecore_init_max(input_data, r_param, ram_data);
	ecore_init_min(input_data, r_param, ram_data);
	ecore_init_fw_wrr(input_data, r_param, ram_data);
	ecore_init_safc(input_data, ram_data);
}




/* Returns the index of start or end of a specific block stage in ops array */
#define BLOCK_OPS_IDX(block, stage, end) \
			(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))


#define INITOP_SET		0	/* set the HW directly */
#define INITOP_CLEAR		1	/* clear the HW directly */
#define INITOP_INIT		2	/* set the init-value array */

/****************************************************************************
* ILT management
****************************************************************************/
struct ilt_line {
	ecore_dma_addr_t page_mapping;
	void *page;
	uint32_t size;
};

struct ilt_client_info {
	uint32_t page_size;
	uint16_t start;
	uint16_t end;
	uint16_t client_num;
	uint16_t flags;
#define ILT_CLIENT_SKIP_INIT	0x1
#define ILT_CLIENT_SKIP_MEM	0x2
};

struct ecore_ilt {
	uint32_t start_line;
	struct ilt_line		*lines;
	struct ilt_client_info	clients[4];
#define ILT_CLIENT_CDU	0
#define ILT_CLIENT_QM	1
#define ILT_CLIENT_SRC	2
#define ILT_CLIENT_TM	3
};

/****************************************************************************
* SRC configuration
****************************************************************************/
struct src_ent {
	uint8_t opaque[56];
	uint64_t next;
};
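
/* Note (derived from the layout above): each SRC hash entry is 64 bytes -
 * 56 opaque bytes plus a 64-bit link used to chain entries together.
 */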

/****************************************************************************
* Parity configuration
****************************************************************************/
#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \
{ \
	block##_REG_##block##_PRTY_MASK, \
	block##_REG_##block##_PRTY_STS_CLR, \
	en_mask, {m1, m1h, m2, m3}, #block \
}

#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \
{ \
	block##_REG_##block##_PRTY_MASK_0, \
	block##_REG_##block##_PRTY_STS_CLR_0, \
	en_mask, {m1, m1h, m2, m3}, #block"_0" \
}

#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \
{ \
	block##_REG_##block##_PRTY_MASK_1, \
	block##_REG_##block##_PRTY_STS_CLR_1, \
	en_mask, {m1, m1h, m2, m3}, #block"_1" \
}
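
/*
 * For reference, an illustrative expansion: BLOCK_PRTY_INFO_0(PXP2, ...)
 * pastes the block name into the register identifiers and produces an entry
 * of the form
 *
 *	{ PXP2_REG_PXP2_PRTY_MASK_0, PXP2_REG_PXP2_PRTY_STS_CLR_0,
 *	  en_mask, {m1, m1h, m2, m3}, "PXP2_0" }
 *
 * which is how the table below is built.
 */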

static const struct {
	uint32_t mask_addr;
	uint32_t sts_clr_addr;
	uint32_t en_mask;	/* Mask to enable parity attentions */
	struct {
		uint32_t e1;	/* 57710 */
		uint32_t e1h;	/* 57711 */
		uint32_t e2;	/* 57712 */
		uint32_t e3;	/* 578xx */
	} reg_mask;		/* Register mask (all valid bits) */
	char name[8];		/* Block's longest name is 7 characters long
				 * (name + suffix)
				 */
} ecore_blocks_parity_data[] = {
	/* bit 19 masked */
	/* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
	/* bit 5,18,20-31 */
	/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
	/* bit 5 */
	/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20);	*/
	/* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
	/* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */

	/* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
	 * want to handle "system kill" flow at the moment.
	 */
	BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff,
			0x7ffffff),
	BLOCK_PRTY_INFO_0(PXP2,	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
			  0xffffffff),
	BLOCK_PRTY_INFO_1(PXP2,	0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff),
	BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0),
	BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0),
	BLOCK_PRTY_INFO_0(NIG,	0xffffffff, 0, 0, 0xffffffff, 0xffffffff),
	BLOCK_PRTY_INFO_1(NIG,	0xffff, 0, 0, 0xff, 0xffff),
	BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff),
	BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1),
	BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff),
	BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f),
	BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3),
	BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3),
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
		GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf,
		{0xf, 0xf, 0xf, 0xf}, "UPB"},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
		GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
		{0xf, 0xf, 0xf, 0xf}, "XPB"},
	BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7),
	BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f),
	BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f),
	BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1),
	BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf),
	BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf),
	BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff, 0xff),
	BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff),
	BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f),
	BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
	BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
	BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
	BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
	BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
	BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
	BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
	BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff),
	BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
			  0xffffffff),
	BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
	BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
			  0xffffffff),
	BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
	BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
			  0xffffffff),
	BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
	BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
			  0xffffffff),
	BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
};


/* [28] MCP Latched rom_parity
 * [29] MCP Latched ump_rx_parity
 * [30] MCP Latched ump_tx_parity
 * [31] MCP Latched scpad_parity
 */
#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS	\
	(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)

#define MISC_AEU_ENABLE_MCP_PRTY_BITS	\
	(MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \
	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)

/* The registers below control the MCP parity attention output. When the
 * MISC_AEU_ENABLE_MCP_PRTY_BITS are set, the attentions are enabled;
 * when they are cleared, the attentions are disabled.
 */
static const struct {
	uint32_t addr;
	uint32_t bits;
} mcp_attn_ctl_regs[] = {
	{ MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
		MISC_AEU_ENABLE_MCP_PRTY_BITS },
	{ MISC_REG_AEU_ENABLE4_NIG_0,
		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
	{ MISC_REG_AEU_ENABLE4_PXP_0,
		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
	{ MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
		MISC_AEU_ENABLE_MCP_PRTY_BITS },
	{ MISC_REG_AEU_ENABLE4_NIG_1,
		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
	{ MISC_REG_AEU_ENABLE4_PXP_1,
		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }
};

static inline void ecore_set_mcp_parity(struct bxe_softc *sc, uint8_t enable)
{
	int i;
	uint32_t reg_val;

	for (i = 0; i < ARRSIZE(mcp_attn_ctl_regs); i++) {
		reg_val = REG_RD(sc, mcp_attn_ctl_regs[i].addr);

		if (enable)
			reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; /* Linux uses mcp_attn_ctl_regs[i].bits */
		else
			reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; /* Linux uses mcp_attn_ctl_regs[i].bits */

		REG_WR(sc, mcp_attn_ctl_regs[i].addr, reg_val);
	}
}

static inline uint32_t ecore_parity_reg_mask(struct bxe_softc *sc, int idx)
{
	if (CHIP_IS_E1(sc))
		return ecore_blocks_parity_data[idx].reg_mask.e1;
	else if (CHIP_IS_E1H(sc))
		return ecore_blocks_parity_data[idx].reg_mask.e1h;
	else if (CHIP_IS_E2(sc))
		return ecore_blocks_parity_data[idx].reg_mask.e2;
	else /* CHIP_IS_E3 */
		return ecore_blocks_parity_data[idx].reg_mask.e3;
}

static inline void ecore_disable_blocks_parity(struct bxe_softc *sc)
{
	int i;

	for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
		uint32_t dis_mask = ecore_parity_reg_mask(sc, i);

		if (dis_mask) {
			REG_WR(sc, ecore_blocks_parity_data[i].mask_addr,
			       dis_mask);
			ECORE_MSG(sc, "Setting parity mask "
						 "for %s to\t\t0x%x\n",
				    ecore_blocks_parity_data[i].name, dis_mask);
		}
	}

	/* Disable MCP parity attentions */
	ecore_set_mcp_parity(sc, FALSE);
}

/**
 * Clear the parity error status registers.
 */
static inline void ecore_clear_blocks_parity(struct bxe_softc *sc)
{
	int i;
	uint32_t reg_val, mcp_aeu_bits =
		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;

	/* Clear SEM_FAST parities */
	REG_WR(sc, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
	REG_WR(sc, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
	REG_WR(sc, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
	REG_WR(sc, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);

	for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
		uint32_t reg_mask = ecore_parity_reg_mask(sc, i);

		if (reg_mask) {
			reg_val = REG_RD(sc, ecore_blocks_parity_data[i].
					 sts_clr_addr);
			if (reg_val & reg_mask)
				ECORE_MSG(sc,
					   "Parity errors in %s: 0x%x\n",
					   ecore_blocks_parity_data[i].name,
					   reg_val & reg_mask);
		}
	}

	/* Check if there were parity attentions in MCP */
	reg_val = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_MCP);
	if (reg_val & mcp_aeu_bits)
		ECORE_MSG(sc, "Parity error in MCP: 0x%x\n",
			   reg_val & mcp_aeu_bits);

	/* Clear parity attentions in MCP:
	 * [7]  clears Latched rom_parity
	 * [8]  clears Latched ump_rx_parity
	 * [9]  clears Latched ump_tx_parity
	 * [10] clears Latched scpad_parity (both ports)
	 */
	REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
}

static inline void ecore_enable_blocks_parity(struct bxe_softc *sc)
{
	int i;

	for (i = 0; i < ARRSIZE(ecore_blocks_parity_data); i++) {
		uint32_t reg_mask = ecore_parity_reg_mask(sc, i);

		if (reg_mask)
			REG_WR(sc, ecore_blocks_parity_data[i].mask_addr,
				ecore_blocks_parity_data[i].en_mask & reg_mask);
	}

	/* Enable MCP parity attentions */
	ecore_set_mcp_parity(sc, TRUE);
}


#endif /* ECORE_INIT_H */