// SPDX-License-Identifier: GPL-2.0
/*
 * Header Parser helpers for Marvell PPv2 Network Controller
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);

	return 0;
}

/* Initialize tcam entry from hw */
int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
			   int tid)
{
	int i;

	if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	memset(pe, 0, sizeof(*pe));
	pe->index = tid;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	if (add)
		pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
	else
		pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
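	/* Port enable bits are stored inverted in hw: a set bit masks the
	 * port out, so writing ~ports enables exactly the ports in the map
	 */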
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
}

/* Obtain port map from tcam sw entry */
unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
}

/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
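	/* Each 32-bit tcam word holds two data bytes in its low half and
	 * their enable bits 16 bits higher; an enable bit of 0 turns the
	 * corresponding tcam bit into a don't-care (see the _get helper
	 * below)
	 */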
	int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;

	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
}

/* Get byte of data and its enable bits from tcam sw entry */
void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
				  unsigned int offs, unsigned char *byte,
				  unsigned char *enable)
{
	int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;

	*byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
	*enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
}

/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	u16 tcam_data;

	tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
	return tcam_data == data;
}

/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
		else
			pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
	}

	pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
}

/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set vid in tcam sw entry */
static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
				unsigned short vid)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
}

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    u32 val)
{
	pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      u32 val)
{
	pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
						1);
		else
			mvpp2_prs_sram_bits_clear(pe,
						  MVPP2_PRS_SRAM_RI_OFFS + i,
						  1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
}

/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
						1);
		else
			mvpp2_prs_sram_bits_clear(pe,
						  MVPP2_PRS_SRAM_AI_OFFS + i,
						  1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	/* ai is stored on bits 90->97; so it spreads across two u32 */
	int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);

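	/* e.g. with MVPP2_PRS_SRAM_AI_OFFS = 90: ai_off = 2, ai_shift = 26,
	 * so the low six ai bits come from sram[2] and the top two from
	 * sram[3]
	 */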
	bits = (pe->sram[ai_off] >> ai_shift) |
	       (pe->sram[ai_off + 1] << (32 - ai_shift));

	return bits;
}

/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
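	/* The hw encodes the shift as sign-magnitude (a sign bit plus an
	 * unsigned value), not as two's complement
	 */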
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
		shift & MVPP2_PRS_SRAM_SHIFT_MASK;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				offset & MVPP2_PRS_SRAM_UDF_MASK);

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* Find parser flow entry */
static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		bits = mvpp2_prs_sram_ai_get(&pe);

		/* The sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return tid;
	}

	return -ENOENT;
}

/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}

/* Drop flow control pause frames */
static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
{
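	/* 01:80:C2:00:00:01 is the IEEE 802.3x MAC control (pause frame)
	 * destination address
	 */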
	unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
	struct mvpp2_prs_entry pe;
	unsigned int len;

	memset(&pe, 0, sizeof(pe));

	/* For all ports - drop flow control frames */
	pe.index = MVPP2_PE_FC_DROP;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);
}

/* Enable/disable dropping of all MAC DAs */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
	} else {
		/* Entry doesn't exist - create a new one */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set port to unicast or multicast promiscuous mode */
void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
			       enum mvpp2_prs_l2_cast l2_cast, bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char cast_match;
	unsigned int ri;
	int tid;

	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
		cast_match = MVPP2_PRS_UCAST_VAL;
		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_UCAST;
	} else {
		cast_match = MVPP2_PRS_MCAST_VAL;
		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_MCAST;
	}

	/* promiscuous mode - Accept unknown unicast or multicast packets */
	if (priv->prs_shadow[tid].valid) {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = tid;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);

		/* Match UC or MC addresses */
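		/* (the I/G bit - bit 0 of the first DA byte - is what
		 * distinguishes the two)
		 */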
		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
					     MVPP2_PRS_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create a new one */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
					     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
					     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);

			/* Set ai bits for next iteration */
			if (extend)
				mvpp2_prs_sram_ai_update(&pe, 1,
							MVPP2_PRS_SRAM_AI_MASK);
			else
				mvpp2_prs_sram_ai_update(&pe, 0,
							MVPP2_PRS_SRAM_AI_MASK);

			/* Set result info bits to 'single vlan' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
			/* If packet is tagged continue check vid filtering */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
		} else {
			/* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */
			mvpp2_prs_sram_shift_set(&pe, shift,
					MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create a new one */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift past ethertype + 2 reserved bytes + tag */
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Search for existing single/triple vlan entry */
static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(&pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(&pe);
		/* Clear double vlan bit */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return tid;
	}

	return -ENOENT;
}

/* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry pe;
	int tid_aux, tid;
	int ret = 0;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (tid < 0) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

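		/* Single/triple vlan entries must sit after every double
		 * vlan entry so that double tags keep lookup precedence
		 */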
		if (tid <= tid_aux)
			return -EINVAL;

		memset(&pe, 0, sizeof(pe));
		pe.index = tid;
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);

		mvpp2_prs_match_etype(&pe, 0, tpid);

		/* VLAN tag detected, proceed with VID filtering */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);

	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}

/* Get first free double vlan ai number */
static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
{
	int i;

	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
		if (!priv->prs_double_vlans[i])
			return i;
	}

	return -EINVAL;
}

/* Search for existing double vlan entry */
static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
				      unsigned short tpid2)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
			mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return tid;
	}

	return -ENOENT;
}

/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	int tid_aux, tid, ai, ret = 0;
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (tid < 0) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
				MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0)
			return ai;

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

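		/* Conversely, a new double vlan entry must precede every
		 * single/triple one to keep its precedence
		 */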
		if (tid >= tid_aux)
			return -ERANGE;

		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		pe.index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(&pe, 0, tpid1);
		mvpp2_prs_match_etype(&pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		/* Shift 4 bytes - skip outer vlan tag */
		mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);
	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}

/* IPv4 header parsing for fragmentation and L4 offset */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Non-fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

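	/* Bytes 2-3 hold the IPv4 flags/fragment-offset field; the masks
	 * ignore the reserved and DF bits, so the entry matches only when
	 * MF and the fragment offset are zero, i.e. non-fragmented packets
	 */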
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

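	/* Reuse the sw entry built above; only the index and ri change,
	 * and the fragment-offset bytes below become don't-care
	 */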
	pe.index = tid;
	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* IPv4 L3 multicast or broadcast */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case MVPP2_PRS_L3_BROAD_CAST:
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Go again to ipv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);

	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Shift back to IPv4 proto */
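	/* (the DIP check left the position at IPv4 header offset 16;
	 * -12 backs up to offset 4, matching the byte offsets used in
	 * mvpp2_prs_ip4_proto())
	 */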
	mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Set entries for protocols over IPv6 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Write HW */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* IPv6 L3 multicast entry */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Go again to ipv6 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
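	/* (DIP sits at IPv6 header offset 24 and the next-header field at
	 * offset 6, hence the -18)
	 */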
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}

/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

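	/* One entry per port: the flow ID equals the port number and is
	 * stored in the sram ai field, where mvpp2_prs_flow_find() reads
	 * it back
	 */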
	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}

/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

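	/* Every received packet starts with a two-byte Marvell Header
	 * (MVPP2_MH_SIZE); both entries below shift past it
	 */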
	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set MH entry that skips the parser */
	pe.index = MVPP2_PE_MH_SKIP_PRS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, 0);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}

/* Set default entries (placeholders) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Create dummy entries for drop all and promiscuous modes */
	mvpp2_prs_drop_fc(priv);
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
}

/* Set default entries for various types of dsa packets */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* Untagged EDSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* Untagged DSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - placeholder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Untagged EDSA ethertype entry - placeholder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - placeholder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* Untagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Initialize parser entries for VID filtering */
static void mvpp2_prs_vid_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

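	/* With EDSA_VID_AI_BIT cleared this entry covers the 4-byte tag
	 * case; its twin below, with the bit set, covers the 8-byte EDSA
	 * tag
	 */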
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 4 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vid entry for extended DSA */
	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
				 MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}

/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, ihl;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with header length >= 5 */
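	/* One tcam entry per valid IHL value (5..15), since the tcam
	 * matches exact byte patterns and cannot range-match
	 */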
	for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
		pe.index = tid;

		mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
		mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
					     MVPP2_PRS_IPV4_HEAD | ihl,
					     MVPP2_PRS_IPV4_HEAD_MASK |
					     MVPP2_PRS_IPV4_IHL_MASK);

		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
					 MVPP2_PRS_RI_L3_PROTO_MASK);
		/* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
		mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
					 sizeof(struct iphdr) - 4,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		/* Set L4 offset */
		mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
					  MVPP2_ETH_TYPE_LEN + (ihl * 4),
					  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
		priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
		priv->prs_shadow[pe.index].finish = false;
		mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
					MVPP2_PRS_RI_L3_PROTO_MASK);
		mvpp2_prs_hw_write(priv, &pe);
	}

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's an unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible options:
 * 0x8100, 0x88A8
 * 0x8100, 0x8100
 * 0x8100
 * 0x88A8
 */
static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
					      MVPP2_PRS_DBL_VLANS_MAX,
					      sizeof(bool), GFP_KERNEL);
	if (!priv->prs_double_vlans)
		return -ENOMEM;

	/* Double VLAN: 0x88A8, 0x8100 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Double VLAN: 0x8100, 0x8100 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x88a8 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x8100 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Set default double vlan entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Clear ai for next iterations */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
				 MVPP2_PRS_RI_VLAN_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
				 MVPP2_PRS_DBL_VLAN_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vlan none entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
				 MVPP2_PRS_RI_VLAN_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, ihl;

	/* IPv4 over PPPoE with header length >= 5 */
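	/* One tcam entry per valid IHL value, as in mvpp2_prs_etype_init() */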
1613	for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
1614		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1615						MVPP2_PE_LAST_FREE_TID);
1616		if (tid < 0)
1617			return tid;
1618
1619		memset(&pe, 0, sizeof(pe));
1620		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1621		pe.index = tid;
1622
1623		mvpp2_prs_match_etype(&pe, 0, PPP_IP);
1624		mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1625					     MVPP2_PRS_IPV4_HEAD | ihl,
1626					     MVPP2_PRS_IPV4_HEAD_MASK |
1627					     MVPP2_PRS_IPV4_IHL_MASK);
1628
1629		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1630		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
1631					 MVPP2_PRS_RI_L3_PROTO_MASK);
1632		/* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
1633		mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
1634					 sizeof(struct iphdr) - 4,
1635					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1636		/* Set L3 offset */
1637		mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1638					  MVPP2_ETH_TYPE_LEN,
1639					  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1640		/* Set L4 offset */
1641		mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1642					  MVPP2_ETH_TYPE_LEN + (ihl * 4),
1643					  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1644
1645		/* Update shadow table and hw entry */
1646		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
1647		mvpp2_prs_hw_write(priv, &pe);
1648	}
1649
1650	/* IPv6 over PPPoE */
1651	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1652					MVPP2_PE_LAST_FREE_TID);
1653	if (tid < 0)
1654		return tid;
1655
1656	memset(&pe, 0, sizeof(pe));
1657	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1658	pe.index = tid;
1659
1660	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
1661
1662	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1663	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
1664				 MVPP2_PRS_RI_L3_PROTO_MASK);
1665	/* Jump to DIP of IPV6 header */
1666	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
1667				 MVPP2_MAX_L3_ADDR_SIZE,
1668				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1669	/* Set L3 offset */
1670	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1671				  MVPP2_ETH_TYPE_LEN,
1672				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1673
1674	/* Update shadow table and hw entry */
1675	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
1676	mvpp2_prs_hw_write(priv, &pe);
1677
1678	/* Non-IP over PPPoE */
1679	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1680					MVPP2_PE_LAST_FREE_TID);
1681	if (tid < 0)
1682		return tid;
1683
1684	memset(&pe, 0, sizeof(pe));
1685	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1686	pe.index = tid;
1687
1688	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
1689				 MVPP2_PRS_RI_L3_PROTO_MASK);
1690
1691	/* Finished: go to flowid generation */
1692	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1693	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1694	/* Set L3 offset even if it's unknown L3 */
1695	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1696				  MVPP2_ETH_TYPE_LEN,
1697				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1698
1699	/* Update shadow table and hw entry */
1700	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
1701	mvpp2_prs_hw_write(priv, &pe);
1702
1703	return 0;
1704}
1705
1706/* Initialize entries for IPv4 */
1707static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
1708{
1709	struct mvpp2_prs_entry pe;
1710	int err;
1711
1712	/* Set entries for TCP, UDP and IGMP over IPv4 */
1713	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
1714				  MVPP2_PRS_RI_L4_PROTO_MASK);
1715	if (err)
1716		return err;
1717
1718	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
1719				  MVPP2_PRS_RI_L4_PROTO_MASK);
1720	if (err)
1721		return err;
1722
1723	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
1724				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1725				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1726				  MVPP2_PRS_RI_CPU_CODE_MASK |
1727				  MVPP2_PRS_RI_UDF3_MASK);
1728	if (err)
1729		return err;
1730
1731	/* IPv4 Broadcast */
1732	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
1733	if (err)
1734		return err;
1735
1736	/* IPv4 Multicast */
1737	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
1738	if (err)
1739		return err;
1740
1741	/* Default IPv4 entry for unknown protocols */
1742	memset(&pe, 0, sizeof(pe));
1743	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1744	pe.index = MVPP2_PE_IP4_PROTO_UN;
1745
1746	/* Finished: go to flowid generation */
1747	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1748	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1749
1750	/* Set L3 offset */
1751	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, -4,
1752				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
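	/* At this stage the lookup window sits 4 bytes into the IPv4
	 * header (see the -12 shift in the unicast entry below), so -4
	 * points the L3 offset back at the start of the header.
	 */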
1753	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1754	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
1755				 MVPP2_PRS_RI_L4_PROTO_MASK);
1756
1757	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1758				 MVPP2_PRS_IPV4_DIP_AI_BIT);
1759	/* Unmask all ports */
1760	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1761
1762	/* Update shadow table and hw entry */
1763	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1764	mvpp2_prs_hw_write(priv, &pe);
1765
1766	/* Default IPv4 entry for unicast address */
1767	memset(&pe, 0, sizeof(pe));
1768	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1769	pe.index = MVPP2_PE_IP4_ADDR_UN;
1770
	/* Go to IPv4 again */
1772	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1773
1774	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1775				 MVPP2_PRS_IPV4_DIP_AI_BIT);
1776
1777	/* Shift back to IPv4 proto */
1778	mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
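	/* The first pass left the window at the destination address
	 * (offset 16); stepping back 12 bytes puts it 4 bytes into the
	 * header, where the per-protocol entries expect it.
	 */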
1779
1780	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
1781				 MVPP2_PRS_RI_L3_ADDR_MASK);
1782	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1783
1784	/* Unmask all ports */
1785	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1786
1787	/* Update shadow table and hw entry */
1788	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1789	mvpp2_prs_hw_write(priv, &pe);
1790
1791	return 0;
1792}
1793
1794/* Initialize entries for IPv6 */
1795static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
1796{
1797	struct mvpp2_prs_entry pe;
1798	int tid, err;
1799
1800	/* Set entries for TCP, UDP and ICMP over IPv6 */
1801	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
1802				  MVPP2_PRS_RI_L4_TCP,
1803				  MVPP2_PRS_RI_L4_PROTO_MASK);
1804	if (err)
1805		return err;
1806
1807	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
1808				  MVPP2_PRS_RI_L4_UDP,
1809				  MVPP2_PRS_RI_L4_PROTO_MASK);
1810	if (err)
1811		return err;
1812
1813	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
1814				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1815				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1816				  MVPP2_PRS_RI_CPU_CODE_MASK |
1817				  MVPP2_PRS_RI_UDF3_MASK);
1818	if (err)
1819		return err;
1820
	/* IPv4 is the last header, a similar case to 6 (TCP) or 17 (UDP) */
1822	/* Result Info: UDF7=1, DS lite */
1823	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
1824				  MVPP2_PRS_RI_UDF7_IP6_LITE,
1825				  MVPP2_PRS_RI_UDF7_MASK);
1826	if (err)
1827		return err;
1828
1829	/* IPv6 multicast */
1830	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
1831	if (err)
1832		return err;
1833
1834	/* Entry for checking hop limit */
1835	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1836					MVPP2_PE_LAST_FREE_TID);
1837	if (tid < 0)
1838		return tid;
1839
1840	memset(&pe, 0, sizeof(pe));
1841	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1842	pe.index = tid;
1843
1844	/* Finished: go to flowid generation */
1845	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1846	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1847	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
1848				 MVPP2_PRS_RI_DROP_MASK,
1849				 MVPP2_PRS_RI_L3_PROTO_MASK |
1850				 MVPP2_PRS_RI_DROP_MASK);
1851
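	/* Byte 1 of the lookup window is the hop-limit field (IPv6 header
	 * offset 7); match it against zero so the drop bits above take
	 * effect.
	 */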
1852	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
1853	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1854				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1855
1856	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
1858	mvpp2_prs_hw_write(priv, &pe);
1859
1860	/* Default IPv6 entry for unknown protocols */
1861	memset(&pe, 0, sizeof(pe));
1862	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1863	pe.index = MVPP2_PE_IP6_PROTO_UN;
1864
1865	/* Finished: go to flowid generation */
1866	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1867	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1868	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
1869				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relative to the current position */
1871	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1872				  sizeof(struct ipv6hdr) - 4,
1873				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1874
1875	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1876				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1877	/* Unmask all ports */
1878	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1879
1880	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
1882	mvpp2_prs_hw_write(priv, &pe);
1883
1884	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(pe));
1886	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1887	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
1888
1889	/* Finished: go to flowid generation */
1890	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1891	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1892	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
1893				 MVPP2_PRS_RI_L4_PROTO_MASK);
1894
1895	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
1896				 MVPP2_PRS_IPV6_EXT_AI_BIT);
1897	/* Unmask all ports */
1898	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1899
1900	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
1902	mvpp2_prs_hw_write(priv, &pe);
1903
1904	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
1906	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1907	pe.index = MVPP2_PE_IP6_ADDR_UN;
1908
	/* Go to the IPv6 lookup again */
1910	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1911	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
1912				 MVPP2_PRS_RI_L3_ADDR_MASK);
1913	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1914				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1915	/* Shift back to IPV6 NH */
1916	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
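	/* The preceding stage leaves the window at the destination
	 * address (offset 24); stepping back 18 bytes lands on the
	 * next-header field at offset 6.
	 */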
1917
1918	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1919	/* Unmask all ports */
1920	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1921
1922	/* Update shadow table and hw entry */
1923	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
1924	mvpp2_prs_hw_write(priv, &pe);
1925
1926	return 0;
1927}
1928
1929/* Find tcam entry with matched pair <vid,port> */
1930static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
1931{
1932	unsigned char byte[2], enable[2];
1933	struct mvpp2_prs_entry pe;
1934	u16 rvid, rmask;
1935	int tid;
1936
	/* Go through all the entries with MVPP2_PRS_LU_VID */
1938	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
1939	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
1940		if (!port->priv->prs_shadow[tid].valid ||
1941		    port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
1942			continue;
1943
1944		mvpp2_prs_init_from_hw(port->priv, &pe, tid);
1945
1946		mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
1947		mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
1948
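		/* The 12-bit VID is split across two TCAM bytes: the low
		 * nibble of the first byte holds bits 11:8, the second
		 * byte holds bits 7:0.
		 */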
1949		rvid = ((byte[0] & 0xf) << 8) + byte[1];
1950		rmask = ((enable[0] & 0xf) << 8) + enable[1];
1951
1952		if (rvid != vid || rmask != mask)
1953			continue;
1954
1955		return tid;
1956	}
1957
1958	return -ENOENT;
1959}
1960
1961/* Write parser entry for VID filtering */
1962int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
1963{
1964	unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
1965				 port->id * MVPP2_PRS_VLAN_FILT_MAX;
1966	unsigned int mask = 0xfff, reg_val, shift;
1967	struct mvpp2 *priv = port->priv;
1968	struct mvpp2_prs_entry pe;
1969	int tid;
1970
1971	memset(&pe, 0, sizeof(pe));
1972
	/* Scan TCAM and see if an entry with this <vid,port> already exists */
1974	tid = mvpp2_prs_vid_range_find(port, vid, mask);
1975
1976	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
1977	if (reg_val & MVPP2_DSA_EXTENDED)
1978		shift = MVPP2_VLAN_TAG_EDSA_LEN;
1979	else
1980		shift = MVPP2_VLAN_TAG_LEN;
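	/* Ports in extended-DSA mode carry an 8-byte tag instead of the
	 * regular 4-byte VLAN tag, so the number of bytes to skip differs.
	 */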
1981
1982	/* No such entry */
	if (tid < 0) {
1985		/* Go through all entries from first to last in vlan range */
1986		tid = mvpp2_prs_tcam_first_free(priv, vid_start,
1987						vid_start +
1988						MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
1989
1990		/* There isn't room for a new VID filter */
1991		if (tid < 0)
1992			return tid;
1993
1994		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
1995		pe.index = tid;
1996
1997		/* Mask all ports */
1998		mvpp2_prs_tcam_port_map_set(&pe, 0);
1999	} else {
2000		mvpp2_prs_init_from_hw(priv, &pe, tid);
2001	}
2002
2003	/* Enable the current port */
2004	mvpp2_prs_tcam_port_set(&pe, port->id, true);
2005
2006	/* Continue - set next lookup */
2007	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2008
2009	/* Skip VLAN header - Set offset to 4 or 8 bytes */
2010	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2011
2012	/* Set match on VID */
2013	mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);
2014
2015	/* Clear all ai bits for next iteration */
2016	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2017
2018	/* Update shadow table */
2019	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2020	mvpp2_prs_hw_write(priv, &pe);
2021
2022	return 0;
2023}
2024
2025/* Write parser entry for VID filtering */
2026void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
2027{
2028	struct mvpp2 *priv = port->priv;
2029	int tid;
2030
	/* Scan TCAM and see if an entry with this <vid,port> already exists */
2032	tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
2033
2034	/* No such entry */
2035	if (tid < 0)
2036		return;
2037
2038	mvpp2_prs_hw_inv(priv, tid);
2039	priv->prs_shadow[tid].valid = false;
2040}
2041
2042/* Remove all existing VID filters on this port */
2043void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
2044{
2045	struct mvpp2 *priv = port->priv;
2046	int tid;
2047
2048	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
2049	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
2050		if (priv->prs_shadow[tid].valid) {
2051			mvpp2_prs_hw_inv(priv, tid);
2052			priv->prs_shadow[tid].valid = false;
2053		}
2054	}
2055}
2056
/* Remove the VID filtering guard entry for this port */
2058void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
2059{
2060	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
2061	struct mvpp2 *priv = port->priv;
2062
2063	/* Invalidate the guard entry */
2064	mvpp2_prs_hw_inv(priv, tid);
2065
2066	priv->prs_shadow[tid].valid = false;
2067}
2068
2069/* Add guard entry that drops packets when no VID is matched on this port */
2070void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
2071{
2072	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
2073	struct mvpp2 *priv = port->priv;
2074	unsigned int reg_val, shift;
2075	struct mvpp2_prs_entry pe;
2076
2077	if (priv->prs_shadow[tid].valid)
2078		return;
2079
2080	memset(&pe, 0, sizeof(pe));
2081
2082	pe.index = tid;
2083
2084	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
2085	if (reg_val & MVPP2_DSA_EXTENDED)
2086		shift = MVPP2_VLAN_TAG_EDSA_LEN;
2087	else
2088		shift = MVPP2_VLAN_TAG_LEN;
2089
2090	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2091
2092	/* Mask all ports */
2093	mvpp2_prs_tcam_port_map_set(&pe, 0);
2094
2095	/* Update port mask */
2096	mvpp2_prs_tcam_port_set(&pe, port->id, true);
2097
2098	/* Continue - set next lookup */
2099	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2100
2101	/* Skip VLAN header - Set offset to 4 or 8 bytes */
2102	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2103
2104	/* Drop VLAN packets that don't belong to any VIDs on this port */
2105	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2106				 MVPP2_PRS_RI_DROP_MASK);
2107
2108	/* Clear all ai bits for next iteration */
2109	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2110
2111	/* Update shadow table */
2112	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2113	mvpp2_prs_hw_write(priv, &pe);
2114}
2115
2116/* Parser default initialization */
2117int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
2118{
2119	int err, index, i;
2120
2121	/* Enable tcam table */
2122	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2123
2124	/* Clear all tcam and sram entries */
2125	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2126		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2127		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2128			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2129
2130		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2131		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2132			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2133	}
2134
2135	/* Invalidate all tcam entries */
2136	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2137		mvpp2_prs_hw_inv(priv, index);
2138
2139	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2140					sizeof(*priv->prs_shadow),
2141					GFP_KERNEL);
2142	if (!priv->prs_shadow)
2143		return -ENOMEM;
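	/* The shadow table mirrors each TCAM entry's validity and lookup
	 * ID in memory, so the *_range_find() helpers can skip irrelevant
	 * entries without reading the hardware back.
	 */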
2144
2145	/* Always start from lookup = 0 */
2146	for (index = 0; index < MVPP2_MAX_PORTS; index++)
2147		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2148				       MVPP2_PRS_PORT_LU_MAX, 0);
2149
2150	mvpp2_prs_def_flow_init(priv);
2151
2152	mvpp2_prs_mh_init(priv);
2153
2154	mvpp2_prs_mac_init(priv);
2155
2156	mvpp2_prs_dsa_init(priv);
2157
2158	mvpp2_prs_vid_init(priv);
2159
2160	err = mvpp2_prs_etype_init(priv);
2161	if (err)
2162		return err;
2163
2164	err = mvpp2_prs_vlan_init(pdev, priv);
2165	if (err)
2166		return err;
2167
2168	err = mvpp2_prs_pppoe_init(priv);
2169	if (err)
2170		return err;
2171
2172	err = mvpp2_prs_ip6_init(priv);
2173	if (err)
2174		return err;
2175
2176	err = mvpp2_prs_ip4_init(priv);
2177	if (err)
2178		return err;
2179
2180	return 0;
2181}
2182
2183/* Compare MAC DA with tcam entry data */
2184static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2185				       const u8 *da, unsigned char *mask)
2186{
2187	unsigned char tcam_byte, tcam_mask;
2188	int index;
2189
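	/* Both the per-byte mask and the masked address byte must match
	 * for the entries to be considered equal.
	 */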
2190	for (index = 0; index < ETH_ALEN; index++) {
2191		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2192		if (tcam_mask != mask[index])
2193			return false;
2194
2195		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2196			return false;
2197	}
2198
2199	return true;
2200}
2201
2202/* Find tcam entry with matched pair <MAC DA, port> */
2203static int
2204mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2205			    unsigned char *mask, int udf_type)
2206{
2207	struct mvpp2_prs_entry pe;
2208	int tid;
2209
	/* Go through all the entries with MVPP2_PRS_LU_MAC */
2211	for (tid = MVPP2_PE_MAC_RANGE_START;
2212	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
2213		unsigned int entry_pmap;
2214
2215		if (!priv->prs_shadow[tid].valid ||
2216		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2217		    (priv->prs_shadow[tid].udf != udf_type))
2218			continue;
2219
2220		mvpp2_prs_init_from_hw(priv, &pe, tid);
2221		entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
2222
2223		if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
2224		    entry_pmap == pmap)
2225			return tid;
2226	}
2227
2228	return -ENOENT;
2229}
2230
2231/* Update parser's mac da entry */
2232int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
2233{
2234	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2235	struct mvpp2 *priv = port->priv;
2236	unsigned int pmap, len, ri;
2237	struct mvpp2_prs_entry pe;
2238	int tid;
2239
2240	memset(&pe, 0, sizeof(pe));
2241
	/* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
2243	tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
2244					  MVPP2_PRS_UDF_MAC_DEF);
2245
2246	/* No such entry */
2247	if (tid < 0) {
2248		if (!add)
2249			return 0;
2250
		/* Create a new TCAM entry */
		/* Go through all the entries from first to last */
2253		tid = mvpp2_prs_tcam_first_free(priv,
2254						MVPP2_PE_MAC_RANGE_START,
2255						MVPP2_PE_MAC_RANGE_END);
2256		if (tid < 0)
2257			return tid;
2258
2259		pe.index = tid;
2260
2261		/* Mask all ports */
2262		mvpp2_prs_tcam_port_map_set(&pe, 0);
2263	} else {
2264		mvpp2_prs_init_from_hw(priv, &pe, tid);
2265	}
2266
2267	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2268
2269	/* Update port mask */
2270	mvpp2_prs_tcam_port_set(&pe, port->id, add);
2271
2272	/* Invalidate the entry if no ports are left enabled */
2273	pmap = mvpp2_prs_tcam_port_map_get(&pe);
2274	if (pmap == 0) {
2275		if (add)
2276			return -EINVAL;
2277
2278		mvpp2_prs_hw_inv(priv, pe.index);
2279		priv->prs_shadow[pe.index].valid = false;
2280		return 0;
2281	}
2282
2283	/* Continue - set next lookup */
2284	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
2285
2286	/* Set match on DA */
2287	len = ETH_ALEN;
2288	while (len--)
2289		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
2290
2291	/* Set result info bits */
2292	if (is_broadcast_ether_addr(da)) {
2293		ri = MVPP2_PRS_RI_L2_BCAST;
2294	} else if (is_multicast_ether_addr(da)) {
2295		ri = MVPP2_PRS_RI_L2_MCAST;
2296	} else {
2297		ri = MVPP2_PRS_RI_L2_UCAST;
2298
2299		if (ether_addr_equal(da, port->dev->dev_addr))
2300			ri |= MVPP2_PRS_RI_MAC_ME_MASK;
2301	}
2302
2303	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2304				 MVPP2_PRS_RI_MAC_ME_MASK);
2305	mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2306				MVPP2_PRS_RI_MAC_ME_MASK);
2307
2308	/* Shift to ethertype */
2309	mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
2310				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
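	/* 2 * ETH_ALEN skips the destination and source MAC addresses,
	 * leaving the lookup window at the ethertype for the DSA stage.
	 */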
2311
2312	/* Update shadow table and hw entry */
2313	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
2314	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2315	mvpp2_prs_hw_write(priv, &pe);
2316
2317	return 0;
2318}
2319
2320int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
2321{
2322	struct mvpp2_port *port = netdev_priv(dev);
2323	int err;
2324
2325	/* Remove old parser entry */
2326	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
2327	if (err)
2328		return err;
2329
2330	/* Add new parser entry */
2331	err = mvpp2_prs_mac_da_accept(port, da, true);
2332	if (err)
2333		return err;
2334
2335	/* Set addr in the device */
2336	eth_hw_addr_set(dev, da);
2337
2338	return 0;
2339}
2340
2341void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
2342{
2343	struct mvpp2 *priv = port->priv;
2344	struct mvpp2_prs_entry pe;
2345	unsigned long pmap;
2346	int index, tid;
2347
2348	for (tid = MVPP2_PE_MAC_RANGE_START;
2349	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
2350		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
2351
2352		if (!priv->prs_shadow[tid].valid ||
2353		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2354		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
2355			continue;
2356
2357		mvpp2_prs_init_from_hw(priv, &pe, tid);
2358
2359		pmap = mvpp2_prs_tcam_port_map_get(&pe);
2360
2361		/* We only want entries active on this port */
2362		if (!test_bit(port->id, &pmap))
2363			continue;
2364
2365		/* Read mac addr from entry */
2366		for (index = 0; index < ETH_ALEN; index++)
2367			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
2368						     &da_mask[index]);
2369
		/* Special cases: don't remove the broadcast entry or the
		 * port's own address
		 */
2373		if (is_broadcast_ether_addr(da) ||
2374		    ether_addr_equal(da, port->dev->dev_addr))
2375			continue;
2376
2377		/* Remove entry from TCAM */
2378		mvpp2_prs_mac_da_accept(port, da, false);
2379	}
2380}
2381
2382int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
2383{
2384	switch (type) {
2385	case MVPP2_TAG_TYPE_EDSA:
2386		/* Add port to EDSA entries */
2387		mvpp2_prs_dsa_tag_set(priv, port, true,
2388				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2389		mvpp2_prs_dsa_tag_set(priv, port, true,
2390				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2391		/* Remove port from DSA entries */
2392		mvpp2_prs_dsa_tag_set(priv, port, false,
2393				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2394		mvpp2_prs_dsa_tag_set(priv, port, false,
2395				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2396		break;
2397
2398	case MVPP2_TAG_TYPE_DSA:
2399		/* Add port to DSA entries */
2400		mvpp2_prs_dsa_tag_set(priv, port, true,
2401				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2402		mvpp2_prs_dsa_tag_set(priv, port, true,
2403				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2404		/* Remove port from EDSA entries */
2405		mvpp2_prs_dsa_tag_set(priv, port, false,
2406				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2407		mvpp2_prs_dsa_tag_set(priv, port, false,
2408				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2409		break;
2410
2411	case MVPP2_TAG_TYPE_MH:
2412	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
2414		mvpp2_prs_dsa_tag_set(priv, port, false,
2415				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2416		mvpp2_prs_dsa_tag_set(priv, port, false,
2417				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2418		mvpp2_prs_dsa_tag_set(priv, port, false,
2419				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2420		mvpp2_prs_dsa_tag_set(priv, port, false,
2421				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2422		break;
2423
2424	default:
		if (type < 0 || type > MVPP2_TAG_TYPE_EDSA)
2426			return -EINVAL;
2427	}
2428
2429	return 0;
2430}
2431
2432int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
2433{
2434	struct mvpp2_prs_entry pe;
2435	u8 *ri_byte, *ri_byte_mask;
2436	int tid, i;
2437
2438	memset(&pe, 0, sizeof(pe));
2439
2440	tid = mvpp2_prs_tcam_first_free(priv,
2441					MVPP2_PE_LAST_FREE_TID,
2442					MVPP2_PE_FIRST_FREE_TID);
2443	if (tid < 0)
2444		return tid;
2445
2446	pe.index = tid;
2447
2448	ri_byte = (u8 *)&ri;
2449	ri_byte_mask = (u8 *)&ri_mask;
2450
2451	mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
2452	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2453
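	/* The flow entry matches on the result info accumulated by the
	 * earlier lookup stages, written into the TCAM data one byte at
	 * a time below.
	 */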
2454	for (i = 0; i < 4; i++) {
2455		mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
2456					     ri_byte_mask[i]);
2457	}
2458
2459	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2460	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2461	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2462	mvpp2_prs_hw_write(priv, &pe);
2463
2464	return 0;
2465}
2466
2467/* Set prs flow for the port */
2468int mvpp2_prs_def_flow(struct mvpp2_port *port)
2469{
2470	struct mvpp2_prs_entry pe;
2471	int tid;
2472
2473	memset(&pe, 0, sizeof(pe));
2474
2475	tid = mvpp2_prs_flow_find(port->priv, port->id);
2476
	/* No such entry exists */
2478	if (tid < 0) {
		/* Go through all the entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
2483		if (tid < 0)
2484			return tid;
2485
2486		pe.index = tid;
2487
		/* Set flow ID */
2489		mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2490		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2491
2492		/* Update shadow table */
2493		mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
2494	} else {
2495		mvpp2_prs_init_from_hw(port->priv, &pe, tid);
2496	}
2497
2498	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, BIT(port->id));
2500	mvpp2_prs_hw_write(port->priv, &pe);
2501
2502	return 0;
2503}
2504
2505int mvpp2_prs_hits(struct mvpp2 *priv, int index)
2506{
2507	u32 val;
2508
	if (index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
2510		return -EINVAL;
2511
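	/* The hit counter is read indirectly: select the TCAM entry index
	 * first, then read and mask the counter register.
	 */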
2512	mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
2513
2514	val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
2515
2516	val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
2517
2518	return val;
2519}
2520