// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2014-2019 aQuantia Corporation. */

/* File aq_filters.c: RX filters related functions. */

#include "aq_filters.h"

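/* Check that the flow type requested via ethtool is one the driver can
 * program into hardware: L2 (ethertype), TCP/UDP/SCTP over IPv4/IPv6, or
 * raw IPv4/IPv6 user flows restricted to those same L4 protocols.
 */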
static bool __must_check
aq_rule_is_approve(struct ethtool_rx_flow_spec *fsp)
{
	if (fsp->flow_type & FLOW_MAC_EXT)
		return false;

	switch (fsp->flow_type & ~FLOW_EXT) {
	case ETHER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		return true;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_IP:
			return true;
		default:
			return false;
		}
	case IPV6_USER_FLOW:
		switch (fsp->h_u.usr_ip6_spec.l4_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_IP:
			return true;
		default:
			return false;
		}
	default:
		return false;
	}

	return false;
}

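/* Compare two flow specs field by field to detect duplicates. The location
 * field is not compared, so the same match at a different location still
 * counts as a duplicate.
 */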
static bool __must_check
aq_match_filter(struct ethtool_rx_flow_spec *fsp1,
		struct ethtool_rx_flow_spec *fsp2)
{
	if (fsp1->flow_type != fsp2->flow_type ||
	    memcmp(&fsp1->h_u, &fsp2->h_u, sizeof(fsp2->h_u)) ||
	    memcmp(&fsp1->h_ext, &fsp2->h_ext, sizeof(fsp2->h_ext)) ||
	    memcmp(&fsp1->m_u, &fsp2->m_u, sizeof(fsp2->m_u)) ||
	    memcmp(&fsp1->m_ext, &fsp2->m_ext, sizeof(fsp2->m_ext)))
		return false;

	return true;
}

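/* Walk the software filter list and report whether an identical rule
 * already exists at a different location.
 */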
static bool __must_check
aq_rule_already_exists(struct aq_nic_s *aq_nic,
		       struct ethtool_rx_flow_spec *fsp)
{
	struct aq_rx_filter *rule;
	struct hlist_node *aq_node2;
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (rule->aq_fsp.location == fsp->location)
			continue;
		if (aq_match_filter(&rule->aq_fsp, fsp)) {
			netdev_err(aq_nic->ndev,
				   "ethtool: This filter is already set\n");
			return true;
		}
	}

	return false;
}

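/* Validate the location of an L3/L4 filter and enforce that IPv4 and IPv6
 * rules are not mixed: an IPv6 rule occupies four consecutive hardware
 * slots, so it may only start at the first or the fifth location of the
 * L3/L4 range.
 */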
static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
				  struct aq_hw_rx_fltrs_s *rx_fltrs,
				  struct ethtool_rx_flow_spec *fsp)
{
	u32 last_location = AQ_RX_LAST_LOC_FL3L4 -
			    aq_nic->aq_hw_rx_fltrs.fl3l4.reserved_count;

	if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
	    fsp->location > last_location) {
		netdev_err(aq_nic->ndev,
			   "ethtool: location must be in range [%d, %d]",
			   AQ_RX_FIRST_LOC_FL3L4, last_location);
		return -EINVAL;
	}
	if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
		rx_fltrs->fl3l4.is_ipv6 = false;
		netdev_err(aq_nic->ndev,
			   "ethtool: mixing ipv4 and ipv6 is not allowed");
		return -EINVAL;
	} else if (!rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv6) {
		rx_fltrs->fl3l4.is_ipv6 = true;
		netdev_err(aq_nic->ndev,
			   "ethtool: mixing ipv4 and ipv6 is not allowed");
		return -EINVAL;
	} else if (rx_fltrs->fl3l4.is_ipv6		      &&
		   fsp->location != AQ_RX_FIRST_LOC_FL3L4 + 4 &&
		   fsp->location != AQ_RX_FIRST_LOC_FL3L4) {
		netdev_err(aq_nic->ndev,
			   "ethtool: The specified location for ipv6 must be %d or %d",
			   AQ_RX_FIRST_LOC_FL3L4, AQ_RX_FIRST_LOC_FL3L4 + 4);
		return -EINVAL;
	}

	return 0;
}

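/* Validate an ethertype (L2) filter: the location must lie within the
 * ethertype range, and a user-priority match additionally requires an
 * ethertype to be specified.
 */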
static int __must_check
aq_check_approve_fl2(struct aq_nic_s *aq_nic,
		     struct aq_hw_rx_fltrs_s *rx_fltrs,
		     struct ethtool_rx_flow_spec *fsp)
{
	u32 last_location = AQ_RX_LAST_LOC_FETHERT -
			    aq_nic->aq_hw_rx_fltrs.fet_reserved_count;

	if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
	    fsp->location > last_location) {
		netdev_err(aq_nic->ndev,
			   "ethtool: location must be in range [%d, %d]",
			   AQ_RX_FIRST_LOC_FETHERT,
			   last_location);
		return -EINVAL;
	}

	if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK &&
	    fsp->m_u.ether_spec.h_proto == 0U) {
		netdev_err(aq_nic->ndev,
			   "ethtool: proto (ether_type) parameter must be specified");
		return -EINVAL;
	}

	return 0;
}

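/* Validate a VLAN filter: the location must lie within the VLAN range, the
 * VLAN ID must already be active on the interface when VLAN filter offload
 * is enabled, and the target queue must exist.
 */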
static int __must_check
aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
		       struct aq_hw_rx_fltrs_s *rx_fltrs,
		       struct ethtool_rx_flow_spec *fsp)
{
	struct aq_nic_cfg_s *cfg = &aq_nic->aq_nic_cfg;

	if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
	    fsp->location > AQ_RX_LAST_LOC_FVLANID) {
		netdev_err(aq_nic->ndev,
			   "ethtool: location must be in range [%d, %d]",
			   AQ_RX_FIRST_LOC_FVLANID,
			   AQ_RX_LAST_LOC_FVLANID);
		return -EINVAL;
	}

	if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK,
		       aq_nic->active_vlans))) {
		netdev_err(aq_nic->ndev,
			   "ethtool: unknown vlan-id specified");
		return -EINVAL;
	}

	if (fsp->ring_cookie >= cfg->num_rss_queues * cfg->tcs) {
		netdev_err(aq_nic->ndev,
			   "ethtool: queue number must be in range [0, %d]",
			   cfg->num_rss_queues * cfg->tcs - 1);
		return -EINVAL;
	}
	return 0;
}

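/* Dispatch validation of a rule to the VLAN, ethertype or L3/L4 checker
 * depending on the flow type and VLAN TCI mask, and record whether an
 * L3/L4 rule is IPv4 or IPv6.
 */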
static int __must_check
aq_check_filter(struct aq_nic_s *aq_nic,
		struct ethtool_rx_flow_spec *fsp)
{
	int err = 0;
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);

	if (fsp->flow_type & FLOW_EXT) {
		if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_VID_MASK) {
			err = aq_check_approve_fvlan(aq_nic, rx_fltrs, fsp);
		} else if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK) {
			err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
		} else {
			netdev_err(aq_nic->ndev,
				   "ethtool: invalid vlan mask 0x%x specified",
				   be16_to_cpu(fsp->m_ext.vlan_tci));
			err = -EINVAL;
		}
	} else {
		switch (fsp->flow_type & ~FLOW_EXT) {
		case ETHER_FLOW:
			err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
		case IPV4_FLOW:
		case IP_USER_FLOW:
			rx_fltrs->fl3l4.is_ipv6 = false;
			err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
			break;
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
		case SCTP_V6_FLOW:
		case IPV6_FLOW:
		case IPV6_USER_FLOW:
			rx_fltrs->fl3l4.is_ipv6 = true;
			err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
			break;
		default:
			netdev_err(aq_nic->ndev,
				   "ethtool: unknown flow-type specified");
			err = -EINVAL;
		}
	}

	return err;
}

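/* Reject rules the hardware cannot express: NTUPLE must be enabled on the
 * netdev, the flow type must be supported, and matching on TOS/traffic
 * class or on extended MAC fields is not available.
 */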
static bool __must_check
aq_rule_is_not_support(struct aq_nic_s *aq_nic,
		       struct ethtool_rx_flow_spec *fsp)
{
	bool rule_is_not_support = false;

	if (!(aq_nic->ndev->features & NETIF_F_NTUPLE)) {
		netdev_err(aq_nic->ndev,
			   "ethtool: Please enable ntuple filters first:\n"
			   "ethtool -K %s ntuple on\n", aq_nic->ndev->name);
		rule_is_not_support = true;
	} else if (!aq_rule_is_approve(fsp)) {
		netdev_err(aq_nic->ndev,
			   "ethtool: The specified flow type is not supported\n");
		rule_is_not_support = true;
	} else if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW &&
		   (fsp->h_u.tcp_ip4_spec.tos ||
		    fsp->h_u.tcp_ip6_spec.tclass)) {
		netdev_err(aq_nic->ndev,
			   "ethtool: The specified tos/tclass is not supported\n");
		rule_is_not_support = true;
	} else if (fsp->flow_type & FLOW_MAC_EXT) {
		netdev_err(aq_nic->ndev,
			   "ethtool: MAC_EXT is not supported");
		rule_is_not_support = true;
	}

	return rule_is_not_support;
}

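/* Sanity-check a rule: the location must not exceed the RXNFC maximum,
 * the type-specific checks must pass, and a non-discard action must point
 * at an existing queue.
 */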
static bool __must_check
aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
		       struct ethtool_rx_flow_spec *fsp)
{
	struct aq_nic_cfg_s *cfg = &aq_nic->aq_nic_cfg;
	bool rule_is_not_correct = false;

	if (!aq_nic) {
		rule_is_not_correct = true;
	} else if (fsp->location > AQ_RX_MAX_RXNFC_LOC) {
		netdev_err(aq_nic->ndev,
			   "ethtool: The specified location %u is invalid\n",
			   fsp->location);
		rule_is_not_correct = true;
	} else if (aq_check_filter(aq_nic, fsp)) {
		rule_is_not_correct = true;
	} else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
		if (fsp->ring_cookie >= cfg->num_rss_queues * cfg->tcs) {
			netdev_err(aq_nic->ndev,
				   "ethtool: The specified action is invalid.\n"
				   "The maximum allowable action value is %u.\n",
				   cfg->num_rss_queues * cfg->tcs - 1);
			rule_is_not_correct = true;
		}
	}

	return rule_is_not_correct;
}

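/* Combined validation used on rule insertion; maps the three failure
 * classes onto -EINVAL, -EOPNOTSUPP and -EEXIST respectively.
 */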
static int __must_check
aq_check_rule(struct aq_nic_s *aq_nic,
	      struct ethtool_rx_flow_spec *fsp)
{
	int err = 0;

	if (aq_rule_is_not_correct(aq_nic, fsp))
		err = -EINVAL;
	else if (aq_rule_is_not_support(aq_nic, fsp))
		err = -EOPNOTSUPP;
	else if (aq_rule_already_exists(aq_nic, fsp))
		err = -EEXIST;

	return err;
}

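/* Translate an ethtool flow spec into the hardware L2 filter layout: a
 * zero-based location, the target queue (-1 when the rule discards), the
 * ethertype and an optional user-priority match.
 */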
static void aq_set_data_fl2(struct aq_nic_s *aq_nic,
			    struct aq_rx_filter *aq_rx_fltr,
			    struct aq_rx_filter_l2 *data, bool add)
{
	const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;

	memset(data, 0, sizeof(*data));

	data->location = fsp->location - AQ_RX_FIRST_LOC_FETHERT;

	if (fsp->ring_cookie != RX_CLS_FLOW_DISC)
		data->queue = fsp->ring_cookie;
	else
		data->queue = -1;

	data->ethertype = be16_to_cpu(fsp->h_u.ether_spec.h_proto);
	data->user_priority_en = be16_to_cpu(fsp->m_ext.vlan_tci)
				 == VLAN_PRIO_MASK;
	data->user_priority = (be16_to_cpu(fsp->h_ext.vlan_tci)
			       & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}

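/* Program or clear an ethertype filter in hardware through the hw_ops
 * callbacks, provided the underlying hardware implements them.
 */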
static int aq_add_del_fether(struct aq_nic_s *aq_nic,
			     struct aq_rx_filter *aq_rx_fltr, bool add)
{
	struct aq_rx_filter_l2 data;
	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;

	aq_set_data_fl2(aq_nic, aq_rx_fltr, &data, add);

	if (unlikely(!aq_hw_ops->hw_filter_l2_set))
		return -EOPNOTSUPP;
	if (unlikely(!aq_hw_ops->hw_filter_l2_clear))
		return -EOPNOTSUPP;

	if (add)
		return aq_hw_ops->hw_filter_l2_set(aq_hw, &data);
	else
		return aq_hw_ops->hw_filter_l2_clear(aq_hw, &data);
}

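/* Report whether @vlan is already claimed by an enabled filter entry with
 * an explicit queue assignment.
 */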
static bool aq_fvlan_is_busy(struct aq_rx_filter_vlan *aq_vlans, int vlan)
{
	int i;

	for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
		if (aq_vlans[i].enable &&
		    aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED &&
		    aq_vlans[i].vlan_id == vlan) {
			return true;
		}
	}

	return false;
}

/* Rebuild the array of VLAN filters so that filters with an assigned
 * queue take precedence over plain VLANs registered on the interface.
 */
static void aq_fvlan_rebuild(struct aq_nic_s *aq_nic,
			     unsigned long *active_vlans,
			     struct aq_rx_filter_vlan *aq_vlans)
{
	bool vlan_busy = false;
	int vlan = -1;
	int i;

	for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
		if (aq_vlans[i].enable &&
		    aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED)
			continue;
		do {
			vlan = find_next_bit(active_vlans,
					     VLAN_N_VID,
					     vlan + 1);
			if (vlan == VLAN_N_VID) {
				aq_vlans[i].enable = 0U;
				aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
				aq_vlans[i].vlan_id = 0;
				continue;
			}

			vlan_busy = aq_fvlan_is_busy(aq_vlans, vlan);
			if (!vlan_busy) {
				aq_vlans[i].enable = 1U;
				aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
				aq_vlans[i].vlan_id = vlan;
			}
		} while (vlan_busy && vlan != VLAN_N_VID);
	}
}

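/* Translate an ethtool flow spec into a slot of the VLAN shadow table; any
 * pre-existing entry for the same VLAN ID is disabled first so that the
 * new queue-steering entry takes precedence.
 */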
static int aq_set_data_fvlan(struct aq_nic_s *aq_nic,
			     struct aq_rx_filter *aq_rx_fltr,
			     struct aq_rx_filter_vlan *aq_vlans, bool add)
{
	const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
	int location = fsp->location - AQ_RX_FIRST_LOC_FVLANID;
	int i;

	memset(&aq_vlans[location], 0, sizeof(aq_vlans[location]));

	if (!add)
		return 0;

	/* remove the VLAN if it was in the table without a queue assignment */
	for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
		if (aq_vlans[i].vlan_id ==
		    (be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK)) {
			aq_vlans[i].enable = false;
		}
	}

	aq_vlans[location].location = location;
	aq_vlans[location].vlan_id = be16_to_cpu(fsp->h_ext.vlan_tci)
				     & VLAN_VID_MASK;
	aq_vlans[location].queue = fsp->ring_cookie & 0x1FU;
	aq_vlans[location].enable = 1U;

	return 0;
}

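/* Remove the steering rule matching @vlan_id, if one was installed;
 * typically used when a VLAN is removed from the interface.
 */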
int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct aq_rx_filter *rule = NULL;
	struct hlist_node *aq_node2;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
			break;
	}
	if (rule && rule->type == aq_rx_filter_vlan &&
	    be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
		struct ethtool_rxnfc cmd;

		cmd.fs.location = rule->aq_fsp.location;
		return aq_del_rxnfc_rule(aq_nic, &cmd);
	}

	return -ENOENT;
}

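/* Update the VLAN shadow table for the rule and push the whole table to
 * hardware.
 */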
static int aq_add_del_fvlan(struct aq_nic_s *aq_nic,
			    struct aq_rx_filter *aq_rx_fltr, bool add)
{
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;

	if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
		return -EOPNOTSUPP;

	aq_set_data_fvlan(aq_nic,
			  aq_rx_fltr,
			  aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans,
			  add);

	return aq_filters_vlans_update(aq_nic);
}

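/* Translate an ethtool flow spec into the hardware L3/L4 filter command
 * word and address/port fields, and track which IPv4/IPv6 slots are in
 * use. On deletion, only the usage bitmaps are updated.
 */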
static int aq_set_data_fl3l4(struct aq_nic_s *aq_nic,
			     struct aq_rx_filter *aq_rx_fltr,
			     struct aq_rx_filter_l3l4 *data, bool add)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;

	memset(data, 0, sizeof(*data));

	data->is_ipv6 = rx_fltrs->fl3l4.is_ipv6;
	data->location = HW_ATL_GET_REG_LOCATION_FL3L4(fsp->location);

	if (!add) {
		if (!data->is_ipv6)
			rx_fltrs->fl3l4.active_ipv4 &= ~BIT(data->location);
		else
			rx_fltrs->fl3l4.active_ipv6 &=
				~BIT((data->location) / 4);

		return 0;
	}

	data->cmd |= HW_ATL_RX_ENABLE_FLTR_L3L4;

	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		data->cmd |= HW_ATL_RX_UDP;
		data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		data->cmd |= HW_ATL_RX_SCTP;
		data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
		break;
	default:
		break;
	}

	if (!data->is_ipv6) {
		data->ip_src[0] =
			ntohl(fsp->h_u.tcp_ip4_spec.ip4src);
		data->ip_dst[0] =
			ntohl(fsp->h_u.tcp_ip4_spec.ip4dst);
		rx_fltrs->fl3l4.active_ipv4 |= BIT(data->location);
	} else {
		int i;

		rx_fltrs->fl3l4.active_ipv6 |= BIT((data->location) / 4);
		for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
			data->ip_dst[i] =
				ntohl(fsp->h_u.tcp_ip6_spec.ip6dst[i]);
			data->ip_src[i] =
				ntohl(fsp->h_u.tcp_ip6_spec.ip6src[i]);
		}
		data->cmd |= HW_ATL_RX_ENABLE_L3_IPV6;
	}
	if (fsp->flow_type != IP_USER_FLOW &&
	    fsp->flow_type != IPV6_USER_FLOW) {
		if (!data->is_ipv6) {
			data->p_dst =
				ntohs(fsp->h_u.tcp_ip4_spec.pdst);
			data->p_src =
				ntohs(fsp->h_u.tcp_ip4_spec.psrc);
		} else {
			data->p_dst =
				ntohs(fsp->h_u.tcp_ip6_spec.pdst);
			data->p_src =
				ntohs(fsp->h_u.tcp_ip6_spec.psrc);
		}
	}
	if (data->ip_src[0] && !data->is_ipv6)
		data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3;
	if (data->ip_dst[0] && !data->is_ipv6)
		data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3;
	if (data->p_dst)
		data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4;
	if (data->p_src)
		data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4;
	if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
		data->cmd |= HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT;
		data->cmd |= fsp->ring_cookie << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
		data->cmd |= HW_ATL_RX_ENABLE_QUEUE_L3L4;
	} else {
		data->cmd |= HW_ATL_RX_DISCARD << HW_ATL_RX_ACTION_FL3F4_SHIFT;
	}

	return 0;
}

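/* Push one L3/L4 filter to hardware, if the callback is implemented. */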
static int aq_set_fl3l4(struct aq_hw_s *aq_hw,
			const struct aq_hw_ops *aq_hw_ops,
			struct aq_rx_filter_l3l4 *data)
{
	if (unlikely(!aq_hw_ops->hw_filter_l3l4_set))
		return -EOPNOTSUPP;

	return aq_hw_ops->hw_filter_l3l4_set(aq_hw, data);
}

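/* Validate the location, build the hardware representation and program
 * it; used for both rule insertion and removal.
 */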
static int aq_add_del_fl3l4(struct aq_nic_s *aq_nic,
			    struct aq_rx_filter *aq_rx_fltr, bool add)
{
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
	struct aq_rx_filter_l3l4 data;

	if (unlikely(aq_rx_fltr->aq_fsp.location < AQ_RX_FIRST_LOC_FL3L4 ||
		     aq_rx_fltr->aq_fsp.location > AQ_RX_LAST_LOC_FL3L4  ||
		     aq_set_data_fl3l4(aq_nic, aq_rx_fltr, &data, add)))
		return -EINVAL;

	return aq_set_fl3l4(aq_hw, aq_hw_ops, &data);
}

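/* Dispatch a rule to the VLAN, ethertype or L3/L4 programming path based
 * on the same classification used during validation.
 */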
static int aq_add_del_rule(struct aq_nic_s *aq_nic,
			   struct aq_rx_filter *aq_rx_fltr, bool add)
{
	int err = -EINVAL;

	if (aq_rx_fltr->aq_fsp.flow_type & FLOW_EXT) {
		if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
		    == VLAN_VID_MASK) {
			aq_rx_fltr->type = aq_rx_filter_vlan;
			err = aq_add_del_fvlan(aq_nic, aq_rx_fltr, add);
		} else if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
			== VLAN_PRIO_MASK) {
			aq_rx_fltr->type = aq_rx_filter_ethertype;
			err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
		}
	} else {
		switch (aq_rx_fltr->aq_fsp.flow_type & ~FLOW_EXT) {
		case ETHER_FLOW:
			aq_rx_fltr->type = aq_rx_filter_ethertype;
			err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
		case IP_USER_FLOW:
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
		case SCTP_V6_FLOW:
		case IPV6_USER_FLOW:
			aq_rx_fltr->type = aq_rx_filter_l3l4;
			err = aq_add_del_fl3l4(aq_nic, aq_rx_fltr, add);
			break;
		default:
			err = -EINVAL;
			break;
		}
	}

	return err;
}

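/* Replace the rule at @index in the sorted software list: delete any
 * existing rule at that location and, when @aq_rx_fltr is given, link the
 * new rule in location order.
 */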
static int aq_update_table_filters(struct aq_nic_s *aq_nic,
				   struct aq_rx_filter *aq_rx_fltr, u16 index,
				   struct ethtool_rxnfc *cmd)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct aq_rx_filter *rule = NULL, *parent = NULL;
	struct hlist_node *aq_node2;
	int err = -EINVAL;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (rule->aq_fsp.location >= index)
			break;
		parent = rule;
	}

	if (rule && rule->aq_fsp.location == index) {
		err = aq_add_del_rule(aq_nic, rule, false);
		hlist_del(&rule->aq_node);
		kfree(rule);
		--rx_fltrs->active_filters;
	}

	if (unlikely(!aq_rx_fltr))
		return err;

	INIT_HLIST_NODE(&aq_rx_fltr->aq_node);

	if (parent)
		hlist_add_behind(&aq_rx_fltr->aq_node, &parent->aq_node);
	else
		hlist_add_head(&aq_rx_fltr->aq_node, &rx_fltrs->filter_list);

	++rx_fltrs->active_filters;

	return 0;
}

u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);

	return rx_fltrs->active_filters;
}

struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic)
{
	return &aq_nic->aq_hw_rx_fltrs;
}

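/* Validate and install a rule supplied through ethtool. A sketch of the
 * userspace side (assuming location 32 falls inside the L3/L4 range on
 * this device):
 *   ethtool -K eth0 ntuple on
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 2 loc 32
 */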
int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct aq_rx_filter *aq_rx_fltr;
	int err = 0;

	err = aq_check_rule(aq_nic, fsp);
	if (err)
		goto err_exit;

	aq_rx_fltr = kzalloc(sizeof(*aq_rx_fltr), GFP_KERNEL);
	if (unlikely(!aq_rx_fltr)) {
		err = -ENOMEM;
		goto err_exit;
	}

	memcpy(&aq_rx_fltr->aq_fsp, fsp, sizeof(*fsp));

	err = aq_update_table_filters(aq_nic, aq_rx_fltr, fsp->location, NULL);
	if (unlikely(err))
		goto err_free;

	err = aq_add_del_rule(aq_nic, aq_rx_fltr, true);
	if (unlikely(err)) {
		hlist_del(&aq_rx_fltr->aq_node);
		--rx_fltrs->active_filters;
		goto err_free;
	}

	return 0;

err_free:
	kfree(aq_rx_fltr);
err_exit:
	return err;
}

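/* Delete the rule at cmd->fs.location from hardware and from the software
 * list; returns -EINVAL if no such rule exists.
 */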
int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct aq_rx_filter *rule = NULL;
	struct hlist_node *aq_node2;
	int err = -EINVAL;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (rule->aq_fsp.location == cmd->fs.location)
			break;
	}

	if (rule && rule->aq_fsp.location == cmd->fs.location) {
		err = aq_add_del_rule(aq_nic, rule, false);
		hlist_del(&rule->aq_node);
		kfree(rule);
		--rx_fltrs->active_filters;
	}
	return err;
}

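/* Copy the rule stored at fsp->location back to ethtool; the list is kept
 * sorted by location, so the walk can stop at the first entry at or
 * beyond the requested location.
 */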
int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct ethtool_rx_flow_spec *fsp =
			(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct aq_rx_filter *rule = NULL;
	struct hlist_node *aq_node2;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node)
		if (fsp->location <= rule->aq_fsp.location)
			break;

	if (unlikely(!rule || fsp->location != rule->aq_fsp.location))
		return -EINVAL;

	memcpy(fsp, &rule->aq_fsp, sizeof(*fsp));

	return 0;
}

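/* Report the locations of all installed rules for ETHTOOL_GRXCLSRLALL,
 * bounded by the rule count the caller allocated.
 */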
int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct hlist_node *aq_node2;
	struct aq_rx_filter *rule;
	int count = 0;

	cmd->data = aq_get_rxnfc_count_all_rules(aq_nic);

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		if (unlikely(count == cmd->rule_cnt))
			return -EMSGSIZE;

		rule_locs[count++] = rule->aq_fsp.location;
	}

	cmd->rule_cnt = count;

	return 0;
}

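/* Remove every installed rule from hardware and free the software list;
 * stops on the first hardware error.
 */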
int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct hlist_node *aq_node2;
	struct aq_rx_filter *rule;
	int err = 0;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		err = aq_add_del_rule(aq_nic, rule, false);
		if (err)
			goto err_exit;
		hlist_del(&rule->aq_node);
		kfree(rule);
		--rx_fltrs->active_filters;
	}

err_exit:
	return err;
}

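/* Re-program every rule from the software list into hardware, e.g. after
 * a device reset.
 */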
int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic)
{
	struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
	struct hlist_node *aq_node2;
	struct aq_rx_filter *rule;
	int err = 0;

	hlist_for_each_entry_safe(rule, aq_node2,
				  &rx_fltrs->filter_list, aq_node) {
		err = aq_add_del_rule(aq_nic, rule, true);
		if (err)
			goto err_exit;
	}

err_exit:
	return err;
}

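/* Re-program the whole VLAN filter table. With VLAN filter offload
 * enabled, hardware filtering is disabled around the update and only
 * re-enabled when the active VLANs fit into the table; otherwise the
 * device is left in VLAN promiscuous mode.
 */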
int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
{
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
	int hweight = 0;
	int err = 0;

	if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
		return -EOPNOTSUPP;
	if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
		return -EOPNOTSUPP;

	aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
			 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);

	if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
		hweight = bitmap_weight(aq_nic->active_vlans, VLAN_N_VID);

		err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
		if (err)
			return err;
	}

	err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
					    aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
	if (err)
		return err;

	if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) {
			err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw,
				!(aq_nic->packet_filter & IFF_PROMISC));
			aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
		} else {
			/* otherwise stay in VLAN promiscuous mode */
			aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
		}
	}

	return err;
}

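/* Disable VLAN filter offload: clear the active-VLAN bitmap, rebuild and
 * push an empty table, and force VLAN promiscuous mode.
 */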
int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
{
	const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
	struct aq_hw_s *aq_hw = aq_nic->aq_hw;
	int err = 0;

	bitmap_zero(aq_nic->active_vlans, VLAN_N_VID);
	aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
			 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);

	if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
		return -EOPNOTSUPP;
	if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
		return -EOPNOTSUPP;

	aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
	err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
	if (err)
		return err;
	err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
					    aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
	return err;
}