1/*
2 * Copyright (c) 2012, The Linux Foundation. All rights reserved.
3 * Permission to use, copy, modify, and/or distribute this software for
4 * any purpose with or without fee is hereby granted, provided that the
5 * above copyright notice and this permission notice appear in all copies.
6 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
7 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
8 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
9 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
10 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
11 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
12 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
13 */
14
15
16
17/**
18 * @defgroup garuda_acl GARUDA_ACL
19 * @{
20 */
21
22#include "sw.h"
23#include "hsl.h"
24#include "hsl_dev.h"
25#include "hsl_acl.h"
26#include "hsl_port_prop.h"
27#include "garuda_acl.h"
28#include "garuda_reg.h"
29
30//#define GARUDA_ACL_DEBUG
31//#define GARUDA_SW_ENTRY
32//#define GARUDA_ENTRY_DUMP
33
34typedef struct
35{
36    a_uint32_t list_id;
37    a_uint32_t list_pri;
38    a_uint32_t addr;
39    a_uint32_t size;
40    a_uint32_t status;
41    fal_pbmp_t bind_pts;
42} garuda_acl_list_t;
43
44typedef struct
45{
46    a_uint32_t slct[8];
47    a_uint32_t vlu[5];
48    a_uint32_t msk[5];
49    a_uint32_t typ;
50    a_uint32_t act;
51} garuda_acl_hw_rule_t;
52
53static garuda_acl_list_t *list_ent[SW_MAX_NR_DEV];
54static garuda_acl_hw_rule_t *hw_rule_ent;
55
56static a_uint32_t filter[SW_MAX_NR_DEV];
57static a_uint32_t filter_snap[SW_MAX_NR_DEV];
58
59#define GARUDA_MAX_LIST 32
60#define GARUDA_MAX_RULE 32
61
62#define ENT_FREE        0x1
63#define ENT_USED        0x2
64
65#define GARUDA_RULE_VLU_ADDR  0x58400
66#define GARUDA_RULE_MSK_ADDR  0x58c00
67#define GARUDA_RULE_TYP_ADDR  0x5881c
68#define GARUDA_RULE_ACT_ADDR  0x58000
69#define GARUDA_RULE_SLCT_ADDR 0x58800
70
71#define GARUDA_MAC_FILTER       1
72#define GARUDA_IP4_FILTER       2
73#define GARUDA_IP6R1_FILTER     3
74#define GARUDA_IP6R2_FILTER     4
75#define GARUDA_IP6R3_FILTER     5
76
77#ifdef GARUDA_SW_ENTRY
78static char *flt_vlu_mem = NULL;
79static char *flt_msk_mem = NULL;
80static char *flt_typ_mem = NULL;
81static char *act_mem = NULL;
82static char *slct_mem = NULL;
83#endif
84
85static a_bool_t _garuda_acl_zero_addr(const fal_mac_addr_t addr);
86
87static a_bool_t
88_garuda_acl_field_care(fal_acl_field_op_t op, a_uint32_t val, a_uint32_t mask,
89                       a_uint32_t chkvlu);
90
91static sw_error_t
92_garuda_acl_list_loc(a_uint32_t dev_id, a_uint32_t list_id, a_uint32_t * idx);
93
94static sw_error_t
95_garuda_acl_filter_map_get(const garuda_acl_hw_rule_t * rule,
96                           a_uint32_t flt_idx[], a_uint32_t * flt_nr);
97
98static sw_error_t
99_garuda_acl_rule_mac_parse(fal_acl_rule_t * sw, fal_pbmp_t bind_pts,
100                           garuda_acl_hw_rule_t * hw, a_bool_t * b_care,
101                           a_uint32_t * len);
102
103static sw_error_t
104_garuda_acl_rule_ip4_parse(fal_acl_rule_t * sw, fal_pbmp_t bind_pts,
105                           garuda_acl_hw_rule_t * hw, a_bool_t * b_care,
106                           a_uint32_t * len);
107
108static sw_error_t
109_garuda_acl_rule_ip6r1_parse(fal_acl_rule_t * sw, fal_pbmp_t bind_pts,
110                             garuda_acl_hw_rule_t * hw, a_bool_t * b_care,
111                             a_uint32_t * len);
112
113static sw_error_t
114_garuda_acl_rule_ip6r2_parse(fal_acl_rule_t * sw, fal_pbmp_t bind_pts,
115                             garuda_acl_hw_rule_t * hw, a_bool_t * b_care,
116                             a_uint32_t * len);
117
118static sw_error_t
119_garuda_acl_rule_ip6r3_parse(fal_acl_rule_t * sw, fal_pbmp_t bind_pts,
120                             garuda_acl_hw_rule_t * hw, a_bool_t * b_care,
121                             a_uint32_t * len);
122
123static sw_error_t
124_garuda_acl_action_parse(a_uint32_t dev_id, const fal_acl_rule_t * sw,
125                         garuda_acl_hw_rule_t * hw);
126
127static sw_error_t
128_garuda_acl_rule_mac_reparse(fal_acl_rule_t * sw,
129                             const garuda_acl_hw_rule_t * hw);
130
131static sw_error_t
132_garuda_acl_rule_ip4_reparse(fal_acl_rule_t * sw,
133                             const garuda_acl_hw_rule_t * hw);
134
135static sw_error_t
136_garuda_acl_rule_ip6r1_reparse(fal_acl_rule_t * sw,
137                               const garuda_acl_hw_rule_t * hw);
138
139static sw_error_t
140_garuda_acl_rule_ip6r2_reparse(fal_acl_rule_t * sw,
141                               const garuda_acl_hw_rule_t * hw);
142
143static sw_error_t
144_garuda_acl_rule_ip6r3_reparse(fal_acl_rule_t * sw,
145                               const garuda_acl_hw_rule_t * hw);
146
147static sw_error_t
148_garuda_acl_rule_action_reparse(fal_acl_rule_t * sw,
149                                const garuda_acl_hw_rule_t * hw);
150
151static sw_error_t
152_garuda_acl_filter_alloc(a_uint32_t dev_id, a_uint32_t * idx);
153
154static void
155_garuda_acl_filter_free(a_uint32_t dev_id, a_uint32_t idx);
156
157static void
158_garuda_acl_filter_snap(a_uint32_t dev_id);
159
160static void
161_garuda_acl_filter_commit(a_uint32_t dev_id);
162
163static sw_error_t
164_garuda_acl_slct_update(garuda_acl_hw_rule_t * hw, a_uint32_t offset,
165                        a_uint32_t flt_idx);
166
167static sw_error_t
168_garuda_acl_filter_write(a_uint32_t dev_id, const garuda_acl_hw_rule_t * rule,
169                         a_uint32_t flt_idx);
170
171static sw_error_t
172_garuda_acl_action_write(a_uint32_t dev_id, const garuda_acl_hw_rule_t * rule,
173                         a_uint32_t act_idx);
174
175static sw_error_t
176_garuda_acl_slct_write(a_uint32_t dev_id, const garuda_acl_hw_rule_t * rule,
177                       a_uint32_t slct_idx);
178
179static sw_error_t
180_garuda_acl_filter_read(a_uint32_t dev_id, garuda_acl_hw_rule_t * rule,
181                        a_uint32_t flt_idx);
182
183static sw_error_t
184_garuda_acl_action_read(a_uint32_t dev_id, garuda_acl_hw_rule_t * rule,
185                        a_uint32_t act_idx);
186
187static sw_error_t
188_garuda_acl_slct_read(a_uint32_t dev_id, garuda_acl_hw_rule_t * rule,
189                      a_uint32_t slct_idx);
190
191static sw_error_t
192_garuda_acl_rule_set(a_uint32_t dev_id, a_uint32_t base_addr,
193                     const garuda_acl_hw_rule_t * hw_rule_ent,
194                     a_uint32_t rule_nr);
195
196static sw_error_t
197_garuda_acl_rule_get(a_uint32_t dev_id, garuda_acl_hw_rule_t * rule,
198                     a_uint32_t * ent_idx, a_uint32_t rule_idx);
199
200static sw_error_t
201_garuda_acl_rule_sw_to_hw(a_uint32_t dev_id, fal_acl_rule_t * sw,
202                          fal_pbmp_t bind_pts, garuda_acl_hw_rule_t * hw,
203                          a_uint32_t * idx, a_uint32_t * flt_len);
204
205static sw_error_t
206_garuda_acl_rule_hw_to_sw(fal_acl_rule_t * sw, const garuda_acl_hw_rule_t * hw,
207                          a_uint32_t ent_idx, a_uint32_t ent_nr);
208
209static sw_error_t
210_garuda_acl_rule_copy(a_uint32_t dev_id, a_uint32_t src_slct_idx,
211                      a_uint32_t dst_slct_idx, a_uint32_t size);
212
213static sw_error_t
214_garuda_acl_rule_invalid(a_uint32_t dev_id, a_uint32_t rule_idx,
215                         a_uint32_t size);
216
217static sw_error_t
218_garuda_acl_rule_valid(a_uint32_t dev_id, a_uint32_t rule_idx, a_uint32_t size,
219                       a_uint32_t flag);
220
221static sw_error_t
222_garuda_acl_addr_update(a_uint32_t dev_id, a_uint32_t old_addr,
223                        a_uint32_t new_addr, a_uint32_t list_id);
224
225static sw_error_t
226_garuda_acl_rule_bind(a_uint32_t dev_id, a_uint32_t rule_idx, a_uint32_t ports);
227
228#ifdef GARUDA_ACL_DEBUG
229static void
230_garuda_acl_list_dump(a_uint32_t dev_id)
231{
232    a_uint32_t i;
233
234    aos_printk("\ndev_id=%d  list control infomation", dev_id);
235    for (i = 0; i < GARUDA_MAX_LIST; i++)
236    {
237        if (ENT_USED == list_ent[dev_id][i].status)
238        {
239            aos_printk("\nlist_id=%d  list_pri=%d  addr=%d  size=%d  idx=%d ",
240                       list_ent[dev_id][i].list_id,
241                       list_ent[dev_id][i].list_pri,
242                       list_ent[dev_id][i].addr, list_ent[dev_id][i].size, i);
243        }
244    }
245    aos_printk("\n");
246}
247#else
248#define _garuda_acl_list_dump(dev_id)
249#endif
250
251static a_bool_t
252_garuda_acl_zero_addr(const fal_mac_addr_t addr)
253{
254    a_uint32_t i;
255
256    for (i = 0; i < 6; i++)
257    {
258        if (addr.uc[i])
259        {
260            return A_FALSE;
261        }
262    }
263    return A_TRUE;
264}
265
266static a_bool_t
267_garuda_acl_field_care(fal_acl_field_op_t op, a_uint32_t val, a_uint32_t mask,
268                       a_uint32_t chkvlu)
269{
270    if (FAL_ACL_FIELD_MASK == op)
271    {
272        if (0 == mask)
273            return A_FALSE;
274    }
275    else if (FAL_ACL_FIELD_RANGE == op)
276    {
277        if ((0 == val) && (chkvlu == mask))
278            return A_FALSE;
279    }
280    else if (FAL_ACL_FIELD_LE == op)
281    {
282        if (chkvlu == val)
283            return A_FALSE;
284    }
285    else if (FAL_ACL_FIELD_GE == op)
286    {
287        if (0 == val)
288            return A_FALSE;
289    }
290    else if (FAL_ACL_FIELD_NE == op)
291    {
292        return A_TRUE;
293    }
294
295    return A_TRUE;
296}
297
298static sw_error_t
299_garuda_acl_list_loc(a_uint32_t dev_id, a_uint32_t list_id, a_uint32_t * idx)
300{
301    a_uint32_t i;
302
303    for (i = 0; i < GARUDA_MAX_LIST; i++)
304    {
305        if ((ENT_USED == list_ent[dev_id][i].status)
306                && (list_id == list_ent[dev_id][i].list_id))
307        {
308            *idx = i;
309            return SW_OK;
310        }
311    }
312    return SW_NOT_FOUND;
313}
314
315static sw_error_t
316_garuda_acl_filter_map_get(const garuda_acl_hw_rule_t * rule,
317                           a_uint32_t flt_idx[], a_uint32_t * flt_nr)
318{
319    a_uint32_t flt_en, idx, i = 0;
320
321    SW_GET_FIELD_BY_REG(RUL_SLCT0, ADDR0_EN, flt_en, (rule->slct[0]));
322    if (flt_en)
323    {
324        SW_GET_FIELD_BY_REG(RUL_SLCT1, ADDR0, idx, (rule->slct[1]));
325        flt_idx[i] = idx;
326        i++;
327    }
328
329    SW_GET_FIELD_BY_REG(RUL_SLCT0, ADDR1_EN, flt_en, (rule->slct[0]));
330    if (flt_en)
331    {
332        SW_GET_FIELD_BY_REG(RUL_SLCT2, ADDR1, idx, (rule->slct[2]));
333        flt_idx[i] = idx;
334        i++;
335    }
336
337    SW_GET_FIELD_BY_REG(RUL_SLCT0, ADDR2_EN, flt_en, (rule->slct[0]));
338    if (flt_en)
339    {
340        SW_GET_FIELD_BY_REG(RUL_SLCT3, ADDR2, idx, (rule->slct[3]));
341        flt_idx[i] = idx;
342        i++;
343    }
344
345    SW_GET_FIELD_BY_REG(RUL_SLCT0, ADDR3_EN, flt_en, (rule->slct[0]));
346    if (flt_en)
347    {
348        SW_GET_FIELD_BY_REG(RUL_SLCT4, ADDR3, idx, (rule->slct[4]));
349        flt_idx[i] = idx;
350        i++;
351    }
352
353    *flt_nr = i;
354    return SW_OK;
355}
356
357static sw_error_t
358_garuda_acl_rule_mac_parse(fal_acl_rule_t * sw, fal_pbmp_t bind_pts,
359                           garuda_acl_hw_rule_t * hw, a_bool_t * b_care,
360                           a_uint32_t * len)
361{
362    a_uint32_t i;
363
364    *b_care = A_FALSE;
365    *len = 0;
366
367    aos_mem_zero(&(hw->vlu[0]), sizeof (hw->vlu));
368    aos_mem_zero(&(hw->msk[0]), sizeof (hw->msk));
369    aos_mem_zero(&(hw->typ), sizeof (hw->typ));
370
371    SW_SET_REG_BY_FIELD(MAC_RUL_V4, MAC_INPT, bind_pts, hw->vlu[4]);
372    SW_SET_REG_BY_FIELD(RUL_TYPE, TYP, GARUDA_MAC_FILTER, hw->typ);
373
374    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_MAC_DA))
375    {
376        if (A_TRUE != _garuda_acl_zero_addr(sw->dest_mac_mask))
377        {
378            *b_care = A_TRUE;
379            *len = 6;
380        }
381
382        for (i = 0; i < 6; i++)
383        {
384            sw->dest_mac_val.uc[i] &= sw->dest_mac_mask.uc[i];
385        }
386
387        SW_SET_REG_BY_FIELD(MAC_RUL_V0, DAV_BYTE2, sw->dest_mac_val.uc[2],
388                            hw->vlu[0]);
389        SW_SET_REG_BY_FIELD(MAC_RUL_V0, DAV_BYTE3, sw->dest_mac_val.uc[3],
390                            hw->vlu[0]);
391        SW_SET_REG_BY_FIELD(MAC_RUL_V0, DAV_BYTE4, sw->dest_mac_val.uc[4],
392                            hw->vlu[0]);
393        SW_SET_REG_BY_FIELD(MAC_RUL_V0, DAV_BYTE5, sw->dest_mac_val.uc[5],
394                            hw->vlu[0]);
395        SW_SET_REG_BY_FIELD(MAC_RUL_V1, DAV_BYTE0, sw->dest_mac_val.uc[0],
396                            hw->vlu[1]);
397        SW_SET_REG_BY_FIELD(MAC_RUL_V1, DAV_BYTE1, sw->dest_mac_val.uc[1],
398                            hw->vlu[1]);
399
400        SW_SET_REG_BY_FIELD(MAC_RUL_M0, DAM_BYTE2, sw->dest_mac_mask.uc[2],
401                            hw->msk[0]);
402        SW_SET_REG_BY_FIELD(MAC_RUL_M0, DAM_BYTE3, sw->dest_mac_mask.uc[3],
403                            hw->msk[0]);
404        SW_SET_REG_BY_FIELD(MAC_RUL_M0, DAM_BYTE4, sw->dest_mac_mask.uc[4],
405                            hw->msk[0]);
406        SW_SET_REG_BY_FIELD(MAC_RUL_M0, DAM_BYTE5, sw->dest_mac_mask.uc[5],
407                            hw->msk[0]);
408        SW_SET_REG_BY_FIELD(MAC_RUL_M1, DAM_BYTE0, sw->dest_mac_mask.uc[0],
409                            hw->msk[1]);
410        SW_SET_REG_BY_FIELD(MAC_RUL_M1, DAM_BYTE1, sw->dest_mac_mask.uc[1],
411                            hw->msk[1]);
412    }
413
414    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_MAC_SA))
415    {
416        if (A_TRUE != _garuda_acl_zero_addr(sw->src_mac_mask))
417        {
418            *b_care = A_TRUE;
419            *len = 12;
420        }
421
422        for (i = 0; i < 6; i++)
423        {
424            sw->src_mac_val.uc[i] &= sw->src_mac_mask.uc[i];
425        }
426
427        SW_SET_REG_BY_FIELD(MAC_RUL_V1, SAV_BYTE4, sw->src_mac_val.uc[4],
428                            hw->vlu[1]);
429        SW_SET_REG_BY_FIELD(MAC_RUL_V1, SAV_BYTE5, sw->src_mac_val.uc[5],
430                            hw->vlu[1]);
431        SW_SET_REG_BY_FIELD(MAC_RUL_V2, SAV_BYTE0, sw->src_mac_val.uc[0],
432                            hw->vlu[2]);
433        SW_SET_REG_BY_FIELD(MAC_RUL_V2, SAV_BYTE1, sw->src_mac_val.uc[1],
434                            hw->vlu[2]);
435        SW_SET_REG_BY_FIELD(MAC_RUL_V2, SAV_BYTE2, sw->src_mac_val.uc[2],
436                            hw->vlu[2]);
437        SW_SET_REG_BY_FIELD(MAC_RUL_V2, SAV_BYTE3, sw->src_mac_val.uc[3],
438                            hw->vlu[2]);
439
440        SW_SET_REG_BY_FIELD(MAC_RUL_M1, SAM_BYTE4, sw->src_mac_mask.uc[4],
441                            hw->msk[1]);
442        SW_SET_REG_BY_FIELD(MAC_RUL_M1, SAM_BYTE5, sw->src_mac_mask.uc[5],
443                            hw->msk[1]);
444        SW_SET_REG_BY_FIELD(MAC_RUL_M2, SAM_BYTE0, sw->src_mac_mask.uc[0],
445                            hw->msk[2]);
446        SW_SET_REG_BY_FIELD(MAC_RUL_M2, SAM_BYTE1, sw->src_mac_mask.uc[1],
447                            hw->msk[2]);
448        SW_SET_REG_BY_FIELD(MAC_RUL_M2, SAM_BYTE2, sw->src_mac_mask.uc[2],
449                            hw->msk[2]);
450        SW_SET_REG_BY_FIELD(MAC_RUL_M2, SAM_BYTE3, sw->src_mac_mask.uc[3],
451                            hw->msk[2]);
452    }
453
454    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_MAC_ETHTYPE))
455    {
456        if (0x0 != sw->ethtype_mask)
457        {
458            *b_care = A_TRUE;
459            *len = 14;
460        }
461
462        sw->ethtype_val &= sw->ethtype_mask;
463        SW_SET_REG_BY_FIELD(MAC_RUL_V3, ETHTYPV, sw->ethtype_val, hw->vlu[3]);
464        SW_SET_REG_BY_FIELD(MAC_RUL_M3, ETHTYPM, sw->ethtype_mask, hw->msk[3]);
465    }
466
467    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_MAC_TAGGED))
468    {
469        if (0x0 != sw->tagged_mask)
470        {
471            *b_care = A_TRUE;
472        }
473
474        sw->tagged_val &= sw->tagged_mask;
475        SW_SET_REG_BY_FIELD(MAC_RUL_V4, TAGGEDV, sw->tagged_val, hw->vlu[4]);
476        SW_SET_REG_BY_FIELD(MAC_RUL_V4, TAGGEDM, sw->tagged_mask, hw->vlu[4]);
477    }
478
479    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_MAC_UP))
480    {
481        if (0x0 != sw->up_mask)
482        {
483            *b_care = A_TRUE;
484        }
485
486        sw->up_val &= sw->up_mask;
487        SW_SET_REG_BY_FIELD(MAC_RUL_V3, VLANPRIV, sw->up_val, hw->vlu[3]);
488        SW_SET_REG_BY_FIELD(MAC_RUL_M3, VLANPRIM, sw->up_mask, hw->msk[3]);
489    }
490
491    SW_SET_REG_BY_FIELD(MAC_RUL_M3, VIDMSK, 1, hw->msk[3]);
492    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_MAC_VID))
493    {
494        if ((FAL_ACL_FIELD_MASK != sw->vid_op)
495                && (FAL_ACL_FIELD_RANGE != sw->vid_op)
496                && (FAL_ACL_FIELD_LE != sw->vid_op)
497                && (FAL_ACL_FIELD_GE != sw->vid_op))
498        {
499            return SW_NOT_SUPPORTED;
500        }
501
502        if (A_TRUE ==
503                _garuda_acl_field_care(sw->vid_op, sw->vid_val, sw->vid_mask,
504                                       0xfff))
505        {
506            *b_care = A_TRUE;
507        }
508
509        SW_SET_REG_BY_FIELD(MAC_RUL_M3, VIDMSK, 0, hw->msk[3]);
510        if (FAL_ACL_FIELD_MASK == sw->vid_op)
511        {
512            sw->vid_val &= sw->vid_mask;
513            SW_SET_REG_BY_FIELD(MAC_RUL_V3, VLANIDV, sw->vid_val, hw->vlu[3]);
514            SW_SET_REG_BY_FIELD(MAC_RUL_M3, VLANIDM, sw->vid_mask, hw->msk[3]);
515            SW_SET_REG_BY_FIELD(MAC_RUL_M3, VIDMSK, 1, hw->msk[3]);
516        }
517        else if (FAL_ACL_FIELD_RANGE == sw->vid_op)
518        {
519            SW_SET_REG_BY_FIELD(MAC_RUL_V3, VLANIDV, sw->vid_val, hw->vlu[3]);
520            SW_SET_REG_BY_FIELD(MAC_RUL_M3, VLANIDM, sw->vid_mask, hw->msk[3]);
521        }
522        else if (FAL_ACL_FIELD_LE == sw->vid_op)
523        {
524            SW_SET_REG_BY_FIELD(MAC_RUL_V3, VLANIDV, 0, hw->vlu[3]);
525            SW_SET_REG_BY_FIELD(MAC_RUL_M3, VLANIDM, sw->vid_val, hw->msk[3]);
526        }
527        else
528        {
529            SW_SET_REG_BY_FIELD(MAC_RUL_V3, VLANIDV, sw->vid_val, hw->vlu[3]);
530            SW_SET_REG_BY_FIELD(MAC_RUL_M3, VLANIDM, 0xfff, hw->msk[3]);
531        }
532    }
533
534    return SW_OK;
535}
536
537static sw_error_t
538_garuda_acl_rule_ip4_parse(fal_acl_rule_t * sw, fal_pbmp_t bind_pts,
539                           garuda_acl_hw_rule_t * hw, a_bool_t * b_care,
540                           a_uint32_t * len)
541{
542    *b_care = A_FALSE;
543    *len = 0;
544
545    aos_mem_zero(&(hw->vlu[0]), sizeof (hw->vlu));
546    aos_mem_zero(&(hw->msk[0]), sizeof (hw->msk));
547    aos_mem_zero(&(hw->typ), sizeof (hw->typ));
548
549    SW_SET_REG_BY_FIELD(IP4_RUL_V4, IP4_INPT, bind_pts, hw->vlu[4]);
550    SW_SET_REG_BY_FIELD(RUL_TYPE, TYP, GARUDA_IP4_FILTER, hw->typ);
551
552    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_IP_DSCP))
553    {
554        if (0x0 != sw->ip_dscp_mask)
555        {
556            *b_care = A_TRUE;
557            *len = 16;
558        }
559
560        sw->ip_dscp_val &= sw->ip_dscp_mask;
561        SW_SET_REG_BY_FIELD(IP4_RUL_V2, IP4DSCPV, sw->ip_dscp_val, hw->vlu[2]);
562        SW_SET_REG_BY_FIELD(IP4_RUL_M2, IP4DSCPM, sw->ip_dscp_mask, hw->msk[2]);
563    }
564
565    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_IP_PROTO))
566    {
567        if (0x0 != sw->ip_proto_mask)
568        {
569            *b_care = A_TRUE;
570            *len = 24;
571        }
572
573        sw->ip_proto_val &= sw->ip_proto_mask;
574        SW_SET_REG_BY_FIELD(IP4_RUL_V2, IP4PROTV, sw->ip_proto_val, hw->vlu[2]);
575        SW_SET_REG_BY_FIELD(IP4_RUL_M2, IP4PROTM, sw->ip_proto_mask,
576                            hw->msk[2]);
577    }
578
579    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_IP4_SIP))
580    {
581        if (0x0 != sw->src_ip4_mask)
582        {
583            *b_care = A_TRUE;
584            *len = 30;
585        }
586        sw->src_ip4_val &= sw->src_ip4_mask;
587        hw->vlu[1] = sw->src_ip4_val;
588        hw->msk[1] = sw->src_ip4_mask;
589    }
590
591    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_IP4_DIP))
592    {
593        if (0x0 != sw->dest_ip4_mask)
594        {
595            *b_care = A_TRUE;
596            *len = 34;
597        }
598        sw->dest_ip4_val &= sw->dest_ip4_mask;
599        hw->vlu[0] = sw->dest_ip4_val;
600        hw->msk[0] = sw->dest_ip4_mask;
601    }
602
603    SW_SET_REG_BY_FIELD(IP4_RUL_M3, IP4SPORTM_EN, 1, hw->msk[3]);
604    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_L4_SPORT))
605    {
606        if ((FAL_ACL_FIELD_MASK != sw->src_l4port_op)
607                && (FAL_ACL_FIELD_RANGE != sw->src_l4port_op)
608                && (FAL_ACL_FIELD_LE != sw->src_l4port_op)
609                && (FAL_ACL_FIELD_GE != sw->src_l4port_op))
610        {
611            return SW_NOT_SUPPORTED;
612        }
613
614        if (A_TRUE ==
615                _garuda_acl_field_care(sw->src_l4port_op, sw->src_l4port_val,
616                                       sw->src_l4port_mask, 0xffff))
617        {
618            *b_care = A_TRUE;
619            *len = 36;
620        }
621
622        SW_SET_REG_BY_FIELD(IP4_RUL_M3, IP4SPORTM_EN, 0, hw->msk[3]);
623        if (FAL_ACL_FIELD_MASK == sw->src_l4port_op)
624        {
625            sw->src_l4port_val &= sw->src_l4port_mask;
626            SW_SET_REG_BY_FIELD(IP4_RUL_V3, IP4SPORTV, sw->src_l4port_val,
627                                hw->vlu[3]);
628            SW_SET_REG_BY_FIELD(IP4_RUL_M3, IP4SPORTM, sw->src_l4port_mask,
629                                hw->msk[3]);
630            SW_SET_REG_BY_FIELD(IP4_RUL_M3, IP4SPORTM_EN, 1, hw->msk[3]);
631        }
632        else if (FAL_ACL_FIELD_RANGE == sw->src_l4port_op)
633        {
634            SW_SET_REG_BY_FIELD(IP4_RUL_V3, IP4SPORTV, sw->src_l4port_val,
635                                hw->vlu[3]);
636            SW_SET_REG_BY_FIELD(IP4_RUL_M3, IP4SPORTM, sw->src_l4port_mask,
637                                hw->msk[3]);
638        }
639        else if (FAL_ACL_FIELD_LE == sw->src_l4port_op)
640        {
641            SW_SET_REG_BY_FIELD(IP4_RUL_V3, IP4SPORTV, 0, hw->vlu[3]);
642            SW_SET_REG_BY_FIELD(IP4_RUL_M3, IP4SPORTM, sw->src_l4port_val,
643                                hw->msk[3]);
644        }
645        else
646        {
647            SW_SET_REG_BY_FIELD(IP4_RUL_V3, IP4SPORTV, sw->src_l4port_val,
648                                hw->vlu[3]);
649            SW_SET_REG_BY_FIELD(IP4_RUL_M3, IP4SPORTM, 0xffff, hw->msk[3]);
650        }
651    }
652
653    SW_SET_REG_BY_FIELD(IP4_RUL_M3, IP4DPORTM_EN, 1, hw->msk[3]);
654    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_L4_DPORT))
655    {
656        if ((FAL_ACL_FIELD_MASK != sw->dest_l4port_op)
657                && (FAL_ACL_FIELD_RANGE != sw->dest_l4port_op)
658                && (FAL_ACL_FIELD_LE != sw->dest_l4port_op)
659                && (FAL_ACL_FIELD_GE != sw->dest_l4port_op))
660        {
661            return SW_NOT_SUPPORTED;
662        }
663
664        if (A_TRUE ==
665                _garuda_acl_field_care(sw->dest_l4port_op, sw->dest_l4port_val,
666                                       sw->dest_l4port_mask, 0xffff))
667        {
668            *b_care = A_TRUE;
669            *len = 38;
670        }
671
672        SW_SET_REG_BY_FIELD(IP4_RUL_M3, IP4DPORTM_EN, 0, hw->msk[3]);
673        if (FAL_ACL_FIELD_MASK == sw->dest_l4port_op)
674        {
675            sw->dest_l4port_val &= sw->dest_l4port_mask;
676            SW_SET_REG_BY_FIELD(IP4_RUL_V2, IP4DPORTV, sw->dest_l4port_val,
677                                hw->vlu[2]);
678            SW_SET_REG_BY_FIELD(IP4_RUL_M2, IP4DPORTM, sw->dest_l4port_mask,
679                                hw->msk[2]);
680            SW_SET_REG_BY_FIELD(IP4_RUL_M3, IP4DPORTM_EN, 1, hw->msk[3]);
681        }
682        else if (FAL_ACL_FIELD_RANGE == sw->dest_l4port_op)
683        {
684            SW_SET_REG_BY_FIELD(IP4_RUL_V2, IP4DPORTV, sw->dest_l4port_val,
685                                hw->vlu[2]);
686            SW_SET_REG_BY_FIELD(IP4_RUL_M2, IP4DPORTM, sw->dest_l4port_mask,
687                                hw->msk[2]);
688        }
689        else if (FAL_ACL_FIELD_LE == sw->dest_l4port_op)
690        {
691            SW_SET_REG_BY_FIELD(IP4_RUL_V2, IP4DPORTV, 0, hw->vlu[2]);
692            SW_SET_REG_BY_FIELD(IP4_RUL_M2, IP4DPORTM, sw->dest_l4port_val,
693                                hw->msk[2]);
694        }
695        else
696        {
697            SW_SET_REG_BY_FIELD(IP4_RUL_V2, IP4DPORTV, sw->dest_l4port_val,
698                                hw->vlu[2]);
699            SW_SET_REG_BY_FIELD(IP4_RUL_M2, IP4DPORTM, 0xffff, hw->msk[2]);
700        }
701    }
702
703    return SW_OK;
704}
705
706static sw_error_t
707_garuda_acl_rule_ip6r1_parse(fal_acl_rule_t * sw, fal_pbmp_t bind_pts,
708                             garuda_acl_hw_rule_t * hw, a_bool_t * b_care,
709                             a_uint32_t * len)
710{
711    a_uint32_t i;
712
713    *b_care = A_FALSE;
714    *len = 0;
715
716    aos_mem_zero(&(hw->vlu[0]), sizeof (hw->vlu));
717    aos_mem_zero(&(hw->msk[0]), sizeof (hw->msk));
718    aos_mem_zero(&(hw->typ), sizeof (hw->typ));
719
720    SW_SET_REG_BY_FIELD(IP6_RUL1_V4, IP6_RUL1_INPT, bind_pts, hw->vlu[4]);
721    SW_SET_REG_BY_FIELD(RUL_TYPE, TYP, GARUDA_IP6R1_FILTER, hw->typ);
722
723    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_IP6_DIP))
724    {
725        for (i = 0; i < 4; i++)
726        {
727            if (0x0 != sw->dest_ip6_mask.ul[i])
728            {
729                *b_care = A_TRUE;
730                *len = 54;
731            }
732
733            sw->dest_ip6_val.ul[3 - i] &= sw->dest_ip6_mask.ul[3 - i];
734            hw->vlu[i] = sw->dest_ip6_val.ul[3 - i];
735            hw->msk[i] = sw->dest_ip6_mask.ul[3 - i];
736        }
737    }
738
739    return SW_OK;
740}
741
742static sw_error_t
743_garuda_acl_rule_ip6r2_parse(fal_acl_rule_t * sw, fal_pbmp_t bind_pts,
744                             garuda_acl_hw_rule_t * hw, a_bool_t * b_care,
745                             a_uint32_t * len)
746{
747    a_uint32_t i;
748
749    *b_care = A_FALSE;
750    *len = 0;
751
752    aos_mem_zero(&(hw->vlu[0]), sizeof (hw->vlu));
753    aos_mem_zero(&(hw->msk[0]), sizeof (hw->msk));
754    aos_mem_zero(&(hw->typ), sizeof (hw->typ));
755
756    SW_SET_REG_BY_FIELD(IP6_RUL2_V4, IP6_RUL2_INPT, bind_pts, hw->vlu[4]);
757    SW_SET_REG_BY_FIELD(RUL_TYPE, TYP, GARUDA_IP6R2_FILTER, hw->typ);
758
759    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_IP6_SIP))
760    {
761        for (i = 0; i < 4; i++)
762        {
763            if (0x0 != sw->src_ip6_mask.ul[i])
764            {
765                *b_care = A_TRUE;
766                *len = 38;
767            }
768
769            sw->src_ip6_val.ul[3 - i] &= sw->src_ip6_mask.ul[3 - i];
770            hw->vlu[i] = sw->src_ip6_val.ul[3 - i];
771            hw->msk[i] = sw->src_ip6_mask.ul[3 - i];
772        }
773    }
774
775    return SW_OK;
776}
777
778static sw_error_t
779_garuda_acl_rule_ip6r3_parse(fal_acl_rule_t * sw, fal_pbmp_t bind_pts,
780                             garuda_acl_hw_rule_t * hw, a_bool_t * b_care,
781                             a_uint32_t * len)
782{
783    *b_care = A_FALSE;
784    *len = 0;
785
786    aos_mem_zero(&(hw->vlu[0]), sizeof (hw->vlu));
787    aos_mem_zero(&(hw->msk[0]), sizeof (hw->msk));
788    aos_mem_zero(&(hw->typ), sizeof (hw->typ));
789
790    SW_SET_REG_BY_FIELD(IP6_RUL3_V4, IP6_RUL3_INPT, bind_pts, hw->vlu[4]);
791    SW_SET_REG_BY_FIELD(RUL_TYPE, TYP, GARUDA_IP6R3_FILTER, hw->typ);
792
793    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_IP_DSCP))
794    {
795        if (0x0 != sw->ip_dscp_mask)
796        {
797            *b_care = A_TRUE;
798            *len = 38;
799        }
800
801        sw->ip_dscp_val &= sw->ip_dscp_mask;
802        SW_SET_REG_BY_FIELD(IP6_RUL3_V0, IP6DSCPV, sw->ip_dscp_val, hw->vlu[0]);
803        SW_SET_REG_BY_FIELD(IP6_RUL3_M0, IP6DSCPM, sw->ip_dscp_mask,
804                            hw->msk[0]);
805    }
806
807    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_IP6_LABEL))
808    {
809        if (0x0 != sw->ip6_lable_mask)
810        {
811            *b_care = A_TRUE;
812            *len = 18;
813        }
814
815        sw->ip6_lable_val &= sw->ip6_lable_mask;
816        SW_SET_REG_BY_FIELD(IP6_RUL3_V1, IP6LABEL1V, sw->ip6_lable_val,
817                            hw->vlu[1]);
818        SW_SET_REG_BY_FIELD(IP6_RUL3_M1, IP6LABEL1M, sw->ip6_lable_mask,
819                            hw->msk[1]);
820
821        SW_SET_REG_BY_FIELD(IP6_RUL3_V2, IP6LABEL2V, (sw->ip6_lable_val >> 16),
822                            hw->vlu[2]);
823        SW_SET_REG_BY_FIELD(IP6_RUL3_M2, IP6LABEL2M, (sw->ip6_lable_mask >> 16),
824                            hw->msk[2]);
825    }
826
827    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_IP_PROTO))
828    {
829        if (0x0 != sw->ip_proto_mask)
830        {
831            *b_care = A_TRUE;
832            *len = 21;
833        }
834
835        sw->ip_proto_val &= sw->ip_proto_mask;
836        SW_SET_REG_BY_FIELD(IP6_RUL3_V0, IP6PROTV, sw->ip_proto_val,
837                            hw->vlu[0]);
838        SW_SET_REG_BY_FIELD(IP6_RUL3_M0, IP6PROTM, sw->ip_proto_mask,
839                            hw->msk[0]);
840    }
841
842    SW_SET_REG_BY_FIELD(IP6_RUL3_M3, IP6SPORTM_EN, 1, hw->msk[3]);
843    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_L4_SPORT))
844    {
845        if ((FAL_ACL_FIELD_MASK != sw->src_l4port_op)
846                && (FAL_ACL_FIELD_RANGE != sw->src_l4port_op)
847                && (FAL_ACL_FIELD_LE != sw->src_l4port_op)
848                && (FAL_ACL_FIELD_GE != sw->src_l4port_op))
849        {
850            return SW_NOT_SUPPORTED;
851        }
852
853        if (A_TRUE ==
854                _garuda_acl_field_care(sw->src_l4port_op, sw->src_l4port_val,
855                                       sw->src_l4port_mask, 0xffff))
856        {
857            *b_care = A_TRUE;
858            *len = 56;
859        }
860
861        SW_SET_REG_BY_FIELD(IP6_RUL3_M3, IP6SPORTM_EN, 0, hw->msk[3]);
862        if (FAL_ACL_FIELD_MASK == sw->src_l4port_op)
863        {
864            sw->src_l4port_val &= sw->src_l4port_mask;
865            SW_SET_REG_BY_FIELD(IP6_RUL3_V1, IP6SPORTV, sw->src_l4port_val,
866                                hw->vlu[1]);
867            SW_SET_REG_BY_FIELD(IP6_RUL3_M1, IP6SPORTM, sw->src_l4port_mask,
868                                hw->msk[1]);
869            SW_SET_REG_BY_FIELD(IP6_RUL3_M3, IP6SPORTM_EN, 1, hw->msk[3]);
870        }
871        else if (FAL_ACL_FIELD_RANGE == sw->src_l4port_op)
872        {
873            SW_SET_REG_BY_FIELD(IP6_RUL3_V1, IP6SPORTV, sw->src_l4port_val,
874                                hw->vlu[1]);
875            SW_SET_REG_BY_FIELD(IP6_RUL3_M1, IP6SPORTM, sw->src_l4port_mask,
876                                hw->msk[1]);
877        }
878        else if (FAL_ACL_FIELD_LE == sw->src_l4port_op)
879        {
880            SW_SET_REG_BY_FIELD(IP6_RUL3_V1, IP6SPORTV, 0, hw->vlu[1]);
881            SW_SET_REG_BY_FIELD(IP6_RUL3_M1, IP6SPORTM, sw->src_l4port_val,
882                                hw->msk[1]);
883        }
884        else
885        {
886            SW_SET_REG_BY_FIELD(IP6_RUL3_V1, IP6SPORTV, sw->src_l4port_val,
887                                hw->vlu[1]);
888            SW_SET_REG_BY_FIELD(IP6_RUL3_M1, IP6SPORTM, 0xffff, hw->msk[1]);
889        }
890    }
891
892    SW_SET_REG_BY_FIELD(IP6_RUL3_M3, IP6DPORTM_EN, 1, hw->msk[3]);
893    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_L4_DPORT))
894    {
895        if ((FAL_ACL_FIELD_MASK != sw->dest_l4port_op)
896                && (FAL_ACL_FIELD_RANGE != sw->dest_l4port_op)
897                && (FAL_ACL_FIELD_LE != sw->dest_l4port_op)
898                && (FAL_ACL_FIELD_GE != sw->dest_l4port_op))
899        {
900            return SW_NOT_SUPPORTED;
901        }
902
903        if (A_TRUE ==
904                _garuda_acl_field_care(sw->dest_l4port_op, sw->dest_l4port_val,
905                                       sw->dest_l4port_mask, 0xffff))
906        {
907            *b_care = A_TRUE;
908            *len = 58;
909        }
910
911        SW_SET_REG_BY_FIELD(IP6_RUL3_M3, IP6DPORTM_EN, 0, hw->msk[3]);
912        if (FAL_ACL_FIELD_MASK == sw->dest_l4port_op)
913        {
914            sw->dest_l4port_val &= sw->dest_l4port_mask;
915            SW_SET_REG_BY_FIELD(IP6_RUL3_V0, IP6DPORTV, sw->dest_l4port_val,
916                                hw->vlu[0]);
917            SW_SET_REG_BY_FIELD(IP6_RUL3_M0, IP6DPORTM, sw->dest_l4port_mask,
918                                hw->msk[0]);
919            SW_SET_REG_BY_FIELD(IP6_RUL3_M3, IP6DPORTM_EN, 1, hw->msk[3]);
920        }
921        else if (FAL_ACL_FIELD_RANGE == sw->dest_l4port_op)
922        {
923            SW_SET_REG_BY_FIELD(IP6_RUL3_V0, IP6DPORTV, sw->dest_l4port_val,
924                                hw->vlu[0]);
925            SW_SET_REG_BY_FIELD(IP6_RUL3_M0, IP6DPORTM, sw->dest_l4port_mask,
926                                hw->msk[0]);
927        }
928        else if (FAL_ACL_FIELD_LE == sw->dest_l4port_op)
929        {
930            SW_SET_REG_BY_FIELD(IP6_RUL3_V0, IP6DPORTV, 0, hw->vlu[0]);
931            SW_SET_REG_BY_FIELD(IP6_RUL3_M0, IP6DPORTM, sw->dest_l4port_val,
932                                hw->msk[0]);
933        }
934        else
935        {
936            SW_SET_REG_BY_FIELD(IP6_RUL3_V0, IP6DPORTV, sw->dest_l4port_val,
937                                hw->vlu[0]);
938            SW_SET_REG_BY_FIELD(IP6_RUL3_M0, IP6DPORTM, 0xffff, hw->msk[0]);
939        }
940    }
941
942    return SW_OK;
943}
944
945static sw_error_t
946_garuda_acl_action_parse(a_uint32_t dev_id, const fal_acl_rule_t * sw,
947                         garuda_acl_hw_rule_t * hw)
948{
949    hw->act = 0;
950    if ((FAL_ACTION_FLG_TST(sw->action_flg, FAL_ACL_ACTION_MODIFY_VLAN))
951            && (FAL_ACTION_FLG_TST(sw->action_flg, FAL_ACL_ACTION_NEST_VLAN)))
952    {
953        return SW_NOT_SUPPORTED;
954    }
955
956    /* FAL_ACL_ACTION_PERMIT need't process */
957
958    /* we should ignore any other action flags when DENY bit is settd. */
959    if (FAL_ACTION_FLG_TST(sw->action_flg, FAL_ACL_ACTION_DENY))
960    {
961        SW_SET_REG_BY_FIELD(ACL_RSLT, DES_PORT_EN, 1, hw->act);
962        SW_SET_REG_BY_FIELD(ACL_RSLT, PORT_MEM, 0, hw->act);
963        return SW_OK;
964    }
965
966    if (FAL_ACTION_FLG_TST(sw->action_flg, FAL_ACL_ACTION_RDTCPU))
967    {
968        SW_SET_REG_BY_FIELD(ACL_RSLT, RDTCPU, 1, hw->act);
969    }
970
971    if (FAL_ACTION_FLG_TST(sw->action_flg, FAL_ACL_ACTION_CPYCPU))
972    {
973        SW_SET_REG_BY_FIELD(ACL_RSLT, CPYCPU, 1, hw->act);
974    }
975
976    if (FAL_ACTION_FLG_TST(sw->action_flg, FAL_ACL_ACTION_MIRROR))
977    {
978        SW_SET_REG_BY_FIELD(ACL_RSLT, MIRR_EN, 1, hw->act);
979    }
980
981    if (FAL_ACTION_FLG_TST(sw->action_flg, FAL_ACL_ACTION_REDPT))
982    {
983        SW_SET_REG_BY_FIELD(ACL_RSLT, DES_PORT_EN, 1, hw->act);
984        SW_SET_REG_BY_FIELD(ACL_RSLT, PORT_MEM, sw->ports, hw->act);
985    }
986
987    if (FAL_ACTION_FLG_TST(sw->action_flg, FAL_ACL_ACTION_REMARK_UP))
988    {
989        SW_SET_REG_BY_FIELD(ACL_RSLT, REMARK_DOT1P, 1, hw->act);
990        SW_SET_REG_BY_FIELD(ACL_RSLT, DOT1P, sw->up, hw->act);
991    }
992
993    if (FAL_ACTION_FLG_TST(sw->action_flg, FAL_ACL_ACTION_REMARK_QUEUE))
994    {
995        SW_SET_REG_BY_FIELD(ACL_RSLT, REMARK_PRI_QU, 1, hw->act);
996        SW_SET_REG_BY_FIELD(ACL_RSLT, PRI_QU, sw->queue, hw->act);
997    }
998
999    if ((FAL_ACTION_FLG_TST(sw->action_flg, FAL_ACL_ACTION_MODIFY_VLAN))
1000            || (FAL_ACTION_FLG_TST(sw->action_flg, FAL_ACL_ACTION_NEST_VLAN)))
1001    {
1002
1003        SW_SET_REG_BY_FIELD(ACL_RSLT, CHG_VID_EN, 1, hw->act);
1004        SW_SET_REG_BY_FIELD(ACL_RSLT, VID, sw->vid, hw->act);
1005        SW_SET_REG_BY_FIELD(ACL_RSLT, STAG_CHG_EN, 1, hw->act);
1006        if (FAL_ACTION_FLG_TST(sw->action_flg, FAL_ACL_ACTION_MODIFY_VLAN))
1007        {
1008            SW_SET_REG_BY_FIELD(ACL_RSLT, STAG_CHG_EN, 0, hw->act);
1009
1010            if (!FAL_ACTION_FLG_TST(sw->action_flg, FAL_ACL_ACTION_REDPT))
1011            {
1012                SW_SET_REG_BY_FIELD(ACL_RSLT, VID_MEM_EN, 1, hw->act);
1013                SW_SET_REG_BY_FIELD(ACL_RSLT, PORT_MEM, sw->ports, hw->act);
1014            }
1015        }
1016    }
1017
1018    return SW_OK;
1019}
1020
1021static sw_error_t
1022_garuda_acl_rule_mac_reparse(fal_acl_rule_t * sw,
1023                             const garuda_acl_hw_rule_t * hw)
1024{
1025    a_uint32_t mask_en;
1026
1027    /* destnation mac address */
1028    SW_GET_FIELD_BY_REG(MAC_RUL_V0, DAV_BYTE2, sw->dest_mac_val.uc[2],
1029                        hw->vlu[0]);
1030    SW_GET_FIELD_BY_REG(MAC_RUL_V0, DAV_BYTE3, sw->dest_mac_val.uc[3],
1031                        hw->vlu[0]);
1032    SW_GET_FIELD_BY_REG(MAC_RUL_V0, DAV_BYTE4, sw->dest_mac_val.uc[4],
1033                        hw->vlu[0]);
1034    SW_GET_FIELD_BY_REG(MAC_RUL_V0, DAV_BYTE5, sw->dest_mac_val.uc[5],
1035                        hw->vlu[0]);
1036    SW_GET_FIELD_BY_REG(MAC_RUL_V1, DAV_BYTE0, sw->dest_mac_val.uc[0],
1037                        hw->vlu[1]);
1038    SW_GET_FIELD_BY_REG(MAC_RUL_V1, DAV_BYTE1, sw->dest_mac_val.uc[1],
1039                        hw->vlu[1]);
1040
1041    SW_GET_FIELD_BY_REG(MAC_RUL_M0, DAM_BYTE2, sw->dest_mac_mask.uc[2],
1042                        hw->msk[0]);
1043    SW_GET_FIELD_BY_REG(MAC_RUL_M0, DAM_BYTE3, sw->dest_mac_mask.uc[3],
1044                        hw->msk[0]);
1045    SW_GET_FIELD_BY_REG(MAC_RUL_M0, DAM_BYTE4, sw->dest_mac_mask.uc[4],
1046                        hw->msk[0]);
1047    SW_GET_FIELD_BY_REG(MAC_RUL_M0, DAM_BYTE5, sw->dest_mac_mask.uc[5],
1048                        hw->msk[0]);
1049    SW_GET_FIELD_BY_REG(MAC_RUL_M1, DAM_BYTE0, sw->dest_mac_mask.uc[0],
1050                        hw->msk[1]);
1051    SW_GET_FIELD_BY_REG(MAC_RUL_M1, DAM_BYTE1, sw->dest_mac_mask.uc[1],
1052                        hw->msk[1]);
1053    if (A_FALSE == _garuda_acl_zero_addr(sw->dest_mac_mask))
1054    {
1055        FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_MAC_DA);
1056    }
1057
1058    /* source mac address */
1059    SW_GET_FIELD_BY_REG(MAC_RUL_V2, SAV_BYTE0, sw->src_mac_val.uc[0],
1060                        hw->vlu[2]);
1061    SW_GET_FIELD_BY_REG(MAC_RUL_V2, SAV_BYTE1, sw->src_mac_val.uc[1],
1062                        hw->vlu[2]);
1063    SW_GET_FIELD_BY_REG(MAC_RUL_V2, SAV_BYTE2, sw->src_mac_val.uc[2],
1064                        hw->vlu[2]);
1065    SW_GET_FIELD_BY_REG(MAC_RUL_V2, SAV_BYTE3, sw->src_mac_val.uc[3],
1066                        hw->vlu[2]);
1067    SW_GET_FIELD_BY_REG(MAC_RUL_V1, SAV_BYTE4, sw->src_mac_val.uc[4],
1068                        hw->vlu[1]);
1069    SW_GET_FIELD_BY_REG(MAC_RUL_V1, SAV_BYTE5, sw->src_mac_val.uc[5],
1070                        hw->vlu[1]);
1071
1072    SW_GET_FIELD_BY_REG(MAC_RUL_M2, SAM_BYTE0, sw->src_mac_mask.uc[0],
1073                        hw->msk[2]);
1074    SW_GET_FIELD_BY_REG(MAC_RUL_M2, SAM_BYTE1, sw->src_mac_mask.uc[1],
1075                        hw->msk[2]);
1076    SW_GET_FIELD_BY_REG(MAC_RUL_M2, SAM_BYTE2, sw->src_mac_mask.uc[2],
1077                        hw->msk[2]);
1078    SW_GET_FIELD_BY_REG(MAC_RUL_M2, SAM_BYTE3, sw->src_mac_mask.uc[3],
1079                        hw->msk[2]);
1080    SW_GET_FIELD_BY_REG(MAC_RUL_M1, SAM_BYTE4, sw->src_mac_mask.uc[4],
1081                        hw->msk[1]);
1082    SW_GET_FIELD_BY_REG(MAC_RUL_M1, SAM_BYTE5, sw->src_mac_mask.uc[5],
1083                        hw->msk[1]);
1084    if (A_FALSE == _garuda_acl_zero_addr(sw->src_mac_mask))
1085    {
1086        FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_MAC_SA);
1087    }
1088
1089    /* ethernet type */
1090    SW_GET_FIELD_BY_REG(MAC_RUL_V3, ETHTYPV, sw->ethtype_val, hw->vlu[3]);
1091    SW_GET_FIELD_BY_REG(MAC_RUL_M3, ETHTYPM, sw->ethtype_mask, hw->msk[3]);
1092    if (0x0 != sw->ethtype_mask)
1093    {
1094        FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_MAC_ETHTYPE);
1095    }
1096
1097    /* packet tagged */
1098    SW_GET_FIELD_BY_REG(MAC_RUL_V4, TAGGEDV, sw->tagged_val, hw->vlu[4]);
1099    SW_GET_FIELD_BY_REG(MAC_RUL_V4, TAGGEDM, sw->tagged_mask, hw->vlu[4]);
1100    if (0x0 != sw->tagged_mask)
1101    {
1102        FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_MAC_TAGGED);
1103    }
1104
1105    /* vlan priority */
1106    SW_GET_FIELD_BY_REG(MAC_RUL_V3, VLANPRIV, sw->up_val, hw->vlu[3]);
1107    SW_GET_FIELD_BY_REG(MAC_RUL_M3, VLANPRIM, sw->up_mask, hw->msk[3]);
1108    if (0x0 != sw->up_mask)
1109    {
1110        FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_MAC_UP);
1111    }
1112
1113    /* vlanid */
1114    SW_GET_FIELD_BY_REG(MAC_RUL_V3, VLANIDV, sw->vid_val, hw->vlu[3]);
1115    SW_GET_FIELD_BY_REG(MAC_RUL_M3, VLANIDM, sw->vid_mask, hw->msk[3]);
1116    SW_GET_FIELD_BY_REG(MAC_RUL_M3, VIDMSK, mask_en, hw->msk[3]);
1117    if (mask_en)
1118    {
1119        sw->vid_op = FAL_ACL_FIELD_MASK;
1120    }
1121    else
1122    {
1123        sw->vid_op = FAL_ACL_FIELD_RANGE;
1124    }
1125
1126    if (A_TRUE ==
1127            _garuda_acl_field_care(sw->vid_op, (a_uint32_t) sw->vid_val,
1128                                   (a_uint32_t) sw->vid_mask, 0xfff))
1129    {
1130        FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_MAC_VID);
1131    }
1132
1133    return SW_OK;
1134}
1135
1136static sw_error_t
1137_garuda_acl_rule_ip4_reparse(fal_acl_rule_t * sw,
1138                             const garuda_acl_hw_rule_t * hw)
1139{
1140    a_uint32_t mask_en;
1141
1142    sw->dest_ip4_val = hw->vlu[0];
1143    sw->dest_ip4_mask = hw->msk[0];
1144    if (0x0 != sw->dest_ip4_mask)
1145    {
1146        FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_IP4_DIP);
1147    }
1148
1149    sw->src_ip4_val = hw->vlu[1];
1150    sw->src_ip4_mask = hw->msk[1];
1151    if (0x0 != sw->src_ip4_mask)
1152    {
1153        FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_IP4_SIP);
1154    }
1155
1156    SW_GET_FIELD_BY_REG(IP4_RUL_V2, IP4PROTV, sw->ip_proto_val, hw->vlu[2]);
1157    SW_GET_FIELD_BY_REG(IP4_RUL_M2, IP4PROTM, sw->ip_proto_mask, hw->msk[2]);
1158    if (0x0 != sw->ip_proto_mask)
1159    {
1160        FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_IP_PROTO);
1161    }
1162
1163    SW_GET_FIELD_BY_REG(IP4_RUL_V2, IP4DSCPV, sw->ip_dscp_val, hw->vlu[2]);
1164    SW_GET_FIELD_BY_REG(IP4_RUL_M2, IP4DSCPM, sw->ip_dscp_mask, hw->msk[2]);
1165    if (0x0 != sw->ip_dscp_mask)
1166    {
1167        FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_IP_DSCP);
1168    }
1169
1170    SW_GET_FIELD_BY_REG(IP4_RUL_V2, IP4DPORTV, sw->dest_l4port_val, hw->vlu[2]);
1171    SW_GET_FIELD_BY_REG(IP4_RUL_M2, IP4DPORTM, sw->dest_l4port_mask,
1172                        hw->msk[2]);
1173    SW_GET_FIELD_BY_REG(IP4_RUL_M3, IP4DPORTM_EN, mask_en, hw->msk[3]);
1174    if (mask_en)
1175    {
1176        sw->dest_l4port_op = FAL_ACL_FIELD_MASK;
1177    }
1178    else
1179    {
1180        sw->dest_l4port_op = FAL_ACL_FIELD_RANGE;
1181    }
1182
1183    if (A_TRUE ==
1184            _garuda_acl_field_care(sw->dest_l4port_op,
1185                                   (a_uint32_t) sw->dest_l4port_val,
1186                                   (a_uint32_t) sw->dest_l4port_mask, 0xffff))
1187    {
1188        FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_L4_DPORT);
1189    }
1190
1191    SW_GET_FIELD_BY_REG(IP4_RUL_V3, IP4SPORTV, sw->src_l4port_val, hw->vlu[3]);
1192    SW_GET_FIELD_BY_REG(IP4_RUL_M3, IP4SPORTM, sw->src_l4port_mask, hw->msk[3]);
1193    SW_GET_FIELD_BY_REG(IP4_RUL_M3, IP4SPORTM_EN, mask_en, hw->msk[3]);
1194    if (mask_en)
1195    {
1196        sw->src_l4port_op = FAL_ACL_FIELD_MASK;
1197    }
1198    else
1199    {
1200        sw->src_l4port_op = FAL_ACL_FIELD_RANGE;
1201    }
1202
1203    if (A_TRUE ==
1204            _garuda_acl_field_care(sw->src_l4port_op,
1205                                   (a_uint32_t) sw->src_l4port_val,
1206                                   (a_uint32_t) sw->src_l4port_mask, 0xffff))
1207    {
1208        FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_L4_SPORT);
1209    }
1210
1211    return SW_OK;
1212}
1213
1214static sw_error_t
1215_garuda_acl_rule_ip6r1_reparse(fal_acl_rule_t * sw,
1216                               const garuda_acl_hw_rule_t * hw)
1217{
1218    a_uint32_t i;
1219
1220    for (i = 0; i < 4; i++)
1221    {
1222        sw->dest_ip6_val.ul[i] = hw->vlu[3 - i];
1223        sw->dest_ip6_mask.ul[i] = hw->msk[3 - i];
1224        if (0x0 != sw->dest_ip6_mask.ul[i])
1225        {
1226            FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_IP6_DIP);
1227        }
1228    }
1229    return SW_OK;
1230}
1231
1232static sw_error_t
1233_garuda_acl_rule_ip6r2_reparse(fal_acl_rule_t * sw,
1234                               const garuda_acl_hw_rule_t * hw)
1235{
1236    a_uint32_t i;
1237
1238    for (i = 0; i < 4; i++)
1239    {
1240        sw->src_ip6_val.ul[i] = hw->vlu[3 - i];
1241        sw->src_ip6_mask.ul[i] = hw->msk[3 - i];
1242        if (0x0 != sw->src_ip6_mask.ul[i])
1243        {
1244            FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_IP6_SIP);
1245        }
1246    }
1247    return SW_OK;
1248}
1249
1250static sw_error_t
1251_garuda_acl_rule_ip6r3_reparse(fal_acl_rule_t * sw,
1252                               const garuda_acl_hw_rule_t * hw)
1253{
1254    a_uint32_t mask_en;
1255    a_uint32_t tmp;
1256
1257    SW_GET_FIELD_BY_REG(IP6_RUL3_V0, IP6PROTV, sw->ip_proto_val, hw->vlu[0]);
1258    SW_GET_FIELD_BY_REG(IP6_RUL3_M0, IP6PROTM, sw->ip_proto_mask, hw->msk[0]);
1259    if (0x0 != sw->ip_proto_mask)
1260    {
1261        FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_IP_PROTO);
1262    }
1263
1264    SW_GET_FIELD_BY_REG(IP6_RUL3_V0, IP6DSCPV, sw->ip_dscp_val, hw->vlu[0]);
1265    SW_GET_FIELD_BY_REG(IP6_RUL3_M0, IP6DSCPM, sw->ip_dscp_mask, hw->msk[0]);
1266    if (0x0 != sw->ip_dscp_mask)
1267    {
1268        FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_IP_DSCP);
1269    }
1270
1271    SW_GET_FIELD_BY_REG(IP6_RUL3_V0, IP6DPORTV, sw->dest_l4port_val,
1272                        hw->vlu[0]);
1273    SW_GET_FIELD_BY_REG(IP6_RUL3_M0, IP6DPORTM, sw->dest_l4port_mask,
1274                        hw->msk[0]);
1275    SW_GET_FIELD_BY_REG(IP6_RUL3_M3, IP6DPORTM_EN, mask_en, hw->msk[3]);
1276    if (mask_en)
1277    {
1278        sw->dest_l4port_op = FAL_ACL_FIELD_MASK;
1279    }
1280    else
1281    {
1282        sw->dest_l4port_op = FAL_ACL_FIELD_RANGE;
1283    }
1284
1285    if (A_TRUE ==
1286            _garuda_acl_field_care(sw->dest_l4port_op,
1287                                   (a_uint32_t) sw->dest_l4port_val,
1288                                   (a_uint32_t) sw->dest_l4port_mask, 0xffff))
1289    {
1290        FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_L4_DPORT);
1291    }
1292
1293    SW_GET_FIELD_BY_REG(IP6_RUL3_V1, IP6SPORTV, sw->src_l4port_val, hw->vlu[1]);
1294    SW_GET_FIELD_BY_REG(IP6_RUL3_M1, IP6SPORTM, sw->src_l4port_mask,
1295                        hw->msk[1]);
1296    SW_GET_FIELD_BY_REG(IP6_RUL3_M3, IP6SPORTM_EN, mask_en, hw->msk[3]);
1297    if (mask_en)
1298    {
1299        sw->src_l4port_op = FAL_ACL_FIELD_MASK;
1300    }
1301    else
1302    {
1303        sw->src_l4port_op = FAL_ACL_FIELD_RANGE;
1304    }
1305
1306    if (A_TRUE ==
1307            _garuda_acl_field_care(sw->src_l4port_op,
1308                                   (a_uint32_t) sw->src_l4port_val,
1309                                   (a_uint32_t) sw->src_l4port_mask, 0xffff))
1310    {
1311        FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_L4_SPORT);
1312    }
1313
1314    SW_GET_FIELD_BY_REG(IP6_RUL3_V1, IP6LABEL1V, sw->ip6_lable_val, hw->vlu[1]);
1315    SW_GET_FIELD_BY_REG(IP6_RUL3_M1, IP6LABEL1M, sw->ip6_lable_mask,
1316                        hw->msk[1]);
1317
1318    SW_GET_FIELD_BY_REG(IP6_RUL3_V2, IP6LABEL2V, tmp, hw->vlu[2]);
1319    sw->ip6_lable_val |= (tmp << 16);
1320    SW_GET_FIELD_BY_REG(IP6_RUL3_M2, IP6LABEL2M, tmp, hw->msk[2]);
1321    sw->ip6_lable_mask |= (tmp << 16);
1322
1323    if (0x0 != sw->ip6_lable_mask)
1324    {
1325        FAL_FIELD_FLG_SET(sw->field_flg, FAL_ACL_FIELD_IP6_LABEL);
1326    }
1327
1328    return SW_OK;
1329}
1330
1331static sw_error_t
1332_garuda_acl_rule_action_reparse(fal_acl_rule_t * sw,
1333                                const garuda_acl_hw_rule_t * hw)
1334{
1335    a_uint32_t data;
1336
1337    sw->action_flg = 0;
1338    SW_GET_FIELD_BY_REG(ACL_RSLT, DES_PORT_EN, data, (hw->act));
1339    if (1 == data)
1340    {
1341        SW_GET_FIELD_BY_REG(ACL_RSLT, PORT_MEM, data, (hw->act));
1342        sw->ports = data;
1343
1344        FAL_ACTION_FLG_SET(sw->action_flg, FAL_ACL_ACTION_REDPT);
1345    }
1346
1347    SW_GET_FIELD_BY_REG(ACL_RSLT, RDTCPU, data, (hw->act));
1348    if (1 == data)
1349    {
1350        FAL_ACTION_FLG_SET(sw->action_flg, FAL_ACL_ACTION_RDTCPU);
1351    }
1352
1353    SW_GET_FIELD_BY_REG(ACL_RSLT, CPYCPU, data, (hw->act));
1354    if (1 == data)
1355    {
1356        FAL_ACTION_FLG_SET(sw->action_flg, FAL_ACL_ACTION_CPYCPU);
1357    }
1358
1359    SW_GET_FIELD_BY_REG(ACL_RSLT, MIRR_EN, data, (hw->act));
1360    if (1 == data)
1361    {
1362        FAL_ACTION_FLG_SET(sw->action_flg, FAL_ACL_ACTION_MIRROR);
1363    }
1364
1365    SW_GET_FIELD_BY_REG(ACL_RSLT, REMARK_DOT1P, data, (hw->act));
1366    if (1 == data)
1367    {
1368        SW_GET_FIELD_BY_REG(ACL_RSLT, DOT1P, data, (hw->act));
1369        sw->up = data & 0x7;
1370
1371        FAL_ACTION_FLG_SET(sw->action_flg, FAL_ACL_ACTION_REMARK_UP);
1372    }
1373
1374    SW_GET_FIELD_BY_REG(ACL_RSLT, REMARK_PRI_QU, data, (hw->act));
1375    if (1 == data)
1376    {
1377        SW_GET_FIELD_BY_REG(ACL_RSLT, PRI_QU, data, (hw->act));
1378        sw->queue = data & 0x3;
1379
1380        FAL_ACTION_FLG_SET(sw->action_flg, FAL_ACL_ACTION_REMARK_QUEUE);
1381    }
1382
1383    SW_GET_FIELD_BY_REG(ACL_RSLT, CHG_VID_EN, data, (hw->act));
1384    if (1 == data)
1385    {
1386        SW_GET_FIELD_BY_REG(ACL_RSLT, STAG_CHG_EN, data, (hw->act));
1387        if (1 == data)
1388        {
1389            FAL_ACTION_FLG_SET(sw->action_flg, FAL_ACL_ACTION_NEST_VLAN);
1390        }
1391        else
1392        {
1393            FAL_ACTION_FLG_SET(sw->action_flg, FAL_ACL_ACTION_MODIFY_VLAN);
1394            SW_GET_FIELD_BY_REG(ACL_RSLT, PORT_MEM, data, (hw->act));
1395            sw->ports = data;
1396        }
1397    }
1398
1399    SW_GET_FIELD_BY_REG(ACL_RSLT, VID, data, (hw->act));
1400    sw->vid = data & 0xfff;
1401
1402    return SW_OK;
1403}
1404
1405static sw_error_t
1406_garuda_acl_filter_alloc(a_uint32_t dev_id, a_uint32_t * idx)
1407{
1408    a_uint32_t i;
1409
1410    for (i = 0; i < GARUDA_MAX_RULE; i++)
1411    {
1412        if (0 == (filter_snap[dev_id] & (0x1UL << i)))
1413        {
1414            filter_snap[dev_id] |= (0x1UL << i);
1415            *idx = i;
1416            return SW_OK;
1417        }
1418    }
1419    return SW_NO_RESOURCE;
1420}
1421
1422static void
1423_garuda_acl_filter_free(a_uint32_t dev_id, a_uint32_t idx)
1424{
1425    filter_snap[dev_id] &= (~(0x1UL << idx));
1426}
1427
1428static void
1429_garuda_acl_filter_snap(a_uint32_t dev_id)
1430{
1431    filter_snap[dev_id] = filter[dev_id];
1432    return;
1433}
1434
1435static void
1436_garuda_acl_filter_commit(a_uint32_t dev_id)
1437{
1438    filter[dev_id] = filter_snap[dev_id];
1439    return;
1440}
1441
1442static sw_error_t
1443_garuda_acl_slct_update(garuda_acl_hw_rule_t * hw, a_uint32_t offset,
1444                        a_uint32_t flt_idx)
1445{
1446    switch (offset)
1447    {
1448        case 0:
1449            SW_SET_REG_BY_FIELD(RUL_SLCT0, ADDR0_EN, 1, hw->slct[0]);
1450            SW_SET_REG_BY_FIELD(RUL_SLCT1, ADDR0, flt_idx, hw->slct[1]);
1451            break;
1452
1453        case 1:
1454            SW_SET_REG_BY_FIELD(RUL_SLCT0, ADDR1_EN, 1, hw->slct[0]);
1455            SW_SET_REG_BY_FIELD(RUL_SLCT2, ADDR1, flt_idx, hw->slct[2]);
1456            break;
1457
1458        case 2:
1459            SW_SET_REG_BY_FIELD(RUL_SLCT0, ADDR2_EN, 1, hw->slct[0]);
1460            SW_SET_REG_BY_FIELD(RUL_SLCT3, ADDR2, flt_idx, hw->slct[3]);
1461            break;
1462
1463        case 3:
1464            SW_SET_REG_BY_FIELD(RUL_SLCT0, ADDR3_EN, 1, hw->slct[0]);
1465            SW_SET_REG_BY_FIELD(RUL_SLCT4, ADDR3, flt_idx, hw->slct[4]);
1466            break;
1467
1468        default:
1469            return SW_FAIL;
1470    }
1471    return SW_OK;
1472}
1473
1474static sw_error_t
1475_garuda_acl_filter_write(a_uint32_t dev_id, const garuda_acl_hw_rule_t * rule,
1476                         a_uint32_t flt_idx)
1477{
1478#ifdef GARUDA_SW_ENTRY
1479    char *memaddr;
1480    a_uint32_t i;
1481
1482    memaddr = flt_vlu_mem + (flt_idx << 5);
1483    aos_mem_copy(memaddr, (char *) &(rule->vlu[0]), 20);
1484
1485    memaddr = flt_msk_mem + (flt_idx << 5);
1486    aos_mem_copy(memaddr, (char *) &(rule->msk[0]), 20);
1487
1488    memaddr = flt_typ_mem + (flt_idx << 5);
1489    aos_mem_copy(memaddr, (char *) &(rule->typ), 4);
1490
1491#else
1492    sw_error_t rv;
1493    a_uint32_t i, base, addr;
1494
1495    /* set filter value */
1496    base = GARUDA_RULE_VLU_ADDR + (flt_idx << 5);
1497    for (i = 0; i < 5; i++)
1498    {
1499        addr = base + (i << 2);
1500        HSL_REG_ENTRY_GEN_SET(rv, dev_id, addr, sizeof (a_uint32_t),
1501                              (a_uint8_t *) (&(rule->vlu[i])),
1502                              sizeof (a_uint32_t));
1503        SW_RTN_ON_ERROR(rv);
1504    }
1505
1506    /* set filter mask */
1507    base = GARUDA_RULE_MSK_ADDR + (flt_idx << 5);
1508    for (i = 0; i < 5; i++)
1509    {
1510        addr = base + (i << 2);
1511        HSL_REG_ENTRY_GEN_SET(rv, dev_id, addr, sizeof (a_uint32_t),
1512                              (a_uint8_t *) (&(rule->msk[i])),
1513                              sizeof (a_uint32_t));
1514        SW_RTN_ON_ERROR(rv);
1515    }
1516
1517    /* set filter type */
1518    addr = GARUDA_RULE_TYP_ADDR + (flt_idx << 5);
1519    HSL_REG_ENTRY_GEN_SET(rv, dev_id, addr, sizeof (a_uint32_t),
1520                          (a_uint8_t *) (&(rule->typ)), sizeof (a_uint32_t));
1521    SW_RTN_ON_ERROR(rv);
1522#endif
1523
1524#ifdef GARUDA_ENTRY_DUMP
1525    aos_printk("\n_garuda_acl_filter_write flt_idx = %d\n", flt_idx);
1526    for (i = 0; i < 5; i++)
1527    {
1528        aos_printk("%08x  ", rule->vlu[i]);
1529    }
1530    aos_printk("\n");
1531    for (i = 0; i < 5; i++)
1532    {
1533        aos_printk("%08x  ", rule->msk[i]);
1534    }
1535#endif
1536
1537    return SW_OK;
1538}
1539
1540static sw_error_t
1541_garuda_acl_action_write(a_uint32_t dev_id, const garuda_acl_hw_rule_t * rule,
1542                         a_uint32_t act_idx)
1543{
1544#ifdef GARUDA_SW_ENTRY
1545    char *memaddr;
1546
1547    memaddr = act_mem + (act_idx << 5);
1548    aos_mem_copy(memaddr, (char *) &(rule->act), 4);
1549
1550#else
1551    sw_error_t rv;
1552    a_uint32_t addr;
1553
1554    /* set rule action */
1555    addr = GARUDA_RULE_ACT_ADDR + (act_idx << 5);
1556    HSL_REG_ENTRY_GEN_SET(rv, dev_id, addr, sizeof (a_uint32_t),
1557                          (a_uint8_t *) (&(rule->act)), sizeof (a_uint32_t));
1558    SW_RTN_ON_ERROR(rv);
1559#endif
1560
1561#ifdef GARUDA_ENTRY_DUMP
1562    aos_printk("\n_garuda_acl_action_write act_idx = %d    ", act_idx);
1563    aos_printk("%08x  ", rule->act);
1564#endif
1565
1566    return SW_OK;
1567}
1568
static sw_error_t
_garuda_acl_slct_write(a_uint32_t dev_id, const garuda_acl_hw_rule_t * rule,
                       a_uint32_t slct_idx)
{
#ifdef GARUDA_SW_ENTRY
    char *memaddr;
    a_uint32_t i;

    memaddr = slct_mem + (slct_idx << 5);
    aos_mem_copy(memaddr, (char *) &(rule->slct[0]), 32);

#else
    sw_error_t rv;
    a_uint32_t base, addr;
    a_uint32_t i;

    base = GARUDA_RULE_SLCT_ADDR + (slct_idx << 5);

    /* set filter length */
    HSL_REG_ENTRY_GEN_SET(rv, dev_id, (base + 24), sizeof (a_uint32_t),
                          (a_uint8_t *) (&(rule->slct[6])),
                          sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* set filter address */
    for (i = 1; i < 5; i++)
    {
        addr = base + (i << 2);
        HSL_REG_ENTRY_GEN_SET(rv, dev_id, addr, sizeof (a_uint32_t),
                              (a_uint8_t *) (&(rule->slct[i])),
                              sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }

    /* set filter enable */
    HSL_REG_ENTRY_GEN_SET(rv, dev_id, base, sizeof (a_uint32_t),
                          (a_uint8_t *) (&(rule->slct[0])),
                          sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);
#endif

#ifdef GARUDA_ENTRY_DUMP
    aos_printk("\n_garuda_acl_slct_write slct_idx = %d\n", slct_idx);
    for (i = 0; i < 8; i++)
    {
        aos_printk("%08x  ", rule->slct[i]);
    }
#endif

    return SW_OK;
}

static sw_error_t
_garuda_acl_filter_read(a_uint32_t dev_id, garuda_acl_hw_rule_t * rule,
                        a_uint32_t flt_idx)
{
#ifdef GARUDA_SW_ENTRY
    char *memaddr;
    a_uint32_t i;

    memaddr = flt_vlu_mem + (flt_idx << 5);
    aos_mem_copy((char *) &(rule->vlu[0]), memaddr, 20);

    memaddr = flt_msk_mem + (flt_idx << 5);
    aos_mem_copy((char *) &(rule->msk[0]), memaddr, 20);

    memaddr = flt_typ_mem + (flt_idx << 5);
    aos_mem_copy((char *) &(rule->typ), memaddr, 4);

#else
    sw_error_t rv;
    a_uint32_t i, base, addr;

    /* get filter value */
    base = GARUDA_RULE_VLU_ADDR + (flt_idx << 5);
    for (i = 0; i < 5; i++)
    {
        addr = base + (i << 2);
        HSL_REG_ENTRY_GEN_GET(rv, dev_id, addr, sizeof (a_uint32_t),
                              (a_uint8_t *) (&(rule->vlu[i])),
                              sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }

    /* get filter mask */
    base = GARUDA_RULE_MSK_ADDR + (flt_idx << 5);
    for (i = 0; i < 5; i++)
    {
        addr = base + (i << 2);
        HSL_REG_ENTRY_GEN_GET(rv, dev_id, addr, sizeof (a_uint32_t),
                              (a_uint8_t *) (&(rule->msk[i])),
                              sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }

    /* get filter type */
    addr = GARUDA_RULE_TYP_ADDR + (flt_idx << 5);
    HSL_REG_ENTRY_GEN_GET(rv, dev_id, addr, sizeof (a_uint32_t),
                          (a_uint8_t *) (&(rule->typ)), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);
#endif

#ifdef GARUDA_ENTRY_DUMP
    aos_printk("\n_garuda_acl_filter_read flt_idx = %d\n", flt_idx);
    for (i = 0; i < 5; i++)
    {
        aos_printk("%08x  ", rule->vlu[i]);
    }
    aos_printk("\n");
    for (i = 0; i < 5; i++)
    {
        aos_printk("%08x  ", rule->msk[i]);
    }
#endif

    return SW_OK;
}

static sw_error_t
_garuda_acl_action_read(a_uint32_t dev_id, garuda_acl_hw_rule_t * rule,
                        a_uint32_t act_idx)
{
#ifdef GARUDA_SW_ENTRY
    char *memaddr;

    memaddr = act_mem + (act_idx << 5);
    aos_mem_copy((char *) &(rule->act), memaddr, 4);

#else
    sw_error_t rv;
    a_uint32_t addr;

    /* get rule action */
    addr = GARUDA_RULE_ACT_ADDR + (act_idx << 5);
    HSL_REG_ENTRY_GEN_GET(rv, dev_id, addr, sizeof (a_uint32_t),
                          (a_uint8_t *) (&(rule->act)), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);
#endif

#ifdef GARUDA_ENTRY_DUMP
    aos_printk("\n_garuda_acl_action_read act_idx = %d    ", act_idx);
    aos_printk("%08x  ", rule->act);
#endif

    return SW_OK;
}

static sw_error_t
_garuda_acl_slct_read(a_uint32_t dev_id, garuda_acl_hw_rule_t * rule,
                      a_uint32_t slct_idx)
{
#ifdef GARUDA_SW_ENTRY
    char *memaddr;
    a_uint32_t i;

    memaddr = slct_mem + (slct_idx << 5);
    aos_mem_copy((char *) &(rule->slct[0]), memaddr, 32);

#else
    sw_error_t rv;
    a_uint32_t i, base, addr;

    base = GARUDA_RULE_SLCT_ADDR + (slct_idx << 5);

    /* get filter type */
    HSL_REG_ENTRY_GEN_GET(rv, dev_id, (base + 28), sizeof (a_uint32_t),
                          (a_uint8_t *) (&(rule->slct[7])),
                          sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* get filter length */
    HSL_REG_ENTRY_GEN_GET(rv, dev_id, (base + 24), sizeof (a_uint32_t),
                          (a_uint8_t *) (&(rule->slct[6])),
                          sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    /* get filter address and enable */
    for (i = 0; i < 5; i++)
    {
        addr = base + (i << 2);
        HSL_REG_ENTRY_GEN_GET(rv, dev_id, addr, sizeof (a_uint32_t),
                              (a_uint8_t *) (&(rule->slct[i])),
                              sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }
#endif

#ifdef GARUDA_ENTRY_DUMP
    aos_printk("\n_garuda_acl_slct_read slct_idx = %d\n", slct_idx);
    for (i = 0; i < 8; i++)
    {
        aos_printk("%08x  ", rule->slct[i]);
    }
#endif

    return SW_OK;
}

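/* Commit rule_nr parsed hardware rules to consecutive action/select
   entries starting at base_addr; each rule may span several filter
   entries, which are resolved through _garuda_acl_filter_map_get. */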
static sw_error_t
_garuda_acl_rule_set(a_uint32_t dev_id, a_uint32_t base_addr,
                     const garuda_acl_hw_rule_t * rule, a_uint32_t rule_nr)
{
    sw_error_t rv;
    a_uint32_t ent_idx, tmp_ent_idx;
    a_uint32_t i, j, flt_nr, flt_idx[4];
    a_uint32_t act_idx, slct_idx;

    act_idx = base_addr;
    slct_idx = base_addr;
    ent_idx = 0;
    for (i = 0; i < rule_nr; i++)
    {
        tmp_ent_idx = ent_idx;

        rv = _garuda_acl_filter_map_get(&rule[ent_idx], flt_idx, &flt_nr);
        SW_RTN_ON_ERROR(rv);

        if (!flt_nr)
        {
            return SW_FAIL;
        }

        /* use a dedicated counter so the outer rule loop index is preserved */
        for (j = 0; j < flt_nr; j++)
        {
            rv = _garuda_acl_filter_write(dev_id, &(rule[ent_idx]), flt_idx[j]);
            SW_RTN_ON_ERROR(rv);
            ent_idx++;
        }

        rv = _garuda_acl_action_write(dev_id, &(rule[tmp_ent_idx]), act_idx);
        SW_RTN_ON_ERROR(rv);

        rv = _garuda_acl_slct_write(dev_id, &(rule[tmp_ent_idx]), slct_idx);
        SW_RTN_ON_ERROR(rv);

        act_idx++;
        slct_idx++;
    }
    return SW_OK;
}

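/* Read back one hardware rule (select, action and all mapped filter
   entries) at rule_idx; *ent_idx is advanced by the number of filter
   entries consumed. */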
static sw_error_t
_garuda_acl_rule_get(a_uint32_t dev_id, garuda_acl_hw_rule_t * rule,
                     a_uint32_t * ent_idx, a_uint32_t rule_idx)
{
    sw_error_t rv;
    a_uint32_t i, tmp_idx, flt_nr, flt_idx[4];

    tmp_idx = *ent_idx;

    rv = _garuda_acl_slct_read(dev_id, &rule[tmp_idx], rule_idx);
    SW_RTN_ON_ERROR(rv);

    rv = _garuda_acl_action_read(dev_id, &rule[tmp_idx], rule_idx);
    SW_RTN_ON_ERROR(rv);

    rv = _garuda_acl_filter_map_get(&rule[tmp_idx], flt_idx, &flt_nr);
    SW_RTN_ON_ERROR(rv);

    for (i = 0; i < flt_nr; i++)
    {
        rv = _garuda_acl_filter_read(dev_id, &rule[tmp_idx], flt_idx[i]);
        SW_RTN_ON_ERROR(rv);

        tmp_idx++;
    }

    *ent_idx = tmp_idx;
    return SW_OK;
}

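/* Translate one software rule description into one or more hardware
   filter entries according to its rule type (MAC, IP4 or IP6) and parse
   its action; *idx is advanced by the number of entries used and
   *flt_len returns the value later programmed into the RULE_LEN field. */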
static sw_error_t
_garuda_acl_rule_sw_to_hw(a_uint32_t dev_id, fal_acl_rule_t * sw,
                          fal_pbmp_t bind_pts, garuda_acl_hw_rule_t * hw,
                          a_uint32_t * idx, a_uint32_t * flt_len)
{
    sw_error_t rv;
    a_bool_t b_care;
    a_bool_t b_valid = A_FALSE;
    a_uint32_t tmp_idx;
    a_uint32_t len1 = 0, len2 = 0, len3 = 0, maxlen = 0;

    if (FAL_FIELD_FLG_TST(sw->field_flg, FAL_ACL_FIELD_UDF))
    {
        return SW_NOT_SUPPORTED;
    }

    tmp_idx = *idx;
    if (FAL_ACL_RULE_MAC == sw->rule_type)
    {
        rv = _garuda_acl_rule_mac_parse(sw, bind_pts, &hw[tmp_idx], &b_care,
                                        &len1);
        SW_RTN_ON_ERROR(rv);
        tmp_idx++;

        if (0 == len1)
        {
            *flt_len = 14;
        }
        else
        {
            *flt_len = len1;
        }
    }
    else if (FAL_ACL_RULE_IP4 == sw->rule_type)
    {
        rv = _garuda_acl_rule_mac_parse(sw, bind_pts, &hw[tmp_idx], &b_care,
                                        &len1);
        SW_RTN_ON_ERROR(rv);
        if (A_TRUE == b_care)
        {
            tmp_idx++;
        }

        rv = _garuda_acl_rule_ip4_parse(sw, bind_pts, &hw[tmp_idx], &b_care,
                                        &len1);
        SW_RTN_ON_ERROR(rv);
        tmp_idx++;

        if (0 == len1)
        {
            *flt_len = 34;
        }
        else
        {
            *flt_len = len1;
        }
    }
    else if (FAL_ACL_RULE_IP6 == sw->rule_type)
    {
        rv = _garuda_acl_rule_mac_parse(sw, bind_pts, &hw[tmp_idx], &b_care,
                                        &len1);
        SW_RTN_ON_ERROR(rv);
        if (A_TRUE == b_care)
        {
            tmp_idx++;
        }

        rv = _garuda_acl_rule_ip6r1_parse(sw, bind_pts, &hw[tmp_idx], &b_care,
                                          &len1);
        SW_RTN_ON_ERROR(rv);
        if (A_TRUE == b_care)
        {
            tmp_idx++;
            b_valid = A_TRUE;
        }

        rv = _garuda_acl_rule_ip6r2_parse(sw, bind_pts, &hw[tmp_idx], &b_care,
                                          &len2);
        SW_RTN_ON_ERROR(rv);
        if (A_TRUE == b_care)
        {
            tmp_idx++;
            b_valid = A_TRUE;
        }

        rv = _garuda_acl_rule_ip6r3_parse(sw, bind_pts, &hw[tmp_idx], &b_care,
                                          &len3);
        SW_RTN_ON_ERROR(rv);
        if ((A_TRUE == b_care) || (A_FALSE == b_valid))
        {
            tmp_idx++;
        }

        if (len1 >= len2)
        {
            if (len1 >= len3)
            {
                maxlen = len1;
            }
            else
            {
                maxlen = len3;
            }
        }
        else
        {
            if (len2 >= len3)
            {
                maxlen = len2;
            }
            else
            {
                maxlen = len3;
            }
        }

        if (0 == maxlen)
        {
            *flt_len = 54;
        }
        else
        {
            *flt_len = maxlen;
        }
    }
    else
    {
        return SW_NOT_SUPPORTED;
    }

    rv = _garuda_acl_action_parse(dev_id, sw, &(hw_rule_ent[*idx]));
    SW_RTN_ON_ERROR(rv);

    *idx = tmp_idx;
    return SW_OK;
}

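/* Rebuild a software rule description from ent_nr consecutive hardware
   filter entries, dispatching on each entry's filter type. */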
static sw_error_t
_garuda_acl_rule_hw_to_sw(fal_acl_rule_t * sw, const garuda_acl_hw_rule_t * hw,
                          a_uint32_t ent_idx, a_uint32_t ent_nr)
{
    sw_error_t rv;
    a_uint32_t i, flt_typ;
    a_bool_t b_ip4 = A_FALSE, b_ip6 = A_FALSE;

    rv = _garuda_acl_rule_action_reparse(sw, &hw[ent_idx]);
    SW_RTN_ON_ERROR(rv);

    sw->rule_type = FAL_ACL_RULE_MAC;
    for (i = 0; i < ent_nr; i++)
    {
        SW_GET_FIELD_BY_REG(RUL_TYPE, TYP, flt_typ, hw[ent_idx + i].typ);

        if (GARUDA_MAC_FILTER == flt_typ)
        {
            rv = _garuda_acl_rule_mac_reparse(sw, &hw[ent_idx + i]);
            SW_RTN_ON_ERROR(rv);
        }
        else if (GARUDA_IP4_FILTER == flt_typ)
        {
            rv = _garuda_acl_rule_ip4_reparse(sw, &hw[ent_idx + i]);
            SW_RTN_ON_ERROR(rv);
            b_ip4 = A_TRUE;
        }
        else if (GARUDA_IP6R1_FILTER == flt_typ)
        {
            rv = _garuda_acl_rule_ip6r1_reparse(sw, &hw[ent_idx + i]);
            SW_RTN_ON_ERROR(rv);
            b_ip6 = A_TRUE;
        }
        else if (GARUDA_IP6R2_FILTER == flt_typ)
        {
            rv = _garuda_acl_rule_ip6r2_reparse(sw, &hw[ent_idx + i]);
            SW_RTN_ON_ERROR(rv);
            b_ip6 = A_TRUE;
        }
        else if (GARUDA_IP6R3_FILTER == flt_typ)
        {
            rv = _garuda_acl_rule_ip6r3_reparse(sw, &hw[ent_idx + i]);
            SW_RTN_ON_ERROR(rv);
            b_ip6 = A_TRUE;
        }
        else
        {
            return SW_FAIL;
        }
    }

    if (A_TRUE == b_ip4)
    {
        sw->rule_type = FAL_ACL_RULE_IP4;
    }

    if (A_TRUE == b_ip6)
    {
        sw->rule_type = FAL_ACL_RULE_IP6;
    }

    return SW_OK;
}

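/* Move a block of `size` select/action entries from src_slct_idx to
   dst_slct_idx. The copy direction is chosen so that overlapping source
   and destination ranges are handled safely, and every destination entry
   is invalidated before it is rewritten. */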
static sw_error_t
_garuda_acl_rule_copy(a_uint32_t dev_id, a_uint32_t src_slct_idx,
                      a_uint32_t dst_slct_idx, a_uint32_t size)
{
    sw_error_t rv;
    a_uint32_t i;
    a_int32_t step, src_idx, dst_idx;
    garuda_acl_hw_rule_t rule;

    if (dst_slct_idx <= src_slct_idx)
    {
        src_idx = src_slct_idx & 0x7fffffff;
        dst_idx = dst_slct_idx & 0x7fffffff;
        step = 1;
    }
    else
    {
        src_idx = (src_slct_idx + size - 1) & 0x7fffffff;
        dst_idx = (dst_slct_idx + size - 1) & 0x7fffffff;
        step = -1;
    }

    aos_mem_zero(&rule, sizeof (garuda_acl_hw_rule_t));
    for (i = 0; i < size; i++)
    {
        rv = _garuda_acl_rule_invalid(dev_id, (a_uint32_t) dst_idx, 1);
        SW_RTN_ON_ERROR(rv);

        rv = _garuda_acl_action_read(dev_id, &rule, (a_uint32_t) src_idx);
        SW_RTN_ON_ERROR(rv);

        rv = _garuda_acl_action_write(dev_id, &rule, (a_uint32_t) dst_idx);
        SW_RTN_ON_ERROR(rv);

        rv = _garuda_acl_slct_read(dev_id, &rule, (a_uint32_t) src_idx);
        SW_RTN_ON_ERROR(rv);

        rv = _garuda_acl_slct_write(dev_id, &rule, (a_uint32_t) dst_idx);
        SW_RTN_ON_ERROR(rv);

        rv = _garuda_acl_rule_invalid(dev_id, (a_uint32_t) src_idx, 1);
        SW_RTN_ON_ERROR(rv);

        src_idx += step;
        dst_idx += step;
    }

    return SW_OK;
}

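/* Clear the enable word of `size` consecutive select entries starting at
   rule_idx so the hardware stops matching them. */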
static sw_error_t
_garuda_acl_rule_invalid(a_uint32_t dev_id, a_uint32_t rule_idx,
                         a_uint32_t size)
{
    sw_error_t rv;
    a_uint32_t base, flag, i;

    flag = 0;
    for (i = 0; i < size; i++)
    {
        base = GARUDA_RULE_SLCT_ADDR + ((rule_idx + i) << 5);
        HSL_REG_ENTRY_GEN_SET(rv, dev_id, base, sizeof (a_uint32_t),
                              (a_uint8_t *) (&flag), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }
    return SW_OK;
}

static sw_error_t
_garuda_acl_rule_valid(a_uint32_t dev_id, a_uint32_t rule_idx, a_uint32_t size,
                       a_uint32_t flag)
{
    sw_error_t rv;
    a_uint32_t base, i;

    for (i = 0; i < size; i++)
    {
        base = GARUDA_RULE_SLCT_ADDR + ((rule_idx + i) << 5);
        HSL_REG_ENTRY_GEN_SET(rv, dev_id, base, sizeof (a_uint32_t),
                              (a_uint8_t *) (&flag), sizeof (a_uint32_t));
        SW_RTN_ON_ERROR(rv);
    }
    return SW_OK;
}

static sw_error_t
_garuda_acl_addr_update(a_uint32_t dev_id, a_uint32_t old_addr,
                        a_uint32_t new_addr, a_uint32_t list_id)
{
    sw_error_t rv;
    a_uint32_t idx;

    rv = _garuda_acl_list_loc(dev_id, list_id, &idx);
    SW_RTN_ON_ERROR(rv);

    if (old_addr != list_ent[dev_id][idx].addr)
    {
        return SW_FAIL;
    }

    list_ent[dev_id][idx].addr = new_addr;
    return SW_OK;
}

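/* Rebind one hardware rule to a new source port bitmap: the rule is
   temporarily invalidated, the port field of every mapped filter entry
   is rewritten, and the original enable word is restored. */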
static sw_error_t
_garuda_acl_rule_bind(a_uint32_t dev_id, a_uint32_t rule_idx, a_uint32_t ports)
{
    sw_error_t rv;
    a_uint32_t flt_idx[4], flt_nr;
    a_uint32_t bind_pts = 0, addr, i, ret = 0;
    garuda_acl_hw_rule_t rule;

    aos_mem_zero(&rule, sizeof (garuda_acl_hw_rule_t));

    rv = _garuda_acl_slct_read(dev_id, &rule, rule_idx);
    SW_RTN_ON_ERROR(rv);

    rv = _garuda_acl_filter_map_get(&rule, flt_idx, &flt_nr);
    SW_RTN_ON_ERROR(rv);

    rv = _garuda_acl_rule_invalid(dev_id, rule_idx, 1);
    SW_RTN_ON_ERROR(rv);

    for (i = 0; i < flt_nr; i++)
    {
        addr = GARUDA_RULE_VLU_ADDR + (flt_idx[i] << 5) + 16;
        HSL_REG_ENTRY_GEN_GET(rv, dev_id, addr, sizeof (a_uint32_t),
                              (a_uint8_t *) (&bind_pts), sizeof (a_uint32_t));

        /* the source port field occupies the same hardware bit position
           in every rule type */
        SW_SET_REG_BY_FIELD(MAC_RUL_V4, MAC_INPT, ports, bind_pts);

        HSL_REG_ENTRY_GEN_SET(rv, dev_id, addr, sizeof (a_uint32_t),
                              (a_uint8_t *) (&bind_pts), sizeof (a_uint32_t));
        ret += rv;
    }

    rv = _garuda_acl_rule_valid(dev_id, rule_idx, 1, rule.slct[0]);
    ret += rv;
    if (0 != ret)
    {
        return SW_FAIL;
    }

    return SW_OK;
}

static sw_error_t
_garuda_acl_list_creat(a_uint32_t dev_id, a_uint32_t list_id,
                       a_uint32_t list_pri)
{
    a_uint32_t i, loc = GARUDA_MAX_LIST;

    HSL_DEV_ID_CHECK(dev_id);

    for (i = 0; i < GARUDA_MAX_LIST; i++)
    {
        if ((ENT_USED == list_ent[dev_id][i].status)
                && (list_id == list_ent[dev_id][i].list_id))
        {
            return SW_ALREADY_EXIST;
        }

        if (ENT_FREE == list_ent[dev_id][i].status)
        {
            loc = i;
        }
    }

    if (GARUDA_MAX_LIST == loc)
    {
        return SW_NO_RESOURCE;
    }

    aos_mem_zero(&(list_ent[dev_id][loc]), sizeof (garuda_acl_list_t));
    list_ent[dev_id][loc].list_id = list_id;
    list_ent[dev_id][loc].list_pri = list_pri;
    list_ent[dev_id][loc].status = ENT_USED;
    _garuda_acl_list_dump(dev_id);
    return SW_OK;
}


static sw_error_t
_garuda_acl_list_destroy(a_uint32_t dev_id, a_uint32_t list_id)
{
    a_uint32_t list_idx;

    HSL_DEV_ID_CHECK(dev_id);

    for (list_idx = 0; list_idx < GARUDA_MAX_LIST; list_idx++)
    {
        if ((ENT_USED == list_ent[dev_id][list_idx].status)
                && (list_id == list_ent[dev_id][list_idx].list_id))
        {
            break;
        }
    }

    if (list_idx >= GARUDA_MAX_LIST)
    {
        return SW_NOT_FOUND;
    }

    if (0 != list_ent[dev_id][list_idx].bind_pts)
    {
        return SW_NOT_SUPPORTED;
    }

    if (0 != list_ent[dev_id][list_idx].size)
    {
        return SW_NOT_SUPPORTED;
    }

    aos_mem_zero(&(list_ent[dev_id][list_idx]), sizeof (garuda_acl_list_t));
    list_ent[dev_id][list_idx].status = ENT_FREE;
    _garuda_acl_list_dump(dev_id);
    return SW_OK;
}


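/* Add rule_nr software rules to the tail of an existing list: translate
   them into hardware entries, allocate (or grow) the list's select block,
   program the hardware and finally commit the filter bitmap. */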
static sw_error_t
_garuda_acl_rule_add(a_uint32_t dev_id, a_uint32_t list_id,
                     a_uint32_t rule_id, a_uint32_t rule_nr,
                     fal_acl_rule_t * rule)
{
    sw_error_t rv;
    a_uint32_t hsl_f_rsc, list_new_size, list_addr;
    a_uint32_t list_pri, list_idx, load_addr, bind_pts;

    HSL_DEV_ID_CHECK(dev_id);

    if ((0 == rule_nr) || (NULL == rule))
    {
        return SW_BAD_PARAM;
    }

    rv = hsl_acl_free_rsc_get(dev_id, &hsl_f_rsc);
    SW_RTN_ON_ERROR(rv);
    if (hsl_f_rsc < rule_nr)
    {
        return SW_NO_RESOURCE;
    }

    rv = _garuda_acl_list_loc(dev_id, list_id, &list_idx);
    SW_RTN_ON_ERROR(rv);

    if (rule_id != list_ent[dev_id][list_idx].size)
    {
        return SW_ALREADY_EXIST;
    }
    bind_pts = list_ent[dev_id][list_idx].bind_pts;

    _garuda_acl_filter_snap(dev_id);

    /* parse rule entry and alloc rule resource */
    {
        a_uint32_t i, j;
        a_uint32_t ent_idx, tmp_ent_idx, flt_idx, flt_len;

        aos_mem_zero(hw_rule_ent,
                     GARUDA_MAX_RULE * sizeof (garuda_acl_hw_rule_t));

        ent_idx = 0;
        for (i = 0; i < rule_nr; i++)
        {
            tmp_ent_idx = ent_idx;
            rv = _garuda_acl_rule_sw_to_hw(dev_id, &rule[i], bind_pts,
                                           &hw_rule_ent[ent_idx], &ent_idx,
                                           &flt_len);
            SW_RTN_ON_ERROR(rv);

            for (j = tmp_ent_idx; j < ent_idx; j++)
            {
                rv = _garuda_acl_filter_alloc(dev_id, &flt_idx);
                SW_RTN_ON_ERROR(rv);

                rv = _garuda_acl_slct_update(&hw_rule_ent[tmp_ent_idx],
                                             j - tmp_ent_idx, flt_idx);
                SW_RTN_ON_ERROR(rv);
            }
            SW_SET_REG_BY_FIELD(RUL_SLCT6, RULE_LEN, flt_len,
                                hw_rule_ent[tmp_ent_idx].slct[6]);
        }
    }

    /* alloc hardware select entry resource */
    if (0 == list_ent[dev_id][list_idx].size)
    {
        list_new_size = rule_nr;
        list_pri = list_ent[dev_id][list_idx].list_pri;

        rv = hsl_acl_blk_alloc(dev_id, list_pri, list_new_size, list_id,
                               &list_addr);
        SW_RTN_ON_ERROR(rv);

        load_addr = list_addr;
    }
    else
    {
        list_new_size = list_ent[dev_id][list_idx].size + rule_nr;
        list_addr = list_ent[dev_id][list_idx].addr;

        rv = hsl_acl_blk_resize(dev_id, list_addr, list_new_size);
        SW_RTN_ON_ERROR(rv);

        /* be careful: the resize operation may change the list base address */
        list_addr = list_ent[dev_id][list_idx].addr;
        load_addr = list_ent[dev_id][list_idx].size + list_addr;
    }

    /* load acl rule to hardware */
    rv = _garuda_acl_rule_set(dev_id, load_addr, hw_rule_ent, rule_nr);
    if (SW_OK != rv)
    {
        (void) hsl_acl_blk_resize(dev_id, list_addr,
                                  list_ent[dev_id][list_idx].size);
        return rv;
    }

    /* update software list control information */
    list_ent[dev_id][list_idx].size = list_new_size;
    list_ent[dev_id][list_idx].addr = list_addr;

    /* update hardware acl rule resource information */
    _garuda_acl_filter_commit(dev_id);
    _garuda_acl_list_dump(dev_id);
    return SW_OK;
}


static sw_error_t
_garuda_acl_rule_delete(a_uint32_t dev_id, a_uint32_t list_id,
                        a_uint32_t rule_id, a_uint32_t rule_nr)
{
    sw_error_t rv;
    a_uint32_t flt_idx[4];
    a_uint32_t i, j, flt_nr;
    a_uint32_t list_idx, addr, size, rule_idx, cnt;

    HSL_DEV_ID_CHECK(dev_id);

    rv = _garuda_acl_list_loc(dev_id, list_id, &list_idx);
    SW_RTN_ON_ERROR(rv);

    if (0 == rule_nr)
    {
        return SW_BAD_PARAM;
    }

    if ((rule_id + rule_nr) > list_ent[dev_id][list_idx].size)
    {
        return SW_NOT_FOUND;
    }

    _garuda_acl_filter_snap(dev_id);

    /* free hardware filter resource */
    addr = list_ent[dev_id][list_idx].addr + rule_id;
    for (i = 0; i < rule_nr; i++)
    {
        rv = _garuda_acl_slct_read(dev_id, &hw_rule_ent[0], i + addr);
        SW_RTN_ON_ERROR(rv);

        rv = _garuda_acl_filter_map_get(&hw_rule_ent[0], flt_idx, &flt_nr);
        SW_RTN_ON_ERROR(rv);

        for (j = 0; j < flt_nr; j++)
        {
            _garuda_acl_filter_free(dev_id, flt_idx[j]);
        }
    }

    cnt = list_ent[dev_id][list_idx].size - (rule_id + rule_nr);
    rule_idx = list_ent[dev_id][list_idx].addr + (rule_id + rule_nr);
    rv = _garuda_acl_rule_copy(dev_id, rule_idx, rule_idx - rule_nr, cnt);
    SW_RTN_ON_ERROR(rv);

    addr = list_ent[dev_id][list_idx].addr;
    size = list_ent[dev_id][list_idx].size;
    rv = hsl_acl_blk_resize(dev_id, addr, size - rule_nr);
    SW_RTN_ON_ERROR(rv);

    list_ent[dev_id][list_idx].size -= rule_nr;
    _garuda_acl_filter_commit(dev_id);
    _garuda_acl_list_dump(dev_id);
    return SW_OK;
}


static sw_error_t
_garuda_acl_rule_query(a_uint32_t dev_id, a_uint32_t list_id,
                       a_uint32_t rule_id, fal_acl_rule_t * rule)
{
    sw_error_t rv;
    a_uint32_t list_idx, ent_idx, tmp_ent_idx, rule_idx;

    HSL_DEV_ID_CHECK(dev_id);

    rv = _garuda_acl_list_loc(dev_id, list_id, &list_idx);
    SW_RTN_ON_ERROR(rv);

    if (rule_id >= list_ent[dev_id][list_idx].size)
    {
        return SW_NOT_FOUND;
    }

    aos_mem_zero(rule, sizeof (fal_acl_rule_t));

    ent_idx = 0;
    tmp_ent_idx = 0;
    rule_idx = list_ent[dev_id][list_idx].addr + rule_id;
    rv = _garuda_acl_rule_get(dev_id, hw_rule_ent, &tmp_ent_idx, rule_idx);
    SW_RTN_ON_ERROR(rv);

    rv = _garuda_acl_rule_hw_to_sw(rule, hw_rule_ent, ent_idx,
                                   tmp_ent_idx - ent_idx);
    return rv;
}


static sw_error_t
_garuda_acl_list_bind(a_uint32_t dev_id, a_uint32_t list_id,
                      fal_acl_direc_t direc, fal_acl_bind_obj_t obj_t,
                      a_uint32_t obj_idx)
{
    sw_error_t rv;
    a_uint32_t i, list_idx, rule_idx, base, ports;

    HSL_DEV_ID_CHECK(dev_id);

    if (FAL_ACL_DIREC_IN != direc)
    {
        return SW_NOT_SUPPORTED;
    }

    if (FAL_ACL_BIND_PORT != obj_t)
    {
        return SW_NOT_SUPPORTED;
    }

    rv = _garuda_acl_list_loc(dev_id, list_id, &list_idx);
    SW_RTN_ON_ERROR(rv);

    if (list_ent[dev_id][list_idx].bind_pts & (0x1 << obj_idx))
    {
        return SW_ALREADY_EXIST;
    }

    base = list_ent[dev_id][list_idx].addr;
    ports = list_ent[dev_id][list_idx].bind_pts | (0x1 << obj_idx);
    for (i = 0; i < list_ent[dev_id][list_idx].size; i++)
    {
        rule_idx = base + i;
        rv = _garuda_acl_rule_bind(dev_id, rule_idx, ports);
        SW_RTN_ON_ERROR(rv);
    }

    list_ent[dev_id][list_idx].bind_pts = ports;
    return SW_OK;
}



static sw_error_t
_garuda_acl_list_unbind(a_uint32_t dev_id, a_uint32_t list_id,
                        fal_acl_direc_t direc, fal_acl_bind_obj_t obj_t,
                        a_uint32_t obj_idx)
{
    sw_error_t rv;
    a_uint32_t i, list_idx, rule_idx, base, ports;

    HSL_DEV_ID_CHECK(dev_id);

    if (FAL_ACL_DIREC_IN != direc)
    {
        return SW_NOT_SUPPORTED;
    }

    if (FAL_ACL_BIND_PORT != obj_t)
    {
        return SW_NOT_SUPPORTED;
    }

    rv = _garuda_acl_list_loc(dev_id, list_id, &list_idx);
    SW_RTN_ON_ERROR(rv);

    if (!(list_ent[dev_id][list_idx].bind_pts & (0x1 << obj_idx)))
    {
        return SW_NOT_FOUND;
    }

    base = list_ent[dev_id][list_idx].addr;
    ports = list_ent[dev_id][list_idx].bind_pts & (~(0x1UL << obj_idx));
    for (i = 0; i < list_ent[dev_id][list_idx].size; i++)
    {
        rule_idx = base + i;
        rv = _garuda_acl_rule_bind(dev_id, rule_idx, ports);
        SW_RTN_ON_ERROR(rv);
    }

    list_ent[dev_id][list_idx].bind_pts = ports;
    return SW_OK;
}


static sw_error_t
_garuda_acl_status_set(a_uint32_t dev_id, a_bool_t enable)
{
    sw_error_t rv;
    a_uint32_t val;

    HSL_DEV_ID_CHECK(dev_id);

    if (A_TRUE == enable)
    {
        val = 1;
    }
    else if (A_FALSE == enable)
    {
        val = 0;
    }
    else
    {
        return SW_BAD_PARAM;
    }

    HSL_REG_FIELD_SET(rv, dev_id, QM_CTL, 0, ACL_EN,
                      (a_uint8_t *) (&val), sizeof (a_uint32_t));
    return rv;
}


static sw_error_t
_garuda_acl_status_get(a_uint32_t dev_id, a_bool_t * enable)
{
    sw_error_t rv;
    a_uint32_t val;

    HSL_DEV_ID_CHECK(dev_id);

    HSL_REG_FIELD_GET(rv, dev_id, QM_CTL, 0, ACL_EN,
                      (a_uint8_t *) (&val), sizeof (a_uint32_t));
    SW_RTN_ON_ERROR(rv);

    if (val)
    {
        *enable = A_TRUE;
    }
    else
    {
        *enable = A_FALSE;
    }

    return SW_OK;
}

HSL_LOCAL sw_error_t
garuda_acl_list_dump(a_uint32_t dev_id)
{
    a_uint32_t idx;

    aos_printk("\ngaruda_acl_list_dump:\n");
    for (idx = 0; idx < GARUDA_MAX_LIST; idx++)
    {
        if (ENT_USED == list_ent[dev_id][idx].status)
        {
            aos_printk
            ("\n[id]:%02d  [pri]:%02d  [size]:%02d  [addr]:%02d  [pts_map]:0x%02x",
             list_ent[dev_id][idx].list_id, list_ent[dev_id][idx].list_pri,
             list_ent[dev_id][idx].size, list_ent[dev_id][idx].addr,
             list_ent[dev_id][idx].bind_pts);
        }
    }
    aos_printk("\n");

    return SW_OK;
}

HSL_LOCAL sw_error_t
garuda_acl_rule_dump(a_uint32_t dev_id)
{
    a_uint32_t slt_idx, flt_nr, i, j;
    a_uint32_t flt_idx[4];
    sw_error_t rv;
    garuda_acl_hw_rule_t rule;

    aos_printk("\ngaruda_acl_rule_dump:\n");

    aos_printk("\nfilter_bitmap:0x%x", filter[dev_id]);
    for (slt_idx = 0; slt_idx < GARUDA_MAX_RULE; slt_idx++)
    {
        aos_mem_zero(&rule, sizeof (garuda_acl_hw_rule_t));

        rv = _garuda_acl_slct_read(dev_id, &rule, slt_idx);
        if (SW_OK != rv)
        {
            continue;
        }

        rv = _garuda_acl_filter_map_get(&rule, flt_idx, &flt_nr);
        if (SW_OK != rv)
        {
            continue;
        }

        aos_printk("\nslct_idx=%d  ", slt_idx);
        for (i = 0; i < flt_nr; i++)
        {
            aos_printk("flt%d_idx=%d  ", i, flt_idx[i]);
        }

        aos_printk("\nslt:");
        for (i = 0; i < 8; i++)
        {
            aos_printk("%08x  ", rule.slct[i]);
        }

        if (flt_nr)
        {
            rv = _garuda_acl_action_read(dev_id, &rule, slt_idx);
            if (SW_OK != rv)
            {
                continue;
            }
            aos_printk("\nact:%08x  ", rule.act);

            for (i = 0; i < flt_nr; i++)
            {
                rv = _garuda_acl_filter_read(dev_id, &rule, flt_idx[i]);
                if (SW_OK != rv)
                {
                    continue;
                }

                aos_printk("\ntyp:%08x  ", rule.typ);
                aos_printk("\nvlu:");
                for (j = 0; j < 5; j++)
                {
                    aos_printk("%08x  ", rule.vlu[j]);
                }

                aos_printk("\nmsk:");
                for (j = 0; j < 5; j++)
                {
                    aos_printk("%08x  ", rule.msk[j]);
                }
                aos_printk("\n");
            }
        }
        aos_printk("\n");
    }

    return SW_OK;
}

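/**
 * @brief Reset ACL software status and rebuild the rule resource pool
 * @param[in] dev_id device id
 * @return SW_OK or error code
 */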
sw_error_t
garuda_acl_reset(a_uint32_t dev_id)
{
    sw_error_t rv;
    a_uint32_t i;

    HSL_DEV_ID_CHECK(dev_id);

    aos_mem_zero(hw_rule_ent,
                 (GARUDA_MAX_RULE + 3) * sizeof (garuda_acl_hw_rule_t));

    aos_mem_zero(list_ent[dev_id],
                 GARUDA_MAX_LIST * sizeof (garuda_acl_list_t));

    for (i = 0; i < GARUDA_MAX_LIST; i++)
    {
        list_ent[dev_id][i].status = ENT_FREE;
    }

    filter[dev_id]      = 0;
    filter_snap[dev_id] = 0;

    rv = hsl_acl_pool_destroy(dev_id);
    SW_RTN_ON_ERROR(rv);

    rv = hsl_acl_pool_creat(dev_id, GARUDA_MAX_LIST, GARUDA_MAX_RULE);
    SW_RTN_ON_ERROR(rv);

    return SW_OK;
}

/**
 * @brief Create an acl list
 * @details  Comments:
 *     A smaller list_pri value means a higher priority, so a list with a
 *     smaller priority value is matched first.
 * @param[in] dev_id device id
 * @param[in] list_id acl list id
 * @param[in] list_pri acl list priority
 * @return SW_OK or error code
 */
HSL_LOCAL sw_error_t
garuda_acl_list_creat(a_uint32_t dev_id, a_uint32_t list_id,
                      a_uint32_t list_pri)
{
    sw_error_t rv;

    HSL_API_LOCK;
    rv = _garuda_acl_list_creat(dev_id, list_id, list_pri);
    HSL_API_UNLOCK;
    return rv;
}

/**
 * @brief Destroy an acl list
 * @param[in] dev_id device id
 * @param[in] list_id acl list id
 * @return SW_OK or error code
 */
HSL_LOCAL sw_error_t
garuda_acl_list_destroy(a_uint32_t dev_id, a_uint32_t list_id)
{
    sw_error_t rv;

    HSL_API_LOCK;
    rv = _garuda_acl_list_destroy(dev_id, list_id);
    HSL_API_UNLOCK;
    return rv;
}

/**
 * @brief Add one rule or more rules to an existing acl list
 * @param[in] dev_id device id
 * @param[in] list_id acl list id
 * @param[in] rule_id first rule id of this adding operation in list
 * @param[in] rule_nr rule number of this adding operation
 * @param[in] rule rules content of this adding operation
 * @return SW_OK or error code
 */
HSL_LOCAL sw_error_t
garuda_acl_rule_add(a_uint32_t dev_id, a_uint32_t list_id,
                    a_uint32_t rule_id, a_uint32_t rule_nr,
                    fal_acl_rule_t * rule)
{
    sw_error_t rv;

    HSL_API_LOCK;
    rv = _garuda_acl_rule_add(dev_id, list_id, rule_id, rule_nr, rule);
    HSL_API_UNLOCK;
    return rv;
}

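/*
 * Illustrative call sequence (a minimal sketch, not part of the driver):
 * it shows the intended order of the exported ACL operations. The list
 * id, priority and port number below are arbitrary example values, and a
 * real caller would also fill in match fields and an action in the
 * fal_acl_rule_t before adding it.
 *
 *     fal_acl_rule_t rule;
 *
 *     aos_mem_zero(&rule, sizeof (fal_acl_rule_t));
 *     rule.rule_type = FAL_ACL_RULE_MAC;
 *
 *     (void) garuda_acl_list_creat(0, 0, 0);
 *     (void) garuda_acl_rule_add(0, 0, 0, 1, &rule);
 *     (void) garuda_acl_list_bind(0, 0, FAL_ACL_DIREC_IN,
 *                                 FAL_ACL_BIND_PORT, 1);
 *     (void) garuda_acl_status_set(0, A_TRUE);
 */
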
/**
 * @brief Delete one rule or more rules from an existing acl list
 * @param[in] dev_id device id
 * @param[in] list_id acl list id
 * @param[in] rule_id first rule id of this deleting operation in list
 * @param[in] rule_nr rule number of this deleting operation
 * @return SW_OK or error code
 */
HSL_LOCAL sw_error_t
garuda_acl_rule_delete(a_uint32_t dev_id, a_uint32_t list_id,
                       a_uint32_t rule_id, a_uint32_t rule_nr)
{
    sw_error_t rv;

    HSL_API_LOCK;
    rv = _garuda_acl_rule_delete(dev_id, list_id, rule_id, rule_nr);
    HSL_API_UNLOCK;
    return rv;
}

/**
 * @brief Query one particular rule in a particular acl list
 * @param[in] dev_id device id
 * @param[in] list_id acl list id
 * @param[in] rule_id id of the rule to query in the list
 * @param[out] rule rule content of this operation
 * @return SW_OK or error code
 */
HSL_LOCAL sw_error_t
garuda_acl_rule_query(a_uint32_t dev_id, a_uint32_t list_id,
                      a_uint32_t rule_id, fal_acl_rule_t * rule)
{
    sw_error_t rv;

    HSL_API_LOCK;
    rv = _garuda_acl_rule_query(dev_id, list_id, rule_id, rule);
    HSL_API_UNLOCK;
    return rv;
}

/**
 * @brief Bind an acl list to a particular object
 * @details  Comments:
 *    If obj_t equals FAL_ACL_BIND_PORT then obj_idx means port id
 * @param[in] dev_id device id
 * @param[in] list_id acl list id
 * @param[in] direc direction of this binding operation
 * @param[in] obj_t object type of this binding operation
 * @param[in] obj_idx object index of this binding operation
 * @return SW_OK or error code
 */
HSL_LOCAL sw_error_t
garuda_acl_list_bind(a_uint32_t dev_id, a_uint32_t list_id,
                     fal_acl_direc_t direc, fal_acl_bind_obj_t obj_t,
                     a_uint32_t obj_idx)
{
    sw_error_t rv;

    HSL_API_LOCK;
    rv = _garuda_acl_list_bind(dev_id, list_id, direc, obj_t, obj_idx);
    HSL_API_UNLOCK;
    return rv;
}

/**
 * @brief Unbind an acl list from a particular object
 * @details  Comments:
 *    If obj_t equals FAL_ACL_BIND_PORT then obj_idx means port id
 * @param[in] dev_id device id
 * @param[in] list_id acl list id
 * @param[in] direc direction of this unbinding operation
 * @param[in] obj_t object type of this unbinding operation
 * @param[in] obj_idx object index of this unbinding operation
 * @return SW_OK or error code
 */
HSL_LOCAL sw_error_t
garuda_acl_list_unbind(a_uint32_t dev_id, a_uint32_t list_id,
                       fal_acl_direc_t direc, fal_acl_bind_obj_t obj_t,
                       a_uint32_t obj_idx)
{
    sw_error_t rv;

    HSL_API_LOCK;
    rv = _garuda_acl_list_unbind(dev_id, list_id, direc, obj_t, obj_idx);
    HSL_API_UNLOCK;
    return rv;
}

/**
 * @brief Set working status of ACL engine on a particular device
 * @param[in] dev_id device id
 * @param[in] enable A_TRUE or A_FALSE
 * @return SW_OK or error code
 */
HSL_LOCAL sw_error_t
garuda_acl_status_set(a_uint32_t dev_id, a_bool_t enable)
{
    sw_error_t rv;

    HSL_API_LOCK;
    rv = _garuda_acl_status_set(dev_id, enable);
    HSL_API_UNLOCK;
    return rv;
}

/**
 * @brief Get working status of ACL engine on a particular device
 * @param[in] dev_id device id
 * @param[out] enable A_TRUE or A_FALSE
 * @return SW_OK or error code
 */
HSL_LOCAL sw_error_t
garuda_acl_status_get(a_uint32_t dev_id, a_bool_t * enable)
{
    sw_error_t rv;

    HSL_API_LOCK;
    rv = _garuda_acl_status_get(dev_id, enable);
    HSL_API_UNLOCK;
    return rv;
}

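/**
 * @brief Init ACL module on a particular device, allocate software
 *        resources and register the ACL operations
 * @param[in] dev_id device id
 * @return SW_OK or error code
 */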
sw_error_t
garuda_acl_init(a_uint32_t dev_id)
{
    static a_bool_t b_hw_rule = A_FALSE;
    hsl_acl_func_t *acl_func;
    garuda_acl_hw_rule_t rule;
    sw_error_t rv;
    a_uint32_t i;

    HSL_DEV_ID_CHECK(dev_id);

    if (A_FALSE == b_hw_rule)
    {
        hw_rule_ent = (garuda_acl_hw_rule_t *)
                      aos_mem_alloc((GARUDA_MAX_RULE +
                                     3) * sizeof (garuda_acl_hw_rule_t));
        if (NULL == hw_rule_ent)
        {
            return SW_NO_RESOURCE;
        }
        aos_mem_zero(hw_rule_ent,
                     (GARUDA_MAX_RULE + 3) * sizeof (garuda_acl_hw_rule_t));
        b_hw_rule = A_TRUE;
    }

    list_ent[dev_id] = (garuda_acl_list_t *)
                       aos_mem_alloc(GARUDA_MAX_LIST * sizeof (garuda_acl_list_t));
    if (NULL == list_ent[dev_id])
    {
        return SW_NO_RESOURCE;
    }
    aos_mem_zero(list_ent[dev_id],
                 GARUDA_MAX_LIST * sizeof (garuda_acl_list_t));

    for (i = 0; i < GARUDA_MAX_LIST; i++)
    {
        list_ent[dev_id][i].status = ENT_FREE;
    }

    filter[dev_id] = 0;
    filter_snap[dev_id] = 0;

    rv = hsl_acl_pool_creat(dev_id, GARUDA_MAX_LIST, GARUDA_MAX_RULE);
    SW_RTN_ON_ERROR(rv);

    acl_func = hsl_acl_ptr_get(dev_id);
    SW_RTN_ON_NULL(acl_func);

    acl_func->acl_rule_copy = _garuda_acl_rule_copy;
    acl_func->acl_rule_invalid = _garuda_acl_rule_invalid;
    acl_func->acl_addr_update = _garuda_acl_addr_update;

    /* zero acl hardware memory */
    aos_mem_zero(&rule, sizeof (garuda_acl_hw_rule_t));
    for (i = 0; i < GARUDA_MAX_RULE; i++)
    {
        rv = _garuda_acl_slct_write(dev_id, &rule, i);
        SW_RTN_ON_ERROR(rv);
    }

#ifdef GARUDA_SW_ENTRY
    flt_vlu_mem = aos_mem_alloc(GARUDA_MAX_RULE * 32);
    if (NULL == flt_vlu_mem)
    {
        return SW_NO_RESOURCE;
    }
    aos_mem_zero(flt_vlu_mem, GARUDA_MAX_RULE * 32);

    flt_msk_mem = aos_mem_alloc(GARUDA_MAX_RULE * 32);
    if (NULL == flt_msk_mem)
    {
        return SW_NO_RESOURCE;
    }
    aos_mem_zero(flt_msk_mem, GARUDA_MAX_RULE * 32);

    flt_typ_mem = aos_mem_alloc(GARUDA_MAX_RULE * 4);
    if (NULL == flt_typ_mem)
    {
        return SW_NO_RESOURCE;
    }
    aos_mem_zero(flt_typ_mem, GARUDA_MAX_RULE * 4);

    act_mem = aos_mem_alloc(GARUDA_MAX_RULE * 32);
    if (NULL == act_mem)
    {
        return SW_NO_RESOURCE;
    }
    aos_mem_zero(act_mem, GARUDA_MAX_RULE * 32);

    slct_mem = aos_mem_alloc(GARUDA_MAX_RULE * 32);
    if (NULL == slct_mem)
    {
        return SW_NO_RESOURCE;
    }
    aos_mem_zero(slct_mem, GARUDA_MAX_RULE * 32);
#endif

#ifndef HSL_STANDALONG
    {
        hsl_api_t *p_api;

        SW_RTN_ON_NULL(p_api = hsl_api_ptr_get(dev_id));

        p_api->acl_list_creat = garuda_acl_list_creat;
        p_api->acl_list_destroy = garuda_acl_list_destroy;
        p_api->acl_list_bind = garuda_acl_list_bind;
        p_api->acl_list_unbind = garuda_acl_list_unbind;
        p_api->acl_rule_add = garuda_acl_rule_add;
        p_api->acl_rule_delete = garuda_acl_rule_delete;
        p_api->acl_rule_query = garuda_acl_rule_query;
        p_api->acl_status_set = garuda_acl_status_set;
        p_api->acl_status_get = garuda_acl_status_get;
        p_api->acl_list_dump = garuda_acl_list_dump;
        p_api->acl_rule_dump = garuda_acl_rule_dump;
    }
#endif

    return SW_OK;
}

/**
 * @}
 */