/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <sys/param.h>
#include <sys/ctype.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>

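/*
 * Storage for the error variables referenced below and by the DPDK-style
 * headers; the kernel build has no EAL per-lcore rte_errno, so plain
 * globals are used instead.
 */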
int errno = 0, rte_errno = 0;

#if 0
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>        /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>
#endif

#include "rte_shim.h"
#include "rte_lpm.h"

#if 0
TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
	.name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)
#endif

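/*
 * DIR-24-8 split: prefixes with a depth of at most MAX_DEPTH_TBL24 are
 * expanded directly into the 2^24-entry tbl24, while longer prefixes extend
 * a tbl24 entry into a 256-entry tbl8 group covering the last octet of the
 * address.
 */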
#define MAX_DEPTH_TBL24 24

enum valid_flag {
	INVALID = 0,
	VALID
};

/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {                                \
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
		rte_panic("LPM: Invalid depth (%u) at line %d", \
				(unsigned)(depth), __LINE__);   \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif

/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)		: range = 1 - 32
 * mask   (OUT)		: 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* To calculate a mask start with a 1 on the left hand side and right
	 * shift while populating the left hand side with 1's
	 */
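	/*
	 * e.g. depth 8 -> 0xFF000000, depth 24 -> 0xFFFFFF00,
	 * depth 32 -> 0xFFFFFFFF (relies on an arithmetic right shift of the
	 * signed value to replicate the top bit).
	 */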
	return (int)0x80000000 >> (depth - 1);
}

/*
 * Converts given depth value to its corresponding range value.
 */
static uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/*
	 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
	 */
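	/* e.g. depth 16 covers 1 << 8 = 256 tbl24 entries; depth 28 covers
	 * 1 << 4 = 16 tbl8 entries.
	 */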
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else if depth is greater than 24 */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}

#if 0
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm *
rte_lpm_find_existing(const char *name)
{
	struct rte_lpm *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_mcfg_tailq_read_lock();
	TAILQ_FOREACH(te, lpm_list, next) {
		l = te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_mcfg_tailq_read_unlock();

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return l;
}
#endif

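/*
 * Illustrative usage sketch (not part of the library; table sizes, the
 * 192.0.2.0/24 example prefix and the next-hop index 1 are arbitrary
 * example values):
 *
 *	struct rte_lpm_config cfg = {
 *		.max_rules = 1024,
 *		.number_tbl8s = 256,
 *	};
 *	struct rte_lpm *lpm = rte_lpm_create("example", -1, &cfg);
 *
 *	if (lpm != NULL) {
 *		rte_lpm_add(lpm, 0xC0000200, 24, 1);
 *		rte_lpm_delete(lpm, 0xC0000200, 24, 0, 0);
 *		rte_lpm_free(lpm);
 *	}
 */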
/*
 * Allocates memory for LPM object
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id,
		const struct rte_lpm_config *config)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm *lpm = NULL;
	//struct rte_tailq_entry *te;
	uint32_t mem_size, rules_size, tbl8s_size;
	//struct rte_lpm_list *lpm_list;

	//lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1)
			|| config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm);
	rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
	tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);

#if 0
	rte_mcfg_tailq_write_lock();

	/* guarantee there's no existing */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}

	if (te != NULL) {
		lpm = NULL;
		rte_errno = EEXIST;
		goto exit;
	}

	/* allocate tailq entry */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		rte_errno = ENOMEM;
		goto exit;
	}
#endif

	/* Allocate memory to store the LPM data structures. */
	lpm = rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		//rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm->rules_tbl = rte_zmalloc_socket(NULL,
			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->rules_tbl == NULL) {
		RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
		rte_free(lpm);
		lpm = NULL;
		//rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm->tbl8 = rte_zmalloc_socket(NULL,
			(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->tbl8 == NULL) {
		RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
		rte_free(lpm->rules_tbl);
		rte_free(lpm);
		lpm = NULL;
		//rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = config->max_rules;
	lpm->number_tbl8s = config->number_tbl8s;
	strlcpy(lpm->name, name, sizeof(lpm->name));

	//te->data = lpm;

	//TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_mcfg_tailq_write_unlock();

	return lpm;
}

/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm_free(struct rte_lpm *lpm)
{
#if 0
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_mcfg_tailq_write_lock();

	/* find our tailq entry */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}
	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);

	rte_mcfg_tailq_write_unlock();
#endif

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	rte_free(lpm->tbl8);
	rte_free(lpm->rules_tbl);
	rte_free(lpm);
	//rte_free(te);
}

#if 0
/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
 * to refer to depth 1 because even though the depth range is 1 - 32, depths
 * are stored in the rule table from 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static int32_t
rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
	uint32_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update next hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {

				if (lpm->rules_tbl[rule_index].next_hop
						== next_hop)
					return -EEXIST;
				lpm->rules_tbl[rule_index].next_hop = next_hop;

				return rule_index;
			}
		}

		if (rule_index == lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == lpm->max_rules)
			return -ENOSPC;

		lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
			return -ENOSPC;

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}

/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static void
rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
			+ lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
					lpm->rules_tbl[lpm->rule_info[i].first_rule
						+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;
		}
	}

	lpm->rule_info[depth - 1].used_rules--;
}

/*
 * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static int32_t
rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}
#endif

/*
 * Find, clean and allocate a tbl8.
 */
static int32_t
tbl8_alloc(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
{
	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.next_hop = 0,
				.valid = INVALID,
				.depth = 0,
				.valid_group = VALID,
			};

			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

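			/*
			 * Claim the group by writing the first entry with
			 * valid_group set; the remaining entries stay zeroed
			 * (INVALID) from the memset above.
			 */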
			__atomic_store(tbl8_entry, &new_tbl8_entry,
					__ATOMIC_RELAXED);

			/* Return group index for allocated tbl8 group. */
			return group_idx;
		}
	}

	/* If there are no tbl8 groups free then return error. */
	return -ENOSPC;
}

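/*
 * Return a tbl8 group to the free pool by clearing its first entry
 * (including the valid_group flag that tbl8_alloc() scans for).
 */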
static void
tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
	/* Set tbl8 group invalid */
	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};

	__atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
			__ATOMIC_RELAXED);
}

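/*
 * Install a route with depth <= MAX_DEPTH_TBL24.  Such a route expands into
 * a contiguous run of tbl24 entries (see depth_to_range()); each entry in
 * the run, and any tbl8 entry reached through an already-extended tbl24
 * entry, is overwritten only if it does not hold a more specific route.
 * The group_idx define below reflects that the next_hop field doubles as
 * the tbl8 group index in extended entries.
 */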
static __rte_noinline int32_t
add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl24 entries set
		 * the entry.
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry new_tbl24_entry = {
				.next_hop = next_hop,
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};

			/* Setting tbl24 entry in one go to avoid race
			 * conditions
			 */
			__atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
					__ATOMIC_RELEASE);

			continue;
		}

		if (lpm->tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 * index into tbl8.
			 */
			tbl8_index = lpm->tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!lpm->tbl8[j].valid ||
						lpm->tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
						.next_hop = next_hop,
					};

					/*
					 * Setting tbl8 entry in one go to avoid
					 * race conditions
					 */
					__atomic_store(&lpm->tbl8[j],
						&new_tbl8_entry,
						__ATOMIC_RELAXED);

					continue;
				}
			}
		}
	}
#undef group_idx
	return 0;
}

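/*
 * Install a route with depth > MAX_DEPTH_TBL24.  Three cases are handled:
 * the tbl24 entry is invalid (allocate a fresh tbl8 group), valid but not
 * yet extended (allocate a group, seed all of its entries with the existing
 * tbl24 route, then overwrite the covered range), or already extended
 * (update the covered entries of the existing group in place).  The tbl24
 * entry is published with release ordering only after the tbl8 entries have
 * been written.
 */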
static __rte_noinline int32_t
add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
		tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = depth,
				.valid_group = lpm->tbl8[i].valid_group,
				.next_hop = next_hop,
			};
			__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		/* The tbl24 entry must be written only after the
		 * tbl8 entries are written.
		 */
		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = lpm->tbl24[tbl24_index].depth,
				.valid_group = lpm->tbl8[i].valid_group,
				.next_hop = lpm->tbl24[tbl24_index].next_hop,
			};
			__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = depth,
				.valid_group = lpm->tbl8[i].valid_group,
				.next_hop = next_hop,
			};
			__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
				.group_idx = tbl8_group_index,
				.valid = VALID,
				.valid_group = 1,
				.depth = 0,
		};

		/* The tbl24 entry must be written only after the
		 * tbl8 entries are written.
		 */
		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);

	} else { /*
		* If it is valid, extended entry calculate the index into tbl8.
		*/
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.next_hop = next_hop,
					.valid_group = lpm->tbl8[i].valid_group,
				};

				/*
				 * Setting tbl8 entry in one go to avoid race
				 * condition
				 */
				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
						__ATOMIC_RELAXED);

				continue;
			}
		}
	}
#undef group_idx
	return 0;
}

/*
 * Add a route
 */
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
	int32_t status = 0;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	ip_masked = ip & depth_to_mask(depth);

#if 0
	/* Add the rule to the rule table. */
	rule_index = rule_add(lpm, ip_masked, depth, next_hop);

	/* Skip table entries update if the rule is the same as
	 * the rule in the rules table.
	 */
	if (rule_index == -EEXIST)
		return 0;

	/* If there is no space available for the new rule return an error. */
	if (rule_index < 0) {
		return rule_index;
	}
#endif

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big(lpm, ip_masked, depth, next_hop);

		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * rule that was added to rule table.
		 */
		if (status < 0) {
			//rule_delete(lpm, rule_index, depth);

			return status;
		}
	}

	return 0;
}


#if 0
/*
 * Look for a rule in the high-level rules table
 */
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
uint32_t *next_hop)
{
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) ||
		(next_hop == NULL) ||
		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}

static int32_t
find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);

		rule_index = rule_find(lpm, ip_masked, prev_depth);

		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}

	return -1;
}
#endif

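/*
 * Remove a route with depth <= MAX_DEPTH_TBL24.  The caller supplies the
 * covering (replacement) route, if any: sub_rule_nhop == 0 means there is
 * none, so the affected tbl24/tbl8 entries are invalidated; otherwise they
 * are rewritten with sub_rule_nhop/sub_rule_depth.
 */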
static int32_t
delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, uint32_t sub_rule_nhop, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);
	struct rte_lpm_tbl_entry zero_tbl24_entry = {0};

	/*
	 * Firstly check sub_rule_nhop. A value of zero indicates that there
	 * is no replacement rule, while a non-zero value is the next hop of
	 * the replacement rule.
	 */
	if (sub_rule_nhop == 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				__atomic_store(&lpm->tbl24[i],
					&zero_tbl24_entry, __ATOMIC_RELEASE);
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = sub_rule_nhop,
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
			.next_hop = sub_rule_nhop,
		};

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				__atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
						__ATOMIC_RELEASE);
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						__atomic_store(&lpm->tbl8[j],
							&new_tbl8_entry,
							__ATOMIC_RELAXED);
				}
			}
		}
	}
#undef group_idx
	return 0;
}


/*
 * Checks if table 8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled.
 * Return of value > -1 means tbl8 is in use but has all the same values and
 * thus can be recycled.
 */
static int32_t
tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain a rule with a depth of MAX_DEPTH_TBL24 or
	 * less (such a rule would affect all entries in the tbl8) and thus the
	 * group cannot be collapsed into a single tbl24 entry.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If the first entry is valid check if the depth is 24 or less
		 * and if so check the rest of the entries to verify that they
		 * are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth) {

					return -EEXIST;
				}
			}
			/* If all entries are the same return the tbl8 index */
			return tbl8_group_start;
		}

		return -EEXIST;
	}
	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}

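/*
 * Remove a route with depth > MAX_DEPTH_TBL24 from its tbl8 group, then try
 * to recycle the group: if it ends up empty the tbl24 entry is invalidated
 * and the group freed; if every entry carries the same depth <= 24 route,
 * the group is collapsed back into a plain (non-extended) tbl24 entry.
 */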
static int32_t
delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, uint32_t sub_rule_nhop, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_nhop == 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
			.next_hop = sub_rule_nhop,
		};

		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
						__ATOMIC_RELAXED);
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */

	tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition.
		 * Prevent the free of the tbl8 group from being hoisted
		 * above the tbl24 update.
		 */
		lpm->tbl24[tbl24_index].valid = 0;
		__atomic_thread_fence(__ATOMIC_RELEASE);
		tbl8_free(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition.
		 * Prevent the free of the tbl8 group from being hoisted
		 * above the tbl24 update.
		 */
		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELAXED);
		__atomic_thread_fence(__ATOMIC_RELEASE);
		tbl8_free(lpm->tbl8, tbl8_group_start);
	}
#undef group_idx
	return 0;
}

/*
 * Deletes a rule.  Because the internal rule table is compiled out in this
 * build, the caller supplies the covering (replacement) route, if any, via
 * sub_rule_depth/sub_rule_nhop; sub_rule_nhop == 0 means there is none.
 */
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
	uint8_t sub_rule_depth, uint32_t sub_rule_nhop)
{
	//int32_t rule_to_delete_index;
	uint32_t ip_masked;
	//uint8_t sub_rule_depth;
	/*
	 * Check input arguments. Note: ip is an unsigned 32-bit integer, so
	 * it needs no range check.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}

	ip_masked = ip & depth_to_mask(depth);

#if 0
	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete(lpm, rule_to_delete_index, depth);
#endif

	/*
	 * Find rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete we return -1 and invalidate the table
	 * entries associated with this rule.
	 */
	//sub_rule_depth = *psub_rule_depth;
	//sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small(lpm, ip_masked, depth,
				sub_rule_nhop, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big(lpm, ip_masked, depth, sub_rule_nhop,
				sub_rule_depth);
	}
}

/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm_delete_all(struct rte_lpm *lpm)
{
	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
			* RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}