
Lines Matching defs:lpm in /freebsd-13-stable/sys/contrib/dpdk_rte_lpm/

108  * Find an existing lpm table and return a pointer to it.
144 struct rte_lpm *lpm = NULL;
163 mem_size = sizeof(*lpm);
173 lpm = te->data;
174 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
179 lpm = NULL;
194 lpm = rte_zmalloc_socket(mem_name, mem_size,
196 if (lpm == NULL) {
203 lpm->rules_tbl = rte_zmalloc_socket(NULL,
206 if (lpm->rules_tbl == NULL) {
208 rte_free(lpm);
209 lpm = NULL;
215 lpm->tbl8 = rte_zmalloc_socket(NULL,
218 if (lpm->tbl8 == NULL) {
220 rte_free(lpm->rules_tbl);
221 rte_free(lpm);
222 lpm = NULL;
229 lpm->max_rules = config->max_rules;
230 lpm->number_tbl8s = config->number_tbl8s;
231 strlcpy(lpm->name, name, sizeof(lpm->name));
233 //te->data = lpm;
240 return lpm;
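
The fragments above are the creation path, rte_lpm_create(): it scans the tailq for an existing table of the same name, then allocates the lpm structure, its rules table, and its tbl8 groups from socket-local memory, unwinding the earlier allocations on each failure. A minimal creation sketch against the upstream DPDK API (the table name, sizing values, and the make_table() helper are illustrative; rte_eal_init() is assumed to have run):

    #include <rte_lpm.h>
    #include <rte_memory.h>     /* SOCKET_ID_ANY */

    static struct rte_lpm *
    make_table(void)
    {
        struct rte_lpm_config config = {
            .max_rules = 1024,    /* stored as lpm->max_rules */
            .number_tbl8s = 256,  /* stored as lpm->number_tbl8s */
            .flags = 0,
        };

        /* NULL on allocation failure or if "fib0" already exists
         * (the strncmp() probe above); rte_errno says which. */
        return rte_lpm_create("fib0", SOCKET_ID_ANY, &config);
    }
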
247 rte_lpm_free(struct rte_lpm *lpm)
254 if (lpm == NULL)
263 if (te->data == (void *) lpm)
272 rte_free(lpm->tbl8);
273 rte_free(lpm->rules_tbl);
274 rte_free(lpm);
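
rte_lpm_free() is the inverse: it unlinks the matching tailq entry, then releases tbl8, the rules table, and the lpm structure itself. Because of the NULL check above it is a no-op on NULL, so callers can tear down unconditionally (destroy_table() here is an illustrative wrapper, not part of the API):

    #include <rte_lpm.h>

    static void
    destroy_table(struct rte_lpm **lpm)
    {
        /* Safe even if creation failed: rte_lpm_free(NULL) returns early. */
        rte_lpm_free(*lpm);
        *lpm = NULL;
    }
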
290 rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
299 if (lpm->rule_info[depth - 1].used_rules > 0) {
302 rule_gindex = lpm->rule_info[depth - 1].first_rule;
306 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
311 if (lpm->rules_tbl[rule_index].ip == ip_masked) {
313 if (lpm->rules_tbl[rule_index].next_hop
316 lpm->rules_tbl[rule_index].next_hop = next_hop;
322 if (rule_index == lpm->max_rules)
329 if (lpm->rule_info[i - 1].used_rules > 0) {
330 rule_index = lpm->rule_info[i - 1].first_rule
331 + lpm->rule_info[i - 1].used_rules;
335 if (rule_index == lpm->max_rules)
338 lpm->rule_info[depth - 1].first_rule = rule_index;
343 if (lpm->rule_info[i - 1].first_rule
344 + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
347 if (lpm->rule_info[i - 1].used_rules > 0) {
348 lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
349 + lpm->rule_info[i - 1].used_rules]
350 = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
351 lpm->rule_info[i - 1].first_rule++;
356 lpm->rules_tbl[rule_index].ip = ip_masked;
357 lpm->rules_tbl[rule_index].next_hop = next_hop;
360 lpm->rule_info[depth - 1].used_rules++;
370 rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
376 lpm->rules_tbl[rule_index] =
377 lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
378 + lpm->rule_info[depth - 1].used_rules - 1];
381 if (lpm->rule_info[i].used_rules > 0) {
382 lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
383 lpm->rules_tbl[lpm->rule_info[i].first_rule
384 + lpm->rule_info[i].used_rules - 1];
385 lpm->rule_info[i].first_rule--;
389 lpm->rule_info[depth - 1].used_rules--;
397 rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
403 rule_gindex = lpm->rule_info[depth - 1].first_rule;
404 last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
409 if (lpm->rules_tbl[rule_index].ip == ip_masked)
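
rule_add(), rule_delete(), and rule_find() all rely on one invariant: rules_tbl stores rules grouped by depth, in increasing depth order, and rule_info[depth - 1] records each group's start (first_rule) and length (used_rules). Insertion makes room with a single element copy per deeper group (its first rule is moved past the group's end, then first_rule advances), and lookup is a linear scan bounded to one group. A simplified, self-contained model of that bookkeeping (the sizes, struct names, and helpers below are stand-ins, not the DPDK structures):

    #include <stdint.h>

    #define MAX_DEPTH 32
    #define MAX_RULES 1024

    struct rule { uint32_t ip; uint32_t next_hop; };
    struct rule_group { uint32_t first_rule; uint32_t used_rules; };

    static struct rule rules_tbl[MAX_RULES];
    static struct rule_group rule_info[MAX_DEPTH];

    /* Bounded linear scan of one depth group, as in rule_find(). */
    static int32_t
    find_rule(uint32_t ip_masked, uint8_t depth)
    {
        uint32_t first = rule_info[depth - 1].first_rule;
        uint32_t last = first + rule_info[depth - 1].used_rules;

        for (uint32_t i = first; i < last; i++)
            if (rules_tbl[i].ip == ip_masked)
                return (int32_t)i;
        return -1;
    }

    /* Shift every deeper group right by one slot, cheaply: copy its first
     * rule into the slot just past its end, then advance first_rule.  This
     * opens the gap where the new rule lands.  (The real rule_add() first
     * verifies that the slot past the last group is actually free.) */
    static void
    make_room(uint8_t depth)
    {
        for (uint32_t i = MAX_DEPTH; i > depth; i--) {
            struct rule_group *g = &rule_info[i - 1];

            if (g->used_rules > 0) {
                rules_tbl[g->first_rule + g->used_rules] =
                    rules_tbl[g->first_rule];
                g->first_rule++;
            }
        }
    }
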
466 add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
481 if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
482 lpm->tbl24[i].depth <= depth)) {
494 __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
500 if (lpm->tbl24[i].valid_group == 1) {
504 tbl8_index = lpm->tbl24[i].group_idx *
510 if (!lpm->tbl8[j].valid ||
511 lpm->tbl8[j].depth <= depth) {
524 __atomic_store(&lpm->tbl8[j],
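
For depths of 24 or less, add_depth_small() programs tbl24 directly: a /depth prefix owns 2^(24 - depth) consecutive slots starting at ip >> 8, and the guard above overwrites a slot only when it is empty or holds an equal-or-shallower route, so more specific entries always win. Slots already extended into a tbl8 group get the update applied to the covered tbl8 entries instead. The span arithmetic, as a standalone sketch (tbl24_span() is an illustrative name):

    #include <stdint.h>

    /* tbl24 slots covered by a /depth prefix, depth <= 24: these are the
     * loop bounds used by add_depth_small(). */
    static void
    tbl24_span(uint32_t ip, uint8_t depth, uint32_t *start, uint32_t *count)
    {
        *start = ip >> 8;            /* top 24 bits index tbl24 */
        *count = 1u << (24 - depth); /* e.g. a /16 covers 256 slots */
    }
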
538 add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
549 if (!lpm->tbl24[tbl24_index].valid) {
551 tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);
568 .valid_group = lpm->tbl8[i].valid_group,
571 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
591 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
595 else if (lpm->tbl24[tbl24_index].valid_group == 0) {
597 tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);
612 .depth = lpm->tbl24[tbl24_index].depth,
613 .valid_group = lpm->tbl8[i].valid_group,
614 .next_hop = lpm->tbl24[tbl24_index].next_hop,
616 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
627 .valid_group = lpm->tbl8[i].valid_group,
630 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
650 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
656 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
663 if (!lpm->tbl8[i].valid ||
664 lpm->tbl8[i].depth <= depth) {
669 .valid_group = lpm->tbl8[i].valid_group,
676 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
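
add_depth_big() handles depths above 24 and branches on the state of the owning tbl24 slot: if it is invalid, a fresh 256-entry tbl8 group is allocated and seeded invalid; if it holds a plain route (valid_group == 0), a group is allocated and the existing shallower route is first replicated across all 256 entries (so lookups that miss the new, deeper prefix still resolve) before the new route is overlaid; if the slot already points at a group, only the covered entries are updated, again gated on depth. The tbl8 indexing, sketched with an illustrative helper:

    #include <stdint.h>

    #define TBL8_GROUP_ENTRIES 256  /* RTE_LPM_TBL8_GROUP_NUM_ENTRIES */

    /* tbl8 entries covered by a /depth prefix, depth > 24. */
    static void
    tbl8_span(uint32_t ip_masked, uint32_t group_index, uint8_t depth,
        uint32_t *start, uint32_t *count)
    {
        uint32_t group_start = group_index * TBL8_GROUP_ENTRIES;

        *start = group_start + (ip_masked & 0xFF); /* low byte picks entry */
        *count = 1u << (32 - depth);               /* a /30 covers 4 entries */
    }
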
691 rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
698 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
705 rule_index = rule_add(lpm, ip_masked, depth, next_hop);
720 status = add_depth_small(lpm, ip_masked, depth, next_hop);
722 status = add_depth_big(lpm, ip_masked, depth, next_hop);
729 //rule_delete(lpm, rule_index, depth);
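
rte_lpm_add() is the public entry point tying these pieces together: validate the depth (1..RTE_LPM_MAX_DEPTH), mask the address, record the rule with rule_add(), then program the tables through add_depth_small() or add_depth_big() depending on the depth. A usage sketch (address in host byte order, as DPDK's LPM expects; the route values and add_route() helper are arbitrary):

    #include <rte_lpm.h>

    static int
    add_route(struct rte_lpm *lpm)
    {
        uint32_t ip = (10u << 24) | (1u << 16); /* 10.1.0.0, host order */

        /* Install 10.1.0.0/16 -> next hop 5; negative return on error. */
        return rte_lpm_add(lpm, ip, 16, 5);
    }
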
743 rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
750 if ((lpm == NULL) ||
757 rule_index = rule_find(lpm, ip_masked, depth);
760 *next_hop = lpm->rules_tbl[rule_index].next_hop;
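
rte_lpm_is_rule_present() is an exact-match probe of the rule table, not a longest-prefix lookup: it only finds a rule with exactly this masked prefix and depth, returning its next hop through the out parameter. For example (probe_route() is an illustrative wrapper):

    #include <rte_lpm.h>

    static int
    probe_route(struct rte_lpm *lpm, uint32_t *next_hop)
    {
        /* 1 = rule found (*next_hop filled in), 0 = absent, < 0 = bad args. */
        return rte_lpm_is_rule_present(lpm, (10u << 24) | (1u << 16),
            16, next_hop);
    }
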
769 find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
779 rule_index = rule_find(lpm, ip_masked, prev_depth);
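
find_previous_rule() serves deletion: entries uncovered by a removed rule must fall back to the longest remaining shorter prefix, so it retries rule_find() at successively smaller depths until some rule covers the address. A simplified version of that walk, reusing find_rule() from the rule-table sketch above (previous_rule() is an illustrative name):

    /* Longest covering rule shorter than 'depth', or -1 if none. */
    static int32_t
    previous_rule(uint32_t ip, uint8_t depth, uint8_t *prev_depth)
    {
        for (int d = depth - 1; d >= 1; d--) {
            uint32_t masked = ip & (0xFFFFFFFFu << (32 - d));
            int32_t idx = find_rule(masked, (uint8_t)d);

            if (idx >= 0) {
                *prev_depth = (uint8_t)d;
                return idx;
            }
        }
        return -1;
    }
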
792 delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
814 if (lpm->tbl24[i].valid_group == 0 &&
815 lpm->tbl24[i].depth <= depth) {
816 __atomic_store(&lpm->tbl24[i],
818 } else if (lpm->tbl24[i].valid_group == 1) {
825 tbl8_group_index = lpm->tbl24[i].group_idx;
832 if (lpm->tbl8[j].depth <= depth)
833 lpm->tbl8[j].valid = INVALID;
859 if (lpm->tbl24[i].valid_group == 0 &&
860 lpm->tbl24[i].depth <= depth) {
861 __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
863 } else if (lpm->tbl24[i].valid_group == 1) {
870 tbl8_group_index = lpm->tbl24[i].group_idx;
877 if (lpm->tbl8[j].depth <= depth)
878 __atomic_store(&lpm->tbl8[j],
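
delete_depth_small() runs one of two loops over the prefix's tbl24 span, chosen by whether a covering shorter rule exists: without one, owned slots are invalidated (and attached tbl8 entries marked INVALID); with one, the same slots are overwritten by an entry rebuilt from the covering rule. Either way the depth test means only entries the deleted rule actually installed are touched, so deeper, more specific routes survive. The shared core, as a simplified model (tbl_entry and rewrite_span() are stand-ins for the DPDK types):

    #include <stdint.h>

    struct tbl_entry {
        uint8_t valid, valid_group, depth;
        uint32_t next_hop;
    };

    /* 'repl' is all-zero (invalidate) when no covering rule exists, or
     * built from the rule found by find_previous_rule() when one does. */
    static void
    rewrite_span(struct tbl_entry *tbl24, uint32_t start, uint32_t count,
        uint8_t depth, struct tbl_entry repl)
    {
        for (uint32_t i = start; i < start + count; i++) {
            if (tbl24[i].valid_group == 0 && tbl24[i].depth <= depth)
                tbl24[i] = repl;          /* slot owned by deleted rule */
            /* valid_group == 1 slots extend into tbl8; the real code
             * applies the same depth test to that group's entries. */
        }
    }
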
945 delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
960 tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
971 if (lpm->tbl8[i].depth <= depth)
972 lpm->tbl8[i].valid = INVALID;
979 .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
988 if (lpm->tbl8[i].depth <= depth)
989 __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
1000 tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);
1006 lpm->tbl24[tbl24_index].valid = 0;
1008 tbl8_free(lpm->tbl8, tbl8_group_start);
1012 .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
1015 .depth = lpm->tbl8[tbl8_recycle_index].depth,
1021 __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
1024 tbl8_free(lpm->tbl8, tbl8_group_start);
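
After a big-depth delete, tbl8_recycle_check() decides the group's fate: entirely invalid means free it and clear the tbl24 slot; 256 uniform entries at depth <= 24 means fold that single route back into the tbl24 slot and free the group; anything else keeps the group. A simplified model of that decision, reusing the tbl_entry model above (the return conventions here are illustrative, not DPDK's):

    /* -2: group empty, free it and clear tbl24;
     * -1: group still needed;
     * >= 0: representative index whose route folds back into tbl24. */
    static int32_t
    recycle_check(const struct tbl_entry *tbl8, uint32_t group_start)
    {
        const struct tbl_entry *first = &tbl8[group_start];

        if (first->valid) {
            if (first->depth > 24)
                return -1;    /* still holds a >24-bit route */
            for (uint32_t i = group_start + 1;
                i < group_start + 256; i++)
                if (!tbl8[i].valid || tbl8[i].depth != first->depth)
                    return -1;        /* mixed: keep the group */
            return (int32_t)group_start;  /* uniform: collapsible */
        }
        for (uint32_t i = group_start + 1; i < group_start + 256; i++)
            if (tbl8[i].valid)
                return -1;
        return -2;                        /* all invalid */
    }
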
1034 rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1044 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1055 rule_to_delete_index = rule_find(lpm, ip_masked, depth);
1065 rule_delete(lpm, rule_to_delete_index, depth);
1074 //sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);
1081 return delete_depth_small(lpm, ip_masked, depth,
1084 return delete_depth_big(lpm, ip_masked, depth, sub_rule_nhop,
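
rte_lpm_delete() mirrors the add path: find the exact rule, remove it from the rule table, then repair tbl24/tbl8 via delete_depth_small() or delete_depth_big(). Note that this FreeBSD contrib copy has diverged from upstream: the find_previous_rule() call is commented out and the replacement route arrives as extra parameters that the listing truncates. The sketch below therefore follows upstream DPDK's three-argument signature, not the fork's (del_route() is an illustrative wrapper):

    #include <rte_lpm.h>

    static int
    del_route(struct rte_lpm *lpm)
    {
        /* Remove 10.1.0.0/16 (upstream signature; the fork adds
         * replacement-route arguments not shown in this listing). */
        return rte_lpm_delete(lpm, (10u << 24) | (1u << 16), 16);
    }
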
1093 rte_lpm_delete_all(struct rte_lpm *lpm)
1096 memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1099 memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1102 memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
1103 * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
1106 memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
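
Finally, rte_lpm_delete_all() resets the table in place by zeroing the per-depth rule bookkeeping, tbl24, the active tbl8 groups, and the rule array, while keeping all allocations, which makes it the cheap way to reuse a table rather than destroying and recreating it:

    /* Flush every route but keep the table and its memory alive. */
    rte_lpm_delete_all(lpm);
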