// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "conntrack.h"
#include "main.h"
#include "../nfp_app.h"

struct nfp_mask_id_table {
	struct hlist_node link;
	u32 hash_key;
	u32 ref_cnt;
	u8 mask_id;
};

struct nfp_fl_flow_table_cmp_arg {
	struct net_device *netdev;
	unsigned long cookie;
};

struct nfp_fl_stats_ctx_to_flow {
	struct rhash_head ht_node;
	u32 stats_cxt;
	struct nfp_fl_payload *flow;
};

static const struct rhashtable_params stats_ctx_table_params = {
	.key_offset	= offsetof(struct nfp_fl_stats_ctx_to_flow, stats_cxt),
	.head_offset	= offsetof(struct nfp_fl_stats_ctx_to_flow, ht_node),
	.key_len	= sizeof(u32),
};

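/* Return a stats context id to the free-list ring. The ring holds one
 * NFP_FL_STATS_ELEM_RS-sized slot per id; the freed id is written at the
 * head, which then wraps within stats_ring_size.
 */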
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	/* Check if buffer is full; stats_ring_size must be a power of 2 */
	if (!CIRC_SPACE(ring->head, ring->tail, priv->stats_ring_size))
		return -ENOBUFS;

	/* Each increment of head represents size of NFP_FL_STATS_ELEM_RS */
	memcpy(&ring->buf[ring->head * NFP_FL_STATS_ELEM_RS],
	       &stats_context_id, NFP_FL_STATS_ELEM_RS);
	ring->head = (ring->head + 1) & (priv->stats_ring_size - 1);

	return 0;
}

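/* Allocate a stats context id. Never-used ids are handed out first,
 * striped across the available memory units via NFP_FL_STAT_ID_MU_NUM;
 * once those are exhausted, ids are recycled from the tail of the
 * free-list ring.
 */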
static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	u32 freed_stats_id, temp_stats_id;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	freed_stats_id = priv->stats_ring_size;
	/* Check for unallocated entries first. */
	if (priv->stats_ids.init_unalloc > 0) {
		*stats_context_id =
			FIELD_PREP(NFP_FL_STAT_ID_STAT,
				   priv->stats_ids.init_unalloc - 1) |
			FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
				   priv->active_mem_unit);

		if (++priv->active_mem_unit == priv->total_mem_units) {
			priv->stats_ids.init_unalloc--;
			priv->active_mem_unit = 0;
		}

		return 0;
	}

	/* Check if buffer is empty. */
	if (ring->head == ring->tail) {
		*stats_context_id = freed_stats_id;
		return -ENOENT;
	}

	/* Each increment of tail represents size of NFP_FL_STATS_ELEM_RS */
	memcpy(&temp_stats_id, &ring->buf[ring->tail * NFP_FL_STATS_ELEM_RS],
	       NFP_FL_STATS_ELEM_RS);
	*stats_context_id = temp_stats_id;
	memcpy(&ring->buf[ring->tail * NFP_FL_STATS_ELEM_RS], &freed_stats_id,
	       NFP_FL_STATS_ELEM_RS);
	/* stats_ring_size must be a power of 2 */
	ring->tail = (ring->tail + 1) & (priv->stats_ring_size - 1);

	return 0;
}

/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
			   struct net_device *netdev)
{
	struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
	struct nfp_flower_priv *priv = app->priv;

	flower_cmp_arg.netdev = netdev;
	flower_cmp_arg.cookie = tc_flower_cookie;

	return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
				      nfp_flower_table_params);
}

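/* Process a batch of flow stats from a control message: each frame
 * carries a stats context id plus packet/byte deltas, which are
 * accumulated into the matching priv->stats[] entry under stats_lock.
 */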
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_stats_frame *stats;
	unsigned char *msg;
	u32 ctx_id;
	int i;

	msg = nfp_flower_cmsg_get_data(skb);

	spin_lock(&priv->stats_lock);
	for (i = 0; i < msg_len / sizeof(*stats); i++) {
		stats = (struct nfp_fl_stats_frame *)msg + i;
		ctx_id = be32_to_cpu(stats->stats_con_id);
		priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
		priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
		priv->stats[ctx_id].used = jiffies;
	}
	spin_unlock(&priv->stats_lock);
}

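/* Return a mask id to the free-list ring and record the release time so
 * that nfp_mask_alloc() will not hand it out again before
 * NFP_FL_MASK_REUSE_TIME_NS has elapsed.
 */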
static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->mask_ids.mask_id_free_list;
	/* Check if buffer is full;
	 * NFP_FLOWER_MASK_ENTRY_RS must be a power of 2
	 */
	if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
		return -ENOBUFS;

	/* Each increment of head represents size of
	 * NFP_FLOWER_MASK_ELEMENT_RS
	 */
	memcpy(&ring->buf[ring->head * NFP_FLOWER_MASK_ELEMENT_RS], &mask_id,
	       NFP_FLOWER_MASK_ELEMENT_RS);
	ring->head = (ring->head + 1) & (NFP_FLOWER_MASK_ENTRY_RS - 1);

	priv->mask_ids.last_used[mask_id] = ktime_get();

	return 0;
}

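/* Allocate a mask id: never-used ids are drawn down first, then ids are
 * recycled from the ring, but only once their reuse timeout has expired.
 */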
static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	ktime_t reuse_timeout;
	struct circ_buf *ring;
	u8 temp_id, freed_id;

	ring = &priv->mask_ids.mask_id_free_list;
	freed_id = NFP_FLOWER_MASK_ENTRY_RS - 1;
	/* Check for unallocated entries first. */
	if (priv->mask_ids.init_unallocated > 0) {
		*mask_id = priv->mask_ids.init_unallocated;
		priv->mask_ids.init_unallocated--;
		return 0;
	}

	/* Check if buffer is empty. */
	if (ring->head == ring->tail)
		goto err_not_found;

	/* Each increment of tail represents size of
	 * NFP_FLOWER_MASK_ELEMENT_RS
	 */
	memcpy(&temp_id, &ring->buf[ring->tail * NFP_FLOWER_MASK_ELEMENT_RS],
	       NFP_FLOWER_MASK_ELEMENT_RS);
	*mask_id = temp_id;

	reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
				     NFP_FL_MASK_REUSE_TIME_NS);

	if (ktime_before(ktime_get(), reuse_timeout))
		goto err_not_found;

	memcpy(&ring->buf[ring->tail * NFP_FLOWER_MASK_ELEMENT_RS], &freed_id,
	       NFP_FLOWER_MASK_ELEMENT_RS);
	/* NFP_FLOWER_MASK_ENTRY_RS must be a power of 2 */
	ring->tail = (ring->tail + 1) & (NFP_FLOWER_MASK_ENTRY_RS - 1);

	return 0;

err_not_found:
	*mask_id = freed_id;
	return -ENOENT;
}

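/* Allocate a mask id for a new mask and add it to the mask hash table
 * with an initial reference count of one. Returns the (non-negative)
 * mask id on success.
 */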
static int
nfp_add_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;
	u8 mask_id;

	if (nfp_mask_alloc(app, &mask_id))
		return -ENOENT;

	mask_entry = kmalloc(sizeof(*mask_entry), GFP_KERNEL);
	if (!mask_entry) {
		nfp_release_mask_id(app, mask_id);
		return -ENOMEM;
	}

	INIT_HLIST_NODE(&mask_entry->link);
	mask_entry->mask_id = mask_id;
	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);
	mask_entry->hash_key = hash_key;
	mask_entry->ref_cnt = 1;
	hash_add(priv->mask_table, &mask_entry->link, hash_key);

	return mask_id;
}

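/* Look up a mask entry by the jhash of its mask data. Note that only the
 * 32-bit hash is compared, so this relies on hash collisions between
 * distinct masks being rare in practice.
 */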
static struct nfp_mask_id_table *
nfp_search_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;

	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);

	hash_for_each_possible(priv->mask_table, mask_entry, link, hash_key)
		if (mask_entry->hash_key == hash_key)
			return mask_entry;

	return NULL;
}

static int
nfp_find_in_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return -ENOENT;

	mask_entry->ref_cnt++;

	/* Casting u8 to int for later use. */
	return mask_entry->mask_id;
}

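/* Find or create a mask id for the given mask data, taking a reference
 * on an existing entry or allocating a new one. The
 * NFP_FL_META_FLAG_MANAGE_MASK flag is set in meta_flags only when a new
 * entry was allocated.
 */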
static bool
nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
		   u8 *meta_flags, u8 *mask_id)
{
	int id;

	id = nfp_find_in_mask_table(app, mask_data, mask_len);
	if (id < 0) {
		id = nfp_add_mask_table(app, mask_data, mask_len);
		if (id < 0)
			return false;
		*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}
	*mask_id = id;

	return true;
}

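/* Drop a reference on the mask entry matching the given mask data. When
 * the last reference goes, the entry is freed, its id is queued for
 * reuse and NFP_FL_META_FLAG_MANAGE_MASK is set (if meta_flags is
 * non-NULL).
 */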
static bool
nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
		      u8 *meta_flags, u8 *mask_id)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return false;

	*mask_id = mask_entry->mask_id;
	mask_entry->ref_cnt--;
	if (!mask_entry->ref_cnt) {
		hash_del(&mask_entry->link);
		nfp_release_mask_id(app, *mask_id);
		kfree(mask_entry);
		if (meta_flags)
			*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}

	return true;
}

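/* Prepare the host-side metadata for a flow being offloaded: allocate a
 * stats context, map it back to the flow, obtain a mask id (skipped for
 * pre_tun rules, see below) and stamp the flow with the current flower
 * version. Fails with -EEXIST if a flow with the same cookie and ingress
 * netdev is already present in the flow table.
 */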
int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
			      struct nfp_fl_payload *nfp_flow,
			      struct net_device *netdev,
			      struct netlink_ext_ack *extack)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *check_entry;
	u8 new_mask_id;
	u32 stats_cxt;
	int err;

	err = nfp_get_stats_entry(app, &stats_cxt);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate new stats context");
		return err;
	}

	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
	nfp_flow->meta.host_cookie = cpu_to_be64(cookie);
	nfp_flow->ingress_dev = netdev;

	ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
	if (!ctx_entry) {
		err = -ENOMEM;
		goto err_release_stats;
	}

	ctx_entry->stats_cxt = stats_cxt;
	ctx_entry->flow = nfp_flow;

	if (rhashtable_insert_fast(&priv->stats_ctx_table, &ctx_entry->ht_node,
				   stats_ctx_table_params)) {
		err = -ENOMEM;
		goto err_free_ctx_entry;
	}

	/* Do not allocate a mask-id for pre_tun_rules. These flows are used to
	 * configure the pre_tun table and are never actually sent to the
	 * firmware as an add-flow message, so allocating a mask-id here would
	 * leave the firmware's mask-id allocation out of sync.
	 */
	new_mask_id = 0;
	if (!nfp_flow->pre_tun_rule.dev &&
	    !nfp_check_mask_add(app, nfp_flow->mask_data,
				nfp_flow->meta.mask_len,
				&nfp_flow->meta.flags, &new_mask_id)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
		err = -ENOENT;
		goto err_remove_rhash;
	}

	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;

	/* Update flow payload with mask ids. */
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
	priv->stats[stats_cxt].pkts = 0;
	priv->stats[stats_cxt].bytes = 0;
	priv->stats[stats_cxt].used = jiffies;

	check_entry = nfp_flower_search_fl_table(app, cookie, netdev);
	if (check_entry) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
		err = -EEXIST;
		goto err_remove_mask;
	}

	return 0;

err_remove_mask:
	if (!nfp_flow->pre_tun_rule.dev)
		nfp_check_mask_remove(app, nfp_flow->mask_data,
				      nfp_flow->meta.mask_len,
				      NULL, &new_mask_id);
err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
					    &ctx_entry->ht_node,
					    stats_ctx_table_params));
err_free_ctx_entry:
	kfree(ctx_entry);
err_release_stats:
	nfp_release_stats_entry(app, stats_cxt);

	return err;
}

void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
				struct nfp_fl_payload *nfp_flow)
{
	nfp_flow->meta.flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;
}

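/* Reverse the metadata work of nfp_compile_flow_metadata() when a flow
 * is removed: drop the mask reference, release the stats context and
 * delete the stats-context-to-flow mapping.
 */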
int nfp_modify_flow_metadata(struct nfp_app *app,
			     struct nfp_fl_payload *nfp_flow)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;
	u8 new_mask_id = 0;
	u32 temp_ctx_id;

	__nfp_modify_flow_metadata(priv, nfp_flow);

	if (!nfp_flow->pre_tun_rule.dev)
		nfp_check_mask_remove(app, nfp_flow->mask_data,
				      nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
				      &new_mask_id);

	/* Update flow payload with mask ids. */
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;

	/* Release the stats ctx id and ctx to flow table entry. */
	temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &temp_ctx_id,
					   stats_ctx_table_params);
	if (!ctx_entry)
		return -ENOENT;

	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
					    &ctx_entry->ht_node,
					    stats_ctx_table_params));
	kfree(ctx_entry);

	return nfp_release_stats_entry(app, temp_ctx_id);
}

struct nfp_fl_payload *
nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;

	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id,
					   stats_ctx_table_params);
	if (!ctx_entry)
		return NULL;

	return ctx_entry->flow;
}

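/* Flow table hashing is based on the TC cookie alone, while the compare
 * callback below additionally matches the ingress netdev, so identical
 * cookies on different devices hash to the same bucket but remain
 * distinct entries.
 */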
static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
			    const void *obj)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
	const struct nfp_fl_payload *flow_entry = obj;

	if (flow_entry->ingress_dev == cmp_arg->netdev)
		return flow_entry->tc_flower_cookie != cmp_arg->cookie;

	return 1;
}

static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_payload *flower_entry = data;

	return jhash2((u32 *)&flower_entry->tc_flower_cookie,
		      sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
		      seed);
}

static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;

	return jhash2((u32 *)&cmp_arg->cookie,
		      sizeof(cmp_arg->cookie) / sizeof(u32), seed);
}

const struct rhashtable_params nfp_flower_table_params = {
	.head_offset		= offsetof(struct nfp_fl_payload, fl_node),
	.hashfn			= nfp_fl_key_hashfn,
	.obj_cmpfn		= nfp_fl_obj_cmpfn,
	.obj_hashfn		= nfp_fl_obj_hashfn,
	.automatic_shrinking	= true,
};

const struct rhashtable_params merge_table_params = {
	.key_offset	= offsetof(struct nfp_merge_info, parent_ctx),
	.head_offset	= offsetof(struct nfp_merge_info, ht_node),
	.key_len	= sizeof(u64),
};

const struct rhashtable_params nfp_zone_table_params = {
	.head_offset		= offsetof(struct nfp_fl_ct_zone_entry, hash_node),
	.key_len		= sizeof(u16),
	.key_offset		= offsetof(struct nfp_fl_ct_zone_entry, zone),
	.automatic_shrinking	= false,
};

const struct rhashtable_params nfp_ct_map_params = {
	.head_offset		= offsetof(struct nfp_fl_ct_map_entry, hash_node),
	.key_len		= sizeof(unsigned long),
	.key_offset		= offsetof(struct nfp_fl_ct_map_entry, cookie),
	.automatic_shrinking	= true,
};

const struct rhashtable_params neigh_table_params = {
	.key_offset	= offsetof(struct nfp_neigh_entry, neigh_cookie),
	.head_offset	= offsetof(struct nfp_neigh_entry, ht_node),
	.key_len	= sizeof(unsigned long),
};

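/* Set up all flower metadata state: the flow, stats context, merge,
 * conntrack and neighbour tables, the mask and stats id rings, and the
 * stats array sized for host_ctx_count contexts spread over
 * host_num_mems memory units. All allocations are unwound on failure.
 */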
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
			     unsigned int host_num_mems)
{
	struct nfp_flower_priv *priv = app->priv;
	int err, stats_size;

	hash_init(priv->mask_table);

	err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
	if (err)
		return err;

	err = rhashtable_init(&priv->stats_ctx_table, &stats_ctx_table_params);
	if (err)
		goto err_free_flow_table;

	err = rhashtable_init(&priv->merge_table, &merge_table_params);
	if (err)
		goto err_free_stats_ctx_table;

	mutex_init(&priv->nfp_fl_lock);

	err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params);
	if (err)
		goto err_free_merge_table;

	err = rhashtable_init(&priv->ct_map_table, &nfp_ct_map_params);
	if (err)
		goto err_free_ct_zone_table;

	err = rhashtable_init(&priv->neigh_table, &neigh_table_params);
	if (err)
		goto err_free_ct_map_table;

	INIT_LIST_HEAD(&priv->predt_list);

	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));

	/* Init ring buffer and unallocated mask_ids. */
	priv->mask_ids.mask_id_free_list.buf =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
	if (!priv->mask_ids.mask_id_free_list.buf)
		goto err_free_neigh_table;

	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;

	/* Init timestamps for mask ids. */
	priv->mask_ids.last_used =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
	if (!priv->mask_ids.last_used)
		goto err_free_mask_id;

	/* Init ring buffer and unallocated stats_ids. */
	priv->stats_ids.free_list.buf =
		vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
				   priv->stats_ring_size));
	if (!priv->stats_ids.free_list.buf)
		goto err_free_last_used;

	priv->stats_ids.init_unalloc = div_u64(host_ctx_count, host_num_mems);

	stats_size = FIELD_PREP(NFP_FL_STAT_ID_STAT, host_ctx_count) |
		     FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, host_num_mems - 1);
	priv->stats = kvmalloc_array(stats_size, sizeof(struct nfp_fl_stats),
				     GFP_KERNEL);
	if (!priv->stats)
		goto err_free_ring_buf;

	spin_lock_init(&priv->stats_lock);
	spin_lock_init(&priv->predt_lock);

	return 0;

err_free_ring_buf:
	vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
	kfree(priv->mask_ids.last_used);
err_free_mask_id:
	kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_neigh_table:
	rhashtable_destroy(&priv->neigh_table);
err_free_ct_map_table:
	rhashtable_destroy(&priv->ct_map_table);
err_free_ct_zone_table:
	rhashtable_destroy(&priv->ct_zone_table);
err_free_merge_table:
	rhashtable_destroy(&priv->merge_table);
err_free_stats_ctx_table:
	rhashtable_destroy(&priv->stats_ctx_table);
err_free_flow_table:
	rhashtable_destroy(&priv->flow_table);
	return -ENOMEM;
}

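/* Tear down a conntrack zone entry. The pre_ct, post_ct and nft flow
 * lists should already be empty; if not, warn and clean up the leftover
 * entries along with their ct_map_table mappings.
 */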
static void nfp_zone_table_entry_destroy(struct nfp_fl_ct_zone_entry *zt)
{
	if (!zt)
		return;

	if (!list_empty(&zt->pre_ct_list)) {
		struct rhashtable *m_table = &zt->priv->ct_map_table;
		struct nfp_fl_ct_flow_entry *entry, *tmp;
		struct nfp_fl_ct_map_entry *map;

		WARN_ONCE(1, "pre_ct_list unexpectedly not empty, cleaning up\n");
		list_for_each_entry_safe(entry, tmp, &zt->pre_ct_list,
					 list_node) {
			map = rhashtable_lookup_fast(m_table,
						     &entry->cookie,
						     nfp_ct_map_params);
			WARN_ON_ONCE(rhashtable_remove_fast(m_table,
							    &map->hash_node,
							    nfp_ct_map_params));
			nfp_fl_ct_clean_flow_entry(entry);
			kfree(map);
		}
	}

	if (!list_empty(&zt->post_ct_list)) {
		struct rhashtable *m_table = &zt->priv->ct_map_table;
		struct nfp_fl_ct_flow_entry *entry, *tmp;
		struct nfp_fl_ct_map_entry *map;

		WARN_ONCE(1, "post_ct_list unexpectedly not empty, cleaning up\n");
		list_for_each_entry_safe(entry, tmp, &zt->post_ct_list,
					 list_node) {
			map = rhashtable_lookup_fast(m_table,
						     &entry->cookie,
						     nfp_ct_map_params);
			WARN_ON_ONCE(rhashtable_remove_fast(m_table,
							    &map->hash_node,
							    nfp_ct_map_params));
			nfp_fl_ct_clean_flow_entry(entry);
			kfree(map);
		}
	}

	if (zt->nft) {
		nf_flow_table_offload_del_cb(zt->nft,
					     nfp_fl_ct_handle_nft_flow,
					     zt);
		zt->nft = NULL;
	}

	if (!list_empty(&zt->nft_flows_list)) {
		struct rhashtable *m_table = &zt->priv->ct_map_table;
		struct nfp_fl_ct_flow_entry *entry, *tmp;
		struct nfp_fl_ct_map_entry *map;

		WARN_ONCE(1, "nft_flows_list unexpectedly not empty, cleaning up\n");
		list_for_each_entry_safe(entry, tmp, &zt->nft_flows_list,
					 list_node) {
			map = rhashtable_lookup_fast(m_table,
						     &entry->cookie,
						     nfp_ct_map_params);
			WARN_ON_ONCE(rhashtable_remove_fast(m_table,
							    &map->hash_node,
							    nfp_ct_map_params));
			nfp_fl_ct_clean_flow_entry(entry);
			kfree(map);
		}
	}

	rhashtable_free_and_destroy(&zt->tc_merge_tb,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&zt->nft_merge_tb,
				    nfp_check_rhashtable_empty, NULL);

	kfree(zt);
}

static void nfp_free_zone_table_entry(void *ptr, void *arg)
{
	struct nfp_fl_ct_zone_entry *zt = ptr;

	nfp_zone_table_entry_destroy(zt);
}

static void nfp_free_map_table_entry(void *ptr, void *arg)
{
	struct nfp_fl_ct_map_entry *map = ptr;

	if (!map)
		return;

	kfree(map);
}

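/* Release everything set up by nfp_flower_metadata_init(), including any
 * remaining conntrack zone and map entries.
 */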
void nfp_flower_metadata_cleanup(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv)
		return;

	rhashtable_free_and_destroy(&priv->flow_table,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&priv->stats_ctx_table,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&priv->merge_table,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&priv->ct_zone_table,
				    nfp_free_zone_table_entry, NULL);
	nfp_zone_table_entry_destroy(priv->ct_zone_wc);

	rhashtable_free_and_destroy(&priv->ct_map_table,
				    nfp_free_map_table_entry, NULL);
	rhashtable_free_and_destroy(&priv->neigh_table,
				    nfp_check_rhashtable_empty, NULL);
	kvfree(priv->stats);
	kfree(priv->mask_ids.mask_id_free_list.buf);
	kfree(priv->mask_ids.last_used);
	vfree(priv->stats_ids.free_list.buf);
}