#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "conntrack.h"
#include "main.h"
#include "../nfp_app.h"

struct nfp_mask_id_table {
	struct hlist_node link;
	u32 hash_key;
	u32 ref_cnt;
	u8 mask_id;
};

struct nfp_fl_flow_table_cmp_arg {
	struct net_device *netdev;
	unsigned long cookie;
};

struct nfp_fl_stats_ctx_to_flow {
	struct rhash_head ht_node;
	u32 stats_cxt;
	struct nfp_fl_payload *flow;
};

static const struct rhashtable_params stats_ctx_table_params = {
	.key_offset = offsetof(struct nfp_fl_stats_ctx_to_flow, stats_cxt),
	.head_offset = offsetof(struct nfp_fl_stats_ctx_to_flow, ht_node),
	.key_len = sizeof(u32),
};

static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
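	/* Check if buffer is full. */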
	if (!CIRC_SPACE(ring->head, ring->tail,
			priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
			NFP_FL_STATS_ELEM_RS + 1))
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
	ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}

static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	u32 freed_stats_id, temp_stats_id;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	freed_stats_id = priv->stats_ring_size;
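	/* Check for unallocated entries first. */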
	if (priv->stats_ids.init_unalloc > 0) {
		*stats_context_id =
			FIELD_PREP(NFP_FL_STAT_ID_STAT,
				   priv->stats_ids.init_unalloc - 1) |
			FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
				   priv->active_mem_unit);

		if (++priv->active_mem_unit == priv->total_mem_units) {
			priv->stats_ids.init_unalloc--;
			priv->active_mem_unit = 0;
		}

		return 0;
	}

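	/* Check if buffer is empty. */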
	if (ring->head == ring->tail) {
		*stats_context_id = freed_stats_id;
		return -ENOENT;
	}

	memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
	*stats_context_id = temp_stats_id;
	memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
	ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}

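/* Must be called with either RTNL or rcu_read_lock */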
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
			   struct net_device *netdev)
{
	struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
	struct nfp_flower_priv *priv = app->priv;

	flower_cmp_arg.netdev = netdev;
	flower_cmp_arg.cookie = tc_flower_cookie;

	return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
				      nfp_flower_table_params);
}

void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_stats_frame *stats;
	unsigned char *msg;
	u32 ctx_id;
	int i;

	msg = nfp_flower_cmsg_get_data(skb);

	spin_lock(&priv->stats_lock);
	for (i = 0; i < msg_len / sizeof(*stats); i++) {
		stats = (struct nfp_fl_stats_frame *)msg + i;
		ctx_id = be32_to_cpu(stats->stats_con_id);
		priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
		priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
		priv->stats[ctx_id].used = jiffies;
	}
	spin_unlock(&priv->stats_lock);
}

static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->mask_ids.mask_id_free_list;
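	/* Checking if buffer is full. */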
	if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	priv->mask_ids.last_used[mask_id] = ktime_get();

	return 0;
}

static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	ktime_t reuse_timeout;
	struct circ_buf *ring;
	u8 temp_id, freed_id;

	ring = &priv->mask_ids.mask_id_free_list;
	freed_id = NFP_FLOWER_MASK_ENTRY_RS - 1;
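	/* Checking for unallocated entries first. */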
	if (priv->mask_ids.init_unallocated > 0) {
		*mask_id = priv->mask_ids.init_unallocated;
		priv->mask_ids.init_unallocated--;
		return 0;
	}

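	/* Checking if buffer is empty. */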
	if (ring->head == ring->tail)
		goto err_not_found;

	memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
	*mask_id = temp_id;

	reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
				     NFP_FL_MASK_REUSE_TIME_NS);

	if (ktime_before(ktime_get(), reuse_timeout))
		goto err_not_found;

	memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->tail = (ring->tail + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	return 0;

err_not_found:
	*mask_id = freed_id;
	return -ENOENT;
}

static int
nfp_add_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;
	u8 mask_id;

	if (nfp_mask_alloc(app, &mask_id))
		return -ENOENT;

	mask_entry = kmalloc(sizeof(*mask_entry), GFP_KERNEL);
	if (!mask_entry) {
		nfp_release_mask_id(app, mask_id);
		return -ENOMEM;
	}

	INIT_HLIST_NODE(&mask_entry->link);
	mask_entry->mask_id = mask_id;
	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);
	mask_entry->hash_key = hash_key;
	mask_entry->ref_cnt = 1;
	hash_add(priv->mask_table, &mask_entry->link, hash_key);

	return mask_id;
}

static struct nfp_mask_id_table *
nfp_search_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;

	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);

	hash_for_each_possible(priv->mask_table, mask_entry, link, hash_key)
		if (mask_entry->hash_key == hash_key)
			return mask_entry;

	return NULL;
}

static int
nfp_find_in_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return -ENOENT;

	mask_entry->ref_cnt++;

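	/* Casting u8 to int for later use. */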
	return mask_entry->mask_id;
}

static bool
nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
		   u8 *meta_flags, u8 *mask_id)
{
	int id;

	id = nfp_find_in_mask_table(app, mask_data, mask_len);
	if (id < 0) {
		id = nfp_add_mask_table(app, mask_data, mask_len);
		if (id < 0)
			return false;
		*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}
	*mask_id = id;

	return true;
}

static bool
nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
		      u8 *meta_flags, u8 *mask_id)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return false;

	*mask_id = mask_entry->mask_id;
	mask_entry->ref_cnt--;
	if (!mask_entry->ref_cnt) {
		hash_del(&mask_entry->link);
		nfp_release_mask_id(app, *mask_id);
		kfree(mask_entry);
		if (meta_flags)
			*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}

	return true;
}

int nfp_compile_flow_metadata(struct nfp_app *app,
			      struct flow_cls_offload *flow,
			      struct nfp_fl_payload *nfp_flow,
			      struct net_device *netdev,
			      struct netlink_ext_ack *extack)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *check_entry;
	u8 new_mask_id;
	u32 stats_cxt;
	int err;

	err = nfp_get_stats_entry(app, &stats_cxt);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate new stats context");
		return err;
	}

	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
	nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
	nfp_flow->ingress_dev = netdev;

	ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
	if (!ctx_entry) {
		err = -ENOMEM;
		goto err_release_stats;
	}

	ctx_entry->stats_cxt = stats_cxt;
	ctx_entry->flow = nfp_flow;

	if (rhashtable_insert_fast(&priv->stats_ctx_table, &ctx_entry->ht_node,
				   stats_ctx_table_params)) {
		err = -ENOMEM;
		goto err_free_ctx_entry;
	}

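	/* Do not allocate a mask-id for pre_tun rules. These flows are used to
	 * configure the pre_tun table and are never actually sent to the
	 * firmware as an add-flow message, so allocating a mask-id here would
	 * leave the firmware's mask-id accounting out of sync.
	 */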
	new_mask_id = 0;
	if (!nfp_flow->pre_tun_rule.dev &&
	    !nfp_check_mask_add(app, nfp_flow->mask_data,
				nfp_flow->meta.mask_len,
				&nfp_flow->meta.flags, &new_mask_id)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
		if (nfp_release_stats_entry(app, stats_cxt)) {
			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
			err = -EINVAL;
			goto err_remove_rhash;
		}
		err = -ENOENT;
		goto err_remove_rhash;
	}

	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;

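	/* Update flow payload with mask ids. */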
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
	priv->stats[stats_cxt].pkts = 0;
	priv->stats[stats_cxt].bytes = 0;
	priv->stats[stats_cxt].used = jiffies;

	check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (check_entry) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
		if (nfp_release_stats_entry(app, stats_cxt)) {
			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
			err = -EINVAL;
			goto err_remove_mask;
		}

		if (!nfp_flow->pre_tun_rule.dev &&
		    !nfp_check_mask_remove(app, nfp_flow->mask_data,
					   nfp_flow->meta.mask_len,
					   NULL, &new_mask_id)) {
			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
			err = -EINVAL;
			goto err_remove_mask;
		}

		err = -EEXIST;
		goto err_remove_mask;
	}

	return 0;

err_remove_mask:
	if (!nfp_flow->pre_tun_rule.dev)
		nfp_check_mask_remove(app, nfp_flow->mask_data,
				      nfp_flow->meta.mask_len,
				      NULL, &new_mask_id);
err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
					    &ctx_entry->ht_node,
					    stats_ctx_table_params));
err_free_ctx_entry:
	kfree(ctx_entry);
err_release_stats:
	nfp_release_stats_entry(app, stats_cxt);

	return err;
}

void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
				struct nfp_fl_payload *nfp_flow)
{
	nfp_flow->meta.flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;
}

int nfp_modify_flow_metadata(struct nfp_app *app,
			     struct nfp_fl_payload *nfp_flow)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;
	u8 new_mask_id = 0;
	u32 temp_ctx_id;

	__nfp_modify_flow_metadata(priv, nfp_flow);

	if (!nfp_flow->pre_tun_rule.dev)
		nfp_check_mask_remove(app, nfp_flow->mask_data,
				      nfp_flow->meta.mask_len,
				      &nfp_flow->meta.flags, &new_mask_id);

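	/* Update flow payload with mask ids. */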
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;

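	/* Release the stats ctx id and ctx to flow table entry. */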
	temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &temp_ctx_id,
					   stats_ctx_table_params);
	if (!ctx_entry)
		return -ENOENT;

	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
					    &ctx_entry->ht_node,
					    stats_ctx_table_params));
	kfree(ctx_entry);

	return nfp_release_stats_entry(app, temp_ctx_id);
}

struct nfp_fl_payload *
nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;

	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id,
					   stats_ctx_table_params);
	if (!ctx_entry)
		return NULL;

	return ctx_entry->flow;
}

static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
			    const void *obj)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
	const struct nfp_fl_payload *flow_entry = obj;

	if (flow_entry->ingress_dev == cmp_arg->netdev)
		return flow_entry->tc_flower_cookie != cmp_arg->cookie;

	return 1;
}

static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_payload *flower_entry = data;

	return jhash2((u32 *)&flower_entry->tc_flower_cookie,
		      sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
		      seed);
}

static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;

	return jhash2((u32 *)&cmp_arg->cookie,
		      sizeof(cmp_arg->cookie) / sizeof(u32), seed);
}

const struct rhashtable_params nfp_flower_table_params = {
	.head_offset = offsetof(struct nfp_fl_payload, fl_node),
	.hashfn = nfp_fl_key_hashfn,
	.obj_cmpfn = nfp_fl_obj_cmpfn,
	.obj_hashfn = nfp_fl_obj_hashfn,
	.automatic_shrinking = true,
};

const struct rhashtable_params merge_table_params = {
	.key_offset = offsetof(struct nfp_merge_info, parent_ctx),
	.head_offset = offsetof(struct nfp_merge_info, ht_node),
	.key_len = sizeof(u64),
};

const struct rhashtable_params nfp_zone_table_params = {
	.head_offset = offsetof(struct nfp_fl_ct_zone_entry, hash_node),
	.key_len = sizeof(u16),
	.key_offset = offsetof(struct nfp_fl_ct_zone_entry, zone),
	.automatic_shrinking = false,
};

const struct rhashtable_params nfp_ct_map_params = {
	.head_offset = offsetof(struct nfp_fl_ct_map_entry, hash_node),
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct nfp_fl_ct_map_entry, cookie),
	.automatic_shrinking = true,
};

int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
			     unsigned int host_num_mems)
{
	struct nfp_flower_priv *priv = app->priv;
	int err, stats_size;

	hash_init(priv->mask_table);

	err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
	if (err)
		return err;

	err = rhashtable_init(&priv->stats_ctx_table, &stats_ctx_table_params);
	if (err)
		goto err_free_flow_table;

	err = rhashtable_init(&priv->merge_table, &merge_table_params);
	if (err)
		goto err_free_stats_ctx_table;

	err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params);
	if (err)
		goto err_free_merge_table;

	err = rhashtable_init(&priv->ct_map_table, &nfp_ct_map_params);
	if (err)
		goto err_free_ct_zone_table;

	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));

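	/* Init ring buffer and unallocated mask_ids. */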
	priv->mask_ids.mask_id_free_list.buf =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
	if (!priv->mask_ids.mask_id_free_list.buf)
		goto err_free_ct_map_table;

	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;

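	/* Init timestamps for mask id. */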
	priv->mask_ids.last_used =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
	if (!priv->mask_ids.last_used)
		goto err_free_mask_id;

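	/* Init ring buffer and unallocated stats_ids. */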
	priv->stats_ids.free_list.buf =
		vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
				   priv->stats_ring_size));
	if (!priv->stats_ids.free_list.buf)
		goto err_free_last_used;

	priv->stats_ids.init_unalloc = div_u64(host_ctx_count, host_num_mems);

	stats_size = FIELD_PREP(NFP_FL_STAT_ID_STAT, host_ctx_count) |
		     FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, host_num_mems - 1);
	priv->stats = kvmalloc_array(stats_size, sizeof(struct nfp_fl_stats),
				     GFP_KERNEL);
	if (!priv->stats)
		goto err_free_ring_buf;

	spin_lock_init(&priv->stats_lock);

	return 0;

err_free_ring_buf:
	vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
	kfree(priv->mask_ids.last_used);
err_free_mask_id:
	kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_ct_map_table:
	rhashtable_destroy(&priv->ct_map_table);
err_free_ct_zone_table:
	rhashtable_destroy(&priv->ct_zone_table);
err_free_merge_table:
	rhashtable_destroy(&priv->merge_table);
err_free_stats_ctx_table:
	rhashtable_destroy(&priv->stats_ctx_table);
err_free_flow_table:
	rhashtable_destroy(&priv->flow_table);
	return -ENOMEM;
}

static void nfp_zone_table_entry_destroy(struct nfp_fl_ct_zone_entry *zt)
{
	if (!zt)
		return;

	if (!list_empty(&zt->pre_ct_list)) {
		struct rhashtable *m_table = &zt->priv->ct_map_table;
		struct nfp_fl_ct_flow_entry *entry, *tmp;
		struct nfp_fl_ct_map_entry *map;

		WARN_ONCE(1, "pre_ct_list not empty as expected, cleaning up\n");
		list_for_each_entry_safe(entry, tmp, &zt->pre_ct_list,
					 list_node) {
			map = rhashtable_lookup_fast(m_table,
						     &entry->cookie,
						     nfp_ct_map_params);
			WARN_ON_ONCE(rhashtable_remove_fast(m_table,
							    &map->hash_node,
							    nfp_ct_map_params));
			nfp_fl_ct_clean_flow_entry(entry);
			kfree(map);
		}
	}

	if (!list_empty(&zt->post_ct_list)) {
		struct rhashtable *m_table = &zt->priv->ct_map_table;
		struct nfp_fl_ct_flow_entry *entry, *tmp;
		struct nfp_fl_ct_map_entry *map;

		WARN_ONCE(1, "post_ct_list not empty as expected, cleaning up\n");
		list_for_each_entry_safe(entry, tmp, &zt->post_ct_list,
					 list_node) {
			map = rhashtable_lookup_fast(m_table,
						     &entry->cookie,
						     nfp_ct_map_params);
			WARN_ON_ONCE(rhashtable_remove_fast(m_table,
							    &map->hash_node,
							    nfp_ct_map_params));
			nfp_fl_ct_clean_flow_entry(entry);
			kfree(map);
		}
	}

	if (zt->nft) {
		nf_flow_table_offload_del_cb(zt->nft,
					     nfp_fl_ct_handle_nft_flow,
					     zt);
		zt->nft = NULL;
	}

	if (!list_empty(&zt->nft_flows_list)) {
		struct rhashtable *m_table = &zt->priv->ct_map_table;
		struct nfp_fl_ct_flow_entry *entry, *tmp;
		struct nfp_fl_ct_map_entry *map;

		WARN_ONCE(1, "nft_flows_list not empty as expected, cleaning up\n");
		list_for_each_entry_safe(entry, tmp, &zt->nft_flows_list,
					 list_node) {
			map = rhashtable_lookup_fast(m_table,
						     &entry->cookie,
						     nfp_ct_map_params);
			WARN_ON_ONCE(rhashtable_remove_fast(m_table,
							    &map->hash_node,
							    nfp_ct_map_params));
			nfp_fl_ct_clean_flow_entry(entry);
			kfree(map);
		}
	}

	rhashtable_free_and_destroy(&zt->tc_merge_tb,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&zt->nft_merge_tb,
				    nfp_check_rhashtable_empty, NULL);

	kfree(zt);
}

static void nfp_free_zone_table_entry(void *ptr, void *arg)
{
	struct nfp_fl_ct_zone_entry *zt = ptr;

	nfp_zone_table_entry_destroy(zt);
}

static void nfp_free_map_table_entry(void *ptr, void *arg)
{
	struct nfp_fl_ct_map_entry *map = ptr;

	if (!map)
		return;

	kfree(map);
}

void nfp_flower_metadata_cleanup(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv)
		return;

	rhashtable_free_and_destroy(&priv->flow_table,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&priv->stats_ctx_table,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&priv->merge_table,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&priv->ct_zone_table,
				    nfp_free_zone_table_entry, NULL);
	nfp_zone_table_entry_destroy(priv->ct_zone_wc);

	rhashtable_free_and_destroy(&priv->ct_map_table,
				    nfp_free_map_table_entry, NULL);
	kvfree(priv->stats);
	kfree(priv->mask_ids.mask_id_free_list.buf);
	kfree(priv->mask_ids.last_used);
	vfree(priv->stats_ids.free_list.buf);
}