1
2
3
4#include <linux/kernel.h>
5#include <linux/types.h>
6#include <linux/rhashtable.h>
7#include <linux/bitops.h>
8#include <linux/in6.h>
9#include <linux/notifier.h>
10#include <linux/inetdevice.h>
11#include <linux/netdevice.h>
12#include <linux/if_bridge.h>
13#include <linux/socket.h>
14#include <linux/route.h>
15#include <linux/gcd.h>
16#include <linux/if_macvlan.h>
17#include <linux/refcount.h>
18#include <linux/jhash.h>
19#include <linux/net_namespace.h>
20#include <linux/mutex.h>
21#include <net/netevent.h>
22#include <net/neighbour.h>
23#include <net/arp.h>
24#include <net/ip_fib.h>
25#include <net/ip6_fib.h>
26#include <net/nexthop.h>
27#include <net/fib_rules.h>
28#include <net/ip_tunnels.h>
29#include <net/l3mdev.h>
30#include <net/addrconf.h>
31#include <net/ndisc.h>
32#include <net/ipv6.h>
33#include <net/fib_notifier.h>
34#include <net/switchdev.h>
35
36#include "spectrum.h"
37#include "core.h"
38#include "reg.h"
39#include "spectrum_cnt.h"
40#include "spectrum_dpipe.h"
41#include "spectrum_ipip.h"
42#include "spectrum_mr.h"
43#include "spectrum_mr_tcam.h"
44#include "spectrum_router.h"
45#include "spectrum_span.h"
46
47struct mlxsw_sp_fib;
48struct mlxsw_sp_vr;
49struct mlxsw_sp_lpm_tree;
50struct mlxsw_sp_rif_ops;
51
52struct mlxsw_sp_rif {
53 struct list_head nexthop_list;
54 struct list_head neigh_list;
55 struct net_device *dev;
56 struct mlxsw_sp_fid *fid;
57 unsigned char addr[ETH_ALEN];
58 int mtu;
59 u16 rif_index;
60 u8 mac_profile_id;
61 u16 vr_id;
62 const struct mlxsw_sp_rif_ops *ops;
63 struct mlxsw_sp *mlxsw_sp;
64
65 unsigned int counter_ingress;
66 bool counter_ingress_valid;
67 unsigned int counter_egress;
68 bool counter_egress_valid;
69};
70
71struct mlxsw_sp_rif_params {
72 struct net_device *dev;
73 union {
74 u16 system_port;
75 u16 lag_id;
76 };
77 u16 vid;
78 bool lag;
79};
80
81struct mlxsw_sp_rif_subport {
82 struct mlxsw_sp_rif common;
83 refcount_t ref_count;
84 union {
85 u16 system_port;
86 u16 lag_id;
87 };
88 u16 vid;
89 bool lag;
90};
91
92struct mlxsw_sp_rif_ipip_lb {
93 struct mlxsw_sp_rif common;
94 struct mlxsw_sp_rif_ipip_lb_config lb_config;
95 u16 ul_vr_id;
96 u16 ul_rif_id;
97};
98
99struct mlxsw_sp_rif_params_ipip_lb {
100 struct mlxsw_sp_rif_params common;
101 struct mlxsw_sp_rif_ipip_lb_config lb_config;
102};
103
104struct mlxsw_sp_rif_ops {
105 enum mlxsw_sp_rif_type type;
106 size_t rif_size;
107
108 void (*setup)(struct mlxsw_sp_rif *rif,
109 const struct mlxsw_sp_rif_params *params);
110 int (*configure)(struct mlxsw_sp_rif *rif,
111 struct netlink_ext_ack *extack);
112 void (*deconfigure)(struct mlxsw_sp_rif *rif);
113 struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
114 struct netlink_ext_ack *extack);
115 void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
116};
117
118struct mlxsw_sp_rif_mac_profile {
119 unsigned char mac_prefix[ETH_ALEN];
120 refcount_t ref_count;
121 u8 id;
122};
123
124struct mlxsw_sp_router_ops {
125 int (*init)(struct mlxsw_sp *mlxsw_sp);
126 int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
127};
128
129static struct mlxsw_sp_rif *
130mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
131 const struct net_device *dev);
132static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
133static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
134static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
135 struct mlxsw_sp_lpm_tree *lpm_tree);
136static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
137 const struct mlxsw_sp_fib *fib,
138 u8 tree_id);
139static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
140 const struct mlxsw_sp_fib *fib);
141
142static unsigned int *
143mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
144 enum mlxsw_sp_rif_counter_dir dir)
145{
146 switch (dir) {
147 case MLXSW_SP_RIF_COUNTER_EGRESS:
148 return &rif->counter_egress;
149 case MLXSW_SP_RIF_COUNTER_INGRESS:
150 return &rif->counter_ingress;
151 }
152 return NULL;
153}
154
155static bool
156mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
157 enum mlxsw_sp_rif_counter_dir dir)
158{
159 switch (dir) {
160 case MLXSW_SP_RIF_COUNTER_EGRESS:
161 return rif->counter_egress_valid;
162 case MLXSW_SP_RIF_COUNTER_INGRESS:
163 return rif->counter_ingress_valid;
164 }
165 return false;
166}
167
168static void
169mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
170 enum mlxsw_sp_rif_counter_dir dir,
171 bool valid)
172{
173 switch (dir) {
174 case MLXSW_SP_RIF_COUNTER_EGRESS:
175 rif->counter_egress_valid = valid;
176 break;
177 case MLXSW_SP_RIF_COUNTER_INGRESS:
178 rif->counter_ingress_valid = valid;
179 break;
180 }
181}
182
183static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
184 unsigned int counter_index, bool enable,
185 enum mlxsw_sp_rif_counter_dir dir)
186{
187 char ritr_pl[MLXSW_REG_RITR_LEN];
188 bool is_egress = false;
189 int err;
190
191 if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
192 is_egress = true;
193 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
194 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
195 if (err)
196 return err;
197
198 mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
199 is_egress);
200 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
201}
202
203int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
204 struct mlxsw_sp_rif *rif,
205 enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
206{
207 char ricnt_pl[MLXSW_REG_RICNT_LEN];
208 unsigned int *p_counter_index;
209 bool valid;
210 int err;
211
212 valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
213 if (!valid)
214 return -EINVAL;
215
216 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
217 if (!p_counter_index)
218 return -EINVAL;
219 mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
220 MLXSW_REG_RICNT_OPCODE_NOP);
221 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
222 if (err)
223 return err;
224 *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
225 return 0;
226}
227
228static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
229 unsigned int counter_index)
230{
231 char ricnt_pl[MLXSW_REG_RICNT_LEN];
232
233 mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
234 MLXSW_REG_RICNT_OPCODE_CLEAR);
235 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
236}
237
238int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
239 struct mlxsw_sp_rif *rif,
240 enum mlxsw_sp_rif_counter_dir dir)
241{
242 unsigned int *p_counter_index;
243 int err;
244
245 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
246 if (!p_counter_index)
247 return -EINVAL;
248 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
249 p_counter_index);
250 if (err)
251 return err;
252
253 err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
254 if (err)
255 goto err_counter_clear;
256
257 err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
258 *p_counter_index, true, dir);
259 if (err)
260 goto err_counter_edit;
261 mlxsw_sp_rif_counter_valid_set(rif, dir, true);
262 return 0;
263
264err_counter_edit:
265err_counter_clear:
266 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
267 *p_counter_index);
268 return err;
269}
270
271void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
272 struct mlxsw_sp_rif *rif,
273 enum mlxsw_sp_rif_counter_dir dir)
274{
275 unsigned int *p_counter_index;
276
277 if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
278 return;
279
280 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
281 if (WARN_ON(!p_counter_index))
282 return;
283 mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
284 *p_counter_index, false, dir);
285 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
286 *p_counter_index);
287 mlxsw_sp_rif_counter_valid_set(rif, dir, false);
288}
289
290static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
291{
292 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
293 struct devlink *devlink;
294
295 devlink = priv_to_devlink(mlxsw_sp->core);
296 if (!devlink_dpipe_table_counter_enabled(devlink,
297 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
298 return;
299 mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
300}
301
302static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
303{
304 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
305
306 mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
307}
308
309#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
310
311struct mlxsw_sp_prefix_usage {
312 DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
313};
314
315#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
316 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
317
318static bool
319mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
320 struct mlxsw_sp_prefix_usage *prefix_usage2)
321{
322 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
323}
324
325static void
326mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
327 struct mlxsw_sp_prefix_usage *prefix_usage2)
328{
329 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
330}
331
332static void
333mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
334 unsigned char prefix_len)
335{
336 set_bit(prefix_len, prefix_usage->b);
337}
338
339static void
340mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
341 unsigned char prefix_len)
342{
343 clear_bit(prefix_len, prefix_usage->b);
344}
345
346struct mlxsw_sp_fib_key {
347 unsigned char addr[sizeof(struct in6_addr)];
348 unsigned char prefix_len;
349};
350
351enum mlxsw_sp_fib_entry_type {
352 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
353 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
354 MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
355 MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
356 MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,
357
358
359
360
361
362
363
364 MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
365 MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
366};
367
368struct mlxsw_sp_nexthop_group_info;
369struct mlxsw_sp_nexthop_group;
370struct mlxsw_sp_fib_entry;
371
372struct mlxsw_sp_fib_node {
373 struct mlxsw_sp_fib_entry *fib_entry;
374 struct list_head list;
375 struct rhash_head ht_node;
376 struct mlxsw_sp_fib *fib;
377 struct mlxsw_sp_fib_key key;
378};
379
380struct mlxsw_sp_fib_entry_decap {
381 struct mlxsw_sp_ipip_entry *ipip_entry;
382 u32 tunnel_index;
383};
384
385static struct mlxsw_sp_fib_entry_priv *
386mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
387{
388 struct mlxsw_sp_fib_entry_priv *priv;
389
390 if (!ll_ops->fib_entry_priv_size)
391
392 return NULL;
393
394 priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
395 if (!priv)
396 return ERR_PTR(-ENOMEM);
397 refcount_set(&priv->refcnt, 1);
398 return priv;
399}
400
401static void
402mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
403{
404 kfree(priv);
405}
406
407static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
408{
409 refcount_inc(&priv->refcnt);
410}
411
412static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
413{
414 if (!priv || !refcount_dec_and_test(&priv->refcnt))
415 return;
416 mlxsw_sp_fib_entry_priv_destroy(priv);
417}
418
419static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
420 struct mlxsw_sp_fib_entry_priv *priv)
421{
422 if (!priv)
423 return;
424 mlxsw_sp_fib_entry_priv_hold(priv);
425 list_add(&priv->list, &op_ctx->fib_entry_priv_list);
426}
427
428static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
429{
430 struct mlxsw_sp_fib_entry_priv *priv, *tmp;
431
432 list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
433 mlxsw_sp_fib_entry_priv_put(priv);
434 INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
435}
436
437struct mlxsw_sp_fib_entry {
438 struct mlxsw_sp_fib_node *fib_node;
439 enum mlxsw_sp_fib_entry_type type;
440 struct list_head nexthop_group_node;
441 struct mlxsw_sp_nexthop_group *nh_group;
442 struct mlxsw_sp_fib_entry_decap decap;
443 struct mlxsw_sp_fib_entry_priv *priv;
444};
445
446struct mlxsw_sp_fib4_entry {
447 struct mlxsw_sp_fib_entry common;
448 struct fib_info *fi;
449 u32 tb_id;
450 u8 tos;
451 u8 type;
452};
453
454struct mlxsw_sp_fib6_entry {
455 struct mlxsw_sp_fib_entry common;
456 struct list_head rt6_list;
457 unsigned int nrt6;
458};
459
460struct mlxsw_sp_rt6 {
461 struct list_head list;
462 struct fib6_info *rt;
463};
464
465struct mlxsw_sp_lpm_tree {
466 u8 id;
467 unsigned int ref_count;
468 enum mlxsw_sp_l3proto proto;
469 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
470 struct mlxsw_sp_prefix_usage prefix_usage;
471};
472
473struct mlxsw_sp_fib {
474 struct rhashtable ht;
475 struct list_head node_list;
476 struct mlxsw_sp_vr *vr;
477 struct mlxsw_sp_lpm_tree *lpm_tree;
478 enum mlxsw_sp_l3proto proto;
479 const struct mlxsw_sp_router_ll_ops *ll_ops;
480};
481
482struct mlxsw_sp_vr {
483 u16 id;
484 u32 tb_id;
485 unsigned int rif_count;
486 struct mlxsw_sp_fib *fib4;
487 struct mlxsw_sp_fib *fib6;
488 struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
489 struct mlxsw_sp_rif *ul_rif;
490 refcount_t ul_rif_refcnt;
491};
492
493static int mlxsw_sp_router_ll_basic_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id,
494 enum mlxsw_sp_l3proto proto)
495{
496 return 0;
497}
498
499static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
500{
501 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
502 xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
503}
504
505static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
506{
507 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
508 xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
509}
510
511static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
512{
513 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
514 xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
515}
516
517static const struct rhashtable_params mlxsw_sp_fib_ht_params;
518
519static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
520 struct mlxsw_sp_vr *vr,
521 enum mlxsw_sp_l3proto proto)
522{
523 const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
524 struct mlxsw_sp_lpm_tree *lpm_tree;
525 struct mlxsw_sp_fib *fib;
526 int err;
527
528 err = ll_ops->init(mlxsw_sp, vr->id, proto);
529 if (err)
530 return ERR_PTR(err);
531
532 lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
533 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
534 if (!fib)
535 return ERR_PTR(-ENOMEM);
536 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
537 if (err)
538 goto err_rhashtable_init;
539 INIT_LIST_HEAD(&fib->node_list);
540 fib->proto = proto;
541 fib->vr = vr;
542 fib->lpm_tree = lpm_tree;
543 fib->ll_ops = ll_ops;
544 mlxsw_sp_lpm_tree_hold(lpm_tree);
545 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
546 if (err)
547 goto err_lpm_tree_bind;
548 return fib;
549
550err_lpm_tree_bind:
551 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
552err_rhashtable_init:
553 kfree(fib);
554 return ERR_PTR(err);
555}
556
557static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
558 struct mlxsw_sp_fib *fib)
559{
560 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
561 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
562 WARN_ON(!list_empty(&fib->node_list));
563 rhashtable_destroy(&fib->ht);
564 kfree(fib);
565}
566
567static struct mlxsw_sp_lpm_tree *
568mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
569{
570 static struct mlxsw_sp_lpm_tree *lpm_tree;
571 int i;
572
573 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
574 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
575 if (lpm_tree->ref_count == 0)
576 return lpm_tree;
577 }
578 return NULL;
579}
580
581static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
582 const struct mlxsw_sp_router_ll_ops *ll_ops,
583 struct mlxsw_sp_lpm_tree *lpm_tree)
584{
585 char xralta_pl[MLXSW_REG_XRALTA_LEN];
586
587 mlxsw_reg_xralta_pack(xralta_pl, true,
588 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
589 lpm_tree->id);
590 return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
591}
592
593static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
594 const struct mlxsw_sp_router_ll_ops *ll_ops,
595 struct mlxsw_sp_lpm_tree *lpm_tree)
596{
597 char xralta_pl[MLXSW_REG_XRALTA_LEN];
598
599 mlxsw_reg_xralta_pack(xralta_pl, false,
600 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
601 lpm_tree->id);
602 ll_ops->ralta_write(mlxsw_sp, xralta_pl);
603}
604
605static int
606mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
607 const struct mlxsw_sp_router_ll_ops *ll_ops,
608 struct mlxsw_sp_prefix_usage *prefix_usage,
609 struct mlxsw_sp_lpm_tree *lpm_tree)
610{
611 char xralst_pl[MLXSW_REG_XRALST_LEN];
612 u8 root_bin = 0;
613 u8 prefix;
614 u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
615
616 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
617 root_bin = prefix;
618
619 mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
620 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
621 if (prefix == 0)
622 continue;
623 mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
624 MLXSW_REG_RALST_BIN_NO_CHILD);
625 last_prefix = prefix;
626 }
627 return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
628}
629
630static struct mlxsw_sp_lpm_tree *
631mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
632 const struct mlxsw_sp_router_ll_ops *ll_ops,
633 struct mlxsw_sp_prefix_usage *prefix_usage,
634 enum mlxsw_sp_l3proto proto)
635{
636 struct mlxsw_sp_lpm_tree *lpm_tree;
637 int err;
638
639 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
640 if (!lpm_tree)
641 return ERR_PTR(-EBUSY);
642 lpm_tree->proto = proto;
643 err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
644 if (err)
645 return ERR_PTR(err);
646
647 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
648 if (err)
649 goto err_left_struct_set;
650 memcpy(&lpm_tree->prefix_usage, prefix_usage,
651 sizeof(lpm_tree->prefix_usage));
652 memset(&lpm_tree->prefix_ref_count, 0,
653 sizeof(lpm_tree->prefix_ref_count));
654 lpm_tree->ref_count = 1;
655 return lpm_tree;
656
657err_left_struct_set:
658 mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
659 return ERR_PTR(err);
660}
661
662static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
663 const struct mlxsw_sp_router_ll_ops *ll_ops,
664 struct mlxsw_sp_lpm_tree *lpm_tree)
665{
666 mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
667}
668
669static struct mlxsw_sp_lpm_tree *
670mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
671 struct mlxsw_sp_prefix_usage *prefix_usage,
672 enum mlxsw_sp_l3proto proto)
673{
674 const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
675 struct mlxsw_sp_lpm_tree *lpm_tree;
676 int i;
677
678 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
679 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
680 if (lpm_tree->ref_count != 0 &&
681 lpm_tree->proto == proto &&
682 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
683 prefix_usage)) {
684 mlxsw_sp_lpm_tree_hold(lpm_tree);
685 return lpm_tree;
686 }
687 }
688 return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
689}
690
691static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
692{
693 lpm_tree->ref_count++;
694}
695
696static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
697 struct mlxsw_sp_lpm_tree *lpm_tree)
698{
699 const struct mlxsw_sp_router_ll_ops *ll_ops =
700 mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];
701
702 if (--lpm_tree->ref_count == 0)
703 mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
704}
705
706#define MLXSW_SP_LPM_TREE_MIN 1
707
708static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
709{
710 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
711 struct mlxsw_sp_lpm_tree *lpm_tree;
712 u64 max_trees;
713 int err, i;
714
715 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
716 return -EIO;
717
718 max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
719 mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
720 mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
721 sizeof(struct mlxsw_sp_lpm_tree),
722 GFP_KERNEL);
723 if (!mlxsw_sp->router->lpm.trees)
724 return -ENOMEM;
725
726 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
727 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
728 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
729 }
730
731 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
732 MLXSW_SP_L3_PROTO_IPV4);
733 if (IS_ERR(lpm_tree)) {
734 err = PTR_ERR(lpm_tree);
735 goto err_ipv4_tree_get;
736 }
737 mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
738
739 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
740 MLXSW_SP_L3_PROTO_IPV6);
741 if (IS_ERR(lpm_tree)) {
742 err = PTR_ERR(lpm_tree);
743 goto err_ipv6_tree_get;
744 }
745 mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
746
747 return 0;
748
749err_ipv6_tree_get:
750 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
751 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
752err_ipv4_tree_get:
753 kfree(mlxsw_sp->router->lpm.trees);
754 return err;
755}
756
757static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
758{
759 struct mlxsw_sp_lpm_tree *lpm_tree;
760
761 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
762 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
763
764 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
765 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
766
767 kfree(mlxsw_sp->router->lpm.trees);
768}
769
770static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
771{
772 return !!vr->fib4 || !!vr->fib6 ||
773 !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
774 !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
775}
776
777static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
778{
779 struct mlxsw_sp_vr *vr;
780 int i;
781
782 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
783 vr = &mlxsw_sp->router->vrs[i];
784 if (!mlxsw_sp_vr_is_used(vr))
785 return vr;
786 }
787 return NULL;
788}
789
790static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
791 const struct mlxsw_sp_fib *fib, u8 tree_id)
792{
793 char xraltb_pl[MLXSW_REG_XRALTB_LEN];
794
795 mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
796 (enum mlxsw_reg_ralxx_protocol) fib->proto,
797 tree_id);
798 return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
799}
800
801static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
802 const struct mlxsw_sp_fib *fib)
803{
804 char xraltb_pl[MLXSW_REG_XRALTB_LEN];
805
806
807 mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
808 (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
809 return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
810}
811
812static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
813{
814
815 if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
816 tb_id = RT_TABLE_MAIN;
817 return tb_id;
818}
819
820static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
821 u32 tb_id)
822{
823 struct mlxsw_sp_vr *vr;
824 int i;
825
826 tb_id = mlxsw_sp_fix_tb_id(tb_id);
827
828 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
829 vr = &mlxsw_sp->router->vrs[i];
830 if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
831 return vr;
832 }
833 return NULL;
834}
835
836int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
837 u16 *vr_id)
838{
839 struct mlxsw_sp_vr *vr;
840 int err = 0;
841
842 mutex_lock(&mlxsw_sp->router->lock);
843 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
844 if (!vr) {
845 err = -ESRCH;
846 goto out;
847 }
848 *vr_id = vr->id;
849out:
850 mutex_unlock(&mlxsw_sp->router->lock);
851 return err;
852}
853
854static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
855 enum mlxsw_sp_l3proto proto)
856{
857 switch (proto) {
858 case MLXSW_SP_L3_PROTO_IPV4:
859 return vr->fib4;
860 case MLXSW_SP_L3_PROTO_IPV6:
861 return vr->fib6;
862 }
863 return NULL;
864}
865
866static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
867 u32 tb_id,
868 struct netlink_ext_ack *extack)
869{
870 struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
871 struct mlxsw_sp_fib *fib4;
872 struct mlxsw_sp_fib *fib6;
873 struct mlxsw_sp_vr *vr;
874 int err;
875
876 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
877 if (!vr) {
878 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
879 return ERR_PTR(-EBUSY);
880 }
881 fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
882 if (IS_ERR(fib4))
883 return ERR_CAST(fib4);
884 fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
885 if (IS_ERR(fib6)) {
886 err = PTR_ERR(fib6);
887 goto err_fib6_create;
888 }
889 mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
890 MLXSW_SP_L3_PROTO_IPV4);
891 if (IS_ERR(mr4_table)) {
892 err = PTR_ERR(mr4_table);
893 goto err_mr4_table_create;
894 }
895 mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
896 MLXSW_SP_L3_PROTO_IPV6);
897 if (IS_ERR(mr6_table)) {
898 err = PTR_ERR(mr6_table);
899 goto err_mr6_table_create;
900 }
901
902 vr->fib4 = fib4;
903 vr->fib6 = fib6;
904 vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
905 vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
906 vr->tb_id = tb_id;
907 return vr;
908
909err_mr6_table_create:
910 mlxsw_sp_mr_table_destroy(mr4_table);
911err_mr4_table_create:
912 mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
913err_fib6_create:
914 mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
915 return ERR_PTR(err);
916}
917
918static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
919 struct mlxsw_sp_vr *vr)
920{
921 mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
922 vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
923 mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
924 vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
925 mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
926 vr->fib6 = NULL;
927 mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
928 vr->fib4 = NULL;
929}
930
931static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
932 struct netlink_ext_ack *extack)
933{
934 struct mlxsw_sp_vr *vr;
935
936 tb_id = mlxsw_sp_fix_tb_id(tb_id);
937 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
938 if (!vr)
939 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
940 return vr;
941}
942
943static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
944{
945 if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
946 list_empty(&vr->fib6->node_list) &&
947 mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
948 mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
949 mlxsw_sp_vr_destroy(mlxsw_sp, vr);
950}
951
952static bool
953mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
954 enum mlxsw_sp_l3proto proto, u8 tree_id)
955{
956 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
957
958 if (!mlxsw_sp_vr_is_used(vr))
959 return false;
960 if (fib->lpm_tree->id == tree_id)
961 return true;
962 return false;
963}
964
965static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
966 struct mlxsw_sp_fib *fib,
967 struct mlxsw_sp_lpm_tree *new_tree)
968{
969 struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
970 int err;
971
972 fib->lpm_tree = new_tree;
973 mlxsw_sp_lpm_tree_hold(new_tree);
974 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
975 if (err)
976 goto err_tree_bind;
977 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
978 return 0;
979
980err_tree_bind:
981 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
982 fib->lpm_tree = old_tree;
983 return err;
984}
985
986static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
987 struct mlxsw_sp_fib *fib,
988 struct mlxsw_sp_lpm_tree *new_tree)
989{
990 enum mlxsw_sp_l3proto proto = fib->proto;
991 struct mlxsw_sp_lpm_tree *old_tree;
992 u8 old_id, new_id = new_tree->id;
993 struct mlxsw_sp_vr *vr;
994 int i, err;
995
996 old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
997 old_id = old_tree->id;
998
999 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
1000 vr = &mlxsw_sp->router->vrs[i];
1001 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
1002 continue;
1003 err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
1004 mlxsw_sp_vr_fib(vr, proto),
1005 new_tree);
1006 if (err)
1007 goto err_tree_replace;
1008 }
1009
1010 memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
1011 sizeof(new_tree->prefix_ref_count));
1012 mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
1013 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
1014
1015 return 0;
1016
1017err_tree_replace:
1018 for (i--; i >= 0; i--) {
1019 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
1020 continue;
1021 mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
1022 mlxsw_sp_vr_fib(vr, proto),
1023 old_tree);
1024 }
1025 return err;
1026}
1027
1028static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
1029{
1030 struct mlxsw_sp_vr *vr;
1031 u64 max_vrs;
1032 int i;
1033
1034 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
1035 return -EIO;
1036
1037 max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
1038 mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
1039 GFP_KERNEL);
1040 if (!mlxsw_sp->router->vrs)
1041 return -ENOMEM;
1042
1043 for (i = 0; i < max_vrs; i++) {
1044 vr = &mlxsw_sp->router->vrs[i];
1045 vr->id = i;
1046 }
1047
1048 return 0;
1049}
1050
1051static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
1052
1053static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
1054{
1055
1056
1057
1058
1059
1060
1061
1062 mlxsw_core_flush_owq();
1063 mlxsw_sp_router_fib_flush(mlxsw_sp);
1064 kfree(mlxsw_sp->router->vrs);
1065}
1066
1067u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
1068{
1069 struct net_device *d;
1070 u32 tb_id;
1071
1072 rcu_read_lock();
1073 d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1074 if (d)
1075 tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
1076 else
1077 tb_id = RT_TABLE_MAIN;
1078 rcu_read_unlock();
1079
1080 return tb_id;
1081}
1082
1083static struct mlxsw_sp_rif *
1084mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
1085 const struct mlxsw_sp_rif_params *params,
1086 struct netlink_ext_ack *extack);
1087
1088static struct mlxsw_sp_rif_ipip_lb *
1089mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
1090 enum mlxsw_sp_ipip_type ipipt,
1091 struct net_device *ol_dev,
1092 struct netlink_ext_ack *extack)
1093{
1094 struct mlxsw_sp_rif_params_ipip_lb lb_params;
1095 const struct mlxsw_sp_ipip_ops *ipip_ops;
1096 struct mlxsw_sp_rif *rif;
1097
1098 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1099 lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
1100 .common.dev = ol_dev,
1101 .common.lag = false,
1102 .lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
1103 };
1104
1105 rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
1106 if (IS_ERR(rif))
1107 return ERR_CAST(rif);
1108 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
1109}
1110
/* Allocate and initialize an IPIP entry for overlay device @ol_dev of
 * tunnel type @ipipt: create the loopback RIF used for encap/decap and
 * program the tunnel's remote IP.  Returns ERR_PTR() on failure.
 */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;
	int err;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	/* Errors are not reported to user space here (NULL extack). */
	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;
	ipip_entry->parms = ipip_ops->parms_init(ol_dev);

	err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
	if (err) {
		ret = ERR_PTR(err);
		goto err_rem_ip_addr_set;
	}

	return ipip_entry;

err_rem_ip_addr_set:
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}
1151
1152static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
1153 struct mlxsw_sp_ipip_entry *ipip_entry)
1154{
1155 const struct mlxsw_sp_ipip_ops *ipip_ops =
1156 mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1157
1158 ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
1159 mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1160 kfree(ipip_entry);
1161}
1162
1163static bool
1164mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1165 const enum mlxsw_sp_l3proto ul_proto,
1166 union mlxsw_sp_l3addr saddr,
1167 u32 ul_tb_id,
1168 struct mlxsw_sp_ipip_entry *ipip_entry)
1169{
1170 u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1171 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1172 union mlxsw_sp_l3addr tun_saddr;
1173
1174 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1175 return false;
1176
1177 tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1178 return tun_ul_tb_id == ul_tb_id &&
1179 mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1180}
1181
1182static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
1183 enum mlxsw_sp_ipip_type ipipt)
1184{
1185 const struct mlxsw_sp_ipip_ops *ipip_ops;
1186
1187 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1188
1189
1190
1191
1192 if (ipip_ops->inc_parsing_depth)
1193 return mlxsw_sp_parsing_depth_inc(mlxsw_sp);
1194
1195 return 0;
1196}
1197
1198static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
1199 enum mlxsw_sp_ipip_type ipipt)
1200{
1201 const struct mlxsw_sp_ipip_ops *ipip_ops =
1202 mlxsw_sp->router->ipip_ops_arr[ipipt];
1203
1204 if (ipip_ops->inc_parsing_depth)
1205 mlxsw_sp_parsing_depth_dec(mlxsw_sp);
1206}
1207
1208static int
1209mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1210 struct mlxsw_sp_fib_entry *fib_entry,
1211 struct mlxsw_sp_ipip_entry *ipip_entry)
1212{
1213 u32 tunnel_index;
1214 int err;
1215
1216 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1217 1, &tunnel_index);
1218 if (err)
1219 return err;
1220
1221 err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
1222 ipip_entry->ipipt);
1223 if (err)
1224 goto err_parsing_depth_inc;
1225
1226 ipip_entry->decap_fib_entry = fib_entry;
1227 fib_entry->decap.ipip_entry = ipip_entry;
1228 fib_entry->decap.tunnel_index = tunnel_index;
1229
1230 return 0;
1231
1232err_parsing_depth_inc:
1233 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
1234 fib_entry->decap.tunnel_index);
1235 return err;
1236}
1237
/* Undo mlxsw_sp_fib_entry_decap_init(): unlink the FIB entry from its
 * IPIP entry and release the parsing depth and adjacency index.
 */
static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	/* Cache the tunnel type before severing the link below. */
	enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;

	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   1, fib_entry->decap.tunnel_index);
}
1250
1251static struct mlxsw_sp_fib_node *
1252mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1253 size_t addr_len, unsigned char prefix_len);
1254static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1255 struct mlxsw_sp_fib_entry *fib_entry);
1256
1257static void
1258mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1259 struct mlxsw_sp_ipip_entry *ipip_entry)
1260{
1261 struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1262
1263 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1264 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1265
1266 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1267}
1268
/* Try to turn @decap_fib_entry (currently a trap entry) into a hardware
 * IP-in-IP decap entry bound to @ipip_entry.  On failure the entry is
 * left as / demoted back to a trap entry, so traffic still flows.
 */
static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	/* If the device update fails, roll back to the trap entry. */
	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}
1282
/* Look up a local (IP2ME) FIB entry of type @type for host address @addr
 * in table @tb_id.  Only IPv4 is supported here; IPv6 triggers a WARN
 * and returns NULL.  Returns NULL when the VR, FIB node or a matching
 * entry is absent.
 */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				     enum mlxsw_sp_l3proto proto,
				     const union mlxsw_sp_l3addr *addr,
				     enum mlxsw_sp_fib_entry_type type)
{
	struct mlxsw_sp_fib_node *fib_node;
	unsigned char addr_prefix_len;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	const void *addrp;
	size_t addr_len;
	u32 addr4;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, proto);

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		/* FIB nodes key on host byte order; look up a /32. */
		addr4 = be32_to_cpu(addr->addr4);
		addrp = &addr4;
		addr_len = 4;
		addr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
					    addr_prefix_len);
	if (!fib_node || fib_node->fib_entry->type != type)
		return NULL;

	return fib_node->fib_entry;
}
1322
1323
1324static struct mlxsw_sp_fib_entry *
1325mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1326 struct mlxsw_sp_ipip_entry *ipip_entry)
1327{
1328 static struct mlxsw_sp_fib_node *fib_node;
1329 const struct mlxsw_sp_ipip_ops *ipip_ops;
1330 unsigned char saddr_prefix_len;
1331 union mlxsw_sp_l3addr saddr;
1332 struct mlxsw_sp_fib *ul_fib;
1333 struct mlxsw_sp_vr *ul_vr;
1334 const void *saddrp;
1335 size_t saddr_len;
1336 u32 ul_tb_id;
1337 u32 saddr4;
1338
1339 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1340
1341 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1342 ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1343 if (!ul_vr)
1344 return NULL;
1345
1346 ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1347 saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1348 ipip_entry->ol_dev);
1349
1350 switch (ipip_ops->ul_proto) {
1351 case MLXSW_SP_L3_PROTO_IPV4:
1352 saddr4 = be32_to_cpu(saddr.addr4);
1353 saddrp = &saddr4;
1354 saddr_len = 4;
1355 saddr_prefix_len = 32;
1356 break;
1357 case MLXSW_SP_L3_PROTO_IPV6:
1358 saddrp = &saddr.addr6;
1359 saddr_len = 16;
1360 saddr_prefix_len = 128;
1361 break;
1362 default:
1363 WARN_ON(1);
1364 return NULL;
1365 }
1366
1367 fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1368 saddr_prefix_len);
1369 if (!fib_node ||
1370 fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1371 return NULL;
1372
1373 return fib_node->fib_entry;
1374}
1375
1376static struct mlxsw_sp_ipip_entry *
1377mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1378 enum mlxsw_sp_ipip_type ipipt,
1379 struct net_device *ol_dev)
1380{
1381 struct mlxsw_sp_ipip_entry *ipip_entry;
1382
1383 ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1384 if (IS_ERR(ipip_entry))
1385 return ipip_entry;
1386
1387 list_add_tail(&ipip_entry->ipip_list_node,
1388 &mlxsw_sp->router->ipip_list);
1389
1390 return ipip_entry;
1391}
1392
/* Unlink an IPIP entry from the router's tunnel list and free it. */
static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
}
1400
1401static bool
1402mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1403 const struct net_device *ul_dev,
1404 enum mlxsw_sp_l3proto ul_proto,
1405 union mlxsw_sp_l3addr ul_dip,
1406 struct mlxsw_sp_ipip_entry *ipip_entry)
1407{
1408 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1409 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1410
1411 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1412 return false;
1413
1414 return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1415 ul_tb_id, ipip_entry);
1416}
1417
1418
/* Find the offloaded tunnel, if any, that would decapsulate a packet
 * received on the device with ifindex @ul_dev_ifindex, with underlay
 * destination @ul_dip and protocol @ul_proto.  Uses RCU to safely
 * resolve the ifindex.
 */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	struct net_device *ul_dev;

	rcu_read_lock();

	ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
	if (!ul_dev)
		goto out_unlock;

	/* On a match, jump out with ipip_entry pointing at the hit. */
	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			goto out_unlock;

	rcu_read_unlock();

	return NULL;

out_unlock:
	rcu_read_unlock();
	return ipip_entry;
}
1448
1449static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1450 const struct net_device *dev,
1451 enum mlxsw_sp_ipip_type *p_type)
1452{
1453 struct mlxsw_sp_router *router = mlxsw_sp->router;
1454 const struct mlxsw_sp_ipip_ops *ipip_ops;
1455 enum mlxsw_sp_ipip_type ipipt;
1456
1457 for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1458 ipip_ops = router->ipip_ops_arr[ipipt];
1459 if (dev->type == ipip_ops->dev_type) {
1460 if (p_type)
1461 *p_type = ipipt;
1462 return true;
1463 }
1464 }
1465 return false;
1466}
1467
/* Return true if @dev is of a netdevice type the driver can offload as an
 * IP-in-IP overlay (tunnel) device.
 */
bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}
1473
1474static struct mlxsw_sp_ipip_entry *
1475mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1476 const struct net_device *ol_dev)
1477{
1478 struct mlxsw_sp_ipip_entry *ipip_entry;
1479
1480 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1481 ipip_list_node)
1482 if (ipip_entry->ol_dev == ol_dev)
1483 return ipip_entry;
1484
1485 return NULL;
1486}
1487
/* Find the next IPIP entry (after @start, or from the list head when
 * @start is NULL) whose underlay device is @ul_dev.  Passing the previous
 * result as @start makes this a resumable iteration.
 */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ol_dev = ipip_entry->ol_dev;
		struct net_device *ipip_ul_dev;

		/* RCU protects the underlay device lookup only; the
		 * returned pointer is compared, not dereferenced.
		 */
		rcu_read_lock();
		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		rcu_read_unlock();

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}
1512
1513bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
1514 const struct net_device *dev)
1515{
1516 bool is_ipip_ul;
1517
1518 mutex_lock(&mlxsw_sp->router->lock);
1519 is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1520 mutex_unlock(&mlxsw_sp->router->lock);
1521
1522 return is_ipip_ul;
1523}
1524
1525static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1526 const struct net_device *ol_dev,
1527 enum mlxsw_sp_ipip_type ipipt)
1528{
1529 const struct mlxsw_sp_ipip_ops *ops
1530 = mlxsw_sp->router->ipip_ops_arr[ipipt];
1531
1532 return ops->can_offload(mlxsw_sp, ol_dev);
1533}
1534
/* NETDEV_REGISTER handler for a tunnel overlay device: if the tunnel is
 * offloadable and no other offloaded tunnel claims the same underlay
 * table + local address, create an IPIP entry for it.
 */
static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		/* If another offloaded tunnel already uses this local
		 * address, it is demoted and the new tunnel is not
		 * offloaded either.
		 */
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}
1561
/* NETDEV_UNREGISTER handler: drop the offload state of a tunnel overlay
 * device that is going away, if it was offloaded.
 */
static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}
1571
/* The tunnel overlay device came up: if the local route matching the
 * tunnel's underlay source address exists as a trap entry, promote it to
 * a hardware decap entry.
 */
static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
	if (!fib_entry)
		return;
	mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry, fib_entry);
}
1583
/* Program (per @enable, enable or disable) the RITR register for the
 * tunnel's loopback RIF: underlay VR @ul_vr_id / RIF @ul_rif_id, plus the
 * tunnel source address and GRE key taken from the RIF's lb_config.
 */
static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
			u16 ul_rif_id, bool enable)
{
	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
	enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
	struct mlxsw_sp_rif *rif = &lb_rif->common;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	struct in6_addr *saddr6;
	u32 saddr4;

	ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
	switch (lb_cf.ul_protocol) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
						   ipip_options, ul_vr_id,
						   ul_rif_id, saddr4,
						   lb_cf.okey);
		break;

	case MLXSW_SP_L3_PROTO_IPV6:
		saddr6 = &lb_cf.saddr.addr6;
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
						   ipip_options, ul_vr_id,
						   ul_rif_id, saddr6,
						   lb_cf.okey);
		break;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
1621
1622static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1623 struct net_device *ol_dev)
1624{
1625 struct mlxsw_sp_ipip_entry *ipip_entry;
1626 struct mlxsw_sp_rif_ipip_lb *lb_rif;
1627 int err = 0;
1628
1629 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1630 if (ipip_entry) {
1631 lb_rif = ipip_entry->ol_lb;
1632 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1633 lb_rif->ul_rif_id, true);
1634 if (err)
1635 goto out;
1636 lb_rif->common.mtu = ol_dev->mtu;
1637 }
1638
1639out:
1640 return err;
1641}
1642
/* NETDEV_UP handler for a tunnel overlay device. */
static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);

	if (!ipip_entry)
		return;
	mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}
1652
/* The tunnel overlay device went down: stop decapsulating in hardware,
 * if a decap route was bound to the tunnel.
 */
static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}
1660
/* NETDEV_DOWN handler for a tunnel overlay device. */
static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);

	if (!ipip_entry)
		return;
	mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}
1670
1671static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1672 struct mlxsw_sp_rif *old_rif,
1673 struct mlxsw_sp_rif *new_rif);
/* Replace the tunnel's loopback RIF with a freshly-created one.  The new
 * RIF is created before the old one is destroyed so that, when
 * @keep_encap is set, encap nexthops can be migrated from the old RIF to
 * the new one instead of being torn down.
 */
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry,
				 bool keep_encap,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;

	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
						     ipip_entry->ipipt,
						     ipip_entry->ol_dev,
						     extack);
	if (IS_ERR(new_lb_rif))
		return PTR_ERR(new_lb_rif);
	ipip_entry->ol_lb = new_lb_rif;

	if (keep_encap)
		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
					     &new_lb_rif->common);

	mlxsw_sp_rif_destroy(&old_lb_rif->common);

	return 0;
}
1699
1700static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1701 struct mlxsw_sp_rif *rif);
1702
/* Update the hardware state of @ipip_entry after a configuration or
 * underlay change:
 *
 * - @recreate_loopback: rebuild the tunnel's loopback RIF (e.g. after an
 *   underlay VRF move); @keep_encap additionally migrates encap nexthops
 *   to the new RIF instead of dropping them.
 * - @update_nexthops: only refresh nexthops that egress via the loopback
 *   RIF (e.g. after underlay up/down).
 *
 * The decap route, if bound, is always demoted first and re-promoted at
 * the end, provided the overlay device is up.
 */
int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry,
					bool recreate_loopback,
					bool keep_encap,
					bool update_nexthops,
					struct netlink_ext_ack *extack)
{
	int err;

	/* Drop the decap binding before touching the loopback RIF, so it
	 * can be cleanly re-established against the updated state below.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	if (recreate_loopback) {
		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
						       keep_encap, extack);
		if (err)
			return err;
	} else if (update_nexthops) {
		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
					    &ipip_entry->ol_lb->common);
	}

	if (ipip_entry->ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);

	return 0;
}
1749
1750static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1751 struct net_device *ol_dev,
1752 struct netlink_ext_ack *extack)
1753{
1754 struct mlxsw_sp_ipip_entry *ipip_entry =
1755 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1756
1757 if (!ipip_entry)
1758 return 0;
1759
1760 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1761 true, false, false, extack);
1762}
1763
/* The underlay device of @ipip_entry moved to another VRF.  If the
 * tunnel's local address now conflicts with another offloaded tunnel in
 * the new underlay table, that tunnel is demoted and the caller is asked
 * (via @demote_this) to demote this one as well; otherwise the loopback
 * RIF is recreated (keeping encap nexthops) against the new table.
 */
static int
mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_ipip_entry *ipip_entry,
				     struct net_device *ul_dev,
				     bool *demote_this,
				     struct netlink_ext_ack *extack)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;

	/* Moving the underlay to a different table can cause a local
	 * address conflict with tunnels already bound to that table.
	 */
	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
						 saddr, ul_tb_id,
						 ipip_entry)) {
		*demote_this = true;
		return 0;
	}

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, true, false, extack);
}
1790
/* Underlay device came up: refresh the nexthops that egress through the
 * tunnel's loopback RIF; the RIF itself is unchanged.
 */
static int
mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipip_entry *ipip_entry,
				    struct net_device *ul_dev)
{
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}
1799
static int
mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_ipip_entry *ipip_entry,
				      struct net_device *ul_dev)
{
	/* A down underlay device invalidates the nexthops using the
	 * tunnel; refresh them.  The tunnel itself remains offloaded.
	 */
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}
1812
/* NETDEV_CHANGE handler for a tunnel overlay device: re-validate
 * offloadability after the configuration change and let the tunnel
 * type's ops apply the new netdevice parameters.
 */
static int
mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
					struct net_device *ol_dev,
					struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		/* The tunnel is not offloaded; nothing to update. */
		return 0;

	/* The change may have made the tunnel un-offloadable. */
	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
						 ipip_entry->ipipt)) {
		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
		return 0;
	}

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
	return err;
}
1841
1842void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1843 struct mlxsw_sp_ipip_entry *ipip_entry)
1844{
1845 struct net_device *ol_dev = ipip_entry->ol_dev;
1846
1847 if (ol_dev->flags & IFF_UP)
1848 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1849 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1850}
1851
/* Demote (stop offloading) a tunnel, other than @except, whose underlay
 * table is @ul_tb_id and whose local address matches @saddr/@ul_proto.
 * Returns true if such a tunnel was found and demoted.
 */
bool
mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
				     enum mlxsw_sp_l3proto ul_proto,
				     union mlxsw_sp_l3addr saddr,
				     u32 ul_tb_id,
				     const struct mlxsw_sp_ipip_entry *except)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	/* _safe variant: demoting removes the entry from the list. */
	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		if (ipip_entry != except &&
		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
						      ul_tb_id, ipip_entry)) {
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			return true;
		}
	}

	return false;
}
1879
1880static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1881 struct net_device *ul_dev)
1882{
1883 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1884
1885 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1886 ipip_list_node) {
1887 struct net_device *ol_dev = ipip_entry->ol_dev;
1888 struct net_device *ipip_ul_dev;
1889
1890 rcu_read_lock();
1891 ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1892 rcu_read_unlock();
1893 if (ipip_ul_dev == ul_dev)
1894 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1895 }
1896}
1897
/* Dispatch a netdevice event on a tunnel overlay device @ol_dev to the
 * appropriate handler.  Serialized against the rest of the router code
 * via the router lock.
 */
int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
				     struct net_device *ol_dev,
				     unsigned long event,
				     struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	switch (event) {
	case NETDEV_REGISTER:
		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_UNREGISTER:
		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_UP:
		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_DOWN:
		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		/* Only a VRF (L3 master) enslavement is of interest. */
		if (netif_is_l3_master(chup->upper_dev))
			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
								   ol_dev,
								   extack);
		break;
	case NETDEV_CHANGE:
		extack = info->extack;
		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
							      ol_dev, extack);
		break;
	case NETDEV_CHANGEMTU:
		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}
1941
/* Handle a netdevice event on @ul_dev in its role as the underlay device
 * of @ipip_entry.  May ask the caller (via @demote_this) to demote the
 * entry when an underlay VRF move creates a local-address conflict.
 */
static int
__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_ipip_entry *ipip_entry,
				   struct net_device *ul_dev,
				   bool *demote_this,
				   unsigned long event,
				   struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		/* Only a VRF (L3 master) enslavement is of interest. */
		if (netif_is_l3_master(chup->upper_dev))
			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
								    ipip_entry,
								    ul_dev,
								    demote_this,
								    extack);
		break;

	case NETDEV_UP:
		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
							   ul_dev);
	case NETDEV_DOWN:
		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
							     ipip_entry,
							     ul_dev);
	}
	return 0;
}
1975
/* Dispatch a netdevice event on @ul_dev to every tunnel that uses it as
 * underlay.  If a handler fails, all tunnels over @ul_dev are demoted.
 */
int
mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *ul_dev,
				 unsigned long event,
				 struct netdev_notifier_info *info)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
								ul_dev,
								ipip_entry))) {
		struct mlxsw_sp_ipip_entry *prev;
		bool demote_this = false;

		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
							 ul_dev, &demote_this,
							 event, info);
		if (err) {
			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
								 ul_dev);
			break;
		}

		if (demote_this) {
			if (list_is_first(&ipip_entry->ipip_list_node,
					  &mlxsw_sp->router->ipip_list))
				prev = NULL;
			else
				/* This cannot be cached from a previous
				 * iteration, because that entry could be
				 * gone by now.
				 */
				prev = list_prev_entry(ipip_entry,
						       ipip_list_node);
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			/* Resume the iteration from the predecessor. */
			ipip_entry = prev;
		}
	}
	mutex_unlock(&mlxsw_sp->router->lock);

	return err;
}
2019
/* Record the NVE decap configuration and, if a matching local route
 * already exists as a trap entry, promote it to an NVE decap entry
 * pointing at @tunnel_index.
 */
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
				      enum mlxsw_sp_l3proto ul_proto,
				      const union mlxsw_sp_l3addr *ul_sip,
				      u32 tunnel_index)
{
	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_fib_entry *fib_entry;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);

	/* Only a single NVE decap configuration is supported at a time. */
	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
		err = -EINVAL;
		goto out;
	}

	router->nve_decap_config.ul_tb_id = ul_tb_id;
	router->nve_decap_config.tunnel_index = tunnel_index;
	router->nve_decap_config.ul_proto = ul_proto;
	router->nve_decap_config.ul_sip = *ul_sip;
	router->nve_decap_config.valid = true;

	/* It is valid to create a tunnel with a local IP and only later
	 * assign this IP address to a local interface, so the route may
	 * not exist yet; in that case only the config is recorded.
	 */
	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 ul_proto, ul_sip,
							 type);
	if (!fib_entry)
		goto out;

	fib_entry->decap.tunnel_index = tunnel_index;
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;

	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
	if (err)
		goto err_fib_entry_update;

	goto out;

err_fib_entry_update:
	/* Roll the entry back to a trap so traffic keeps flowing. */
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}
2068
/* Invalidate the NVE decap configuration and, if the matching local
 * route was promoted to an NVE decap entry, degrade it back to a trap.
 */
void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
				      enum mlxsw_sp_l3proto ul_proto,
				      const union mlxsw_sp_l3addr *ul_sip)
{
	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_fib_entry *fib_entry;

	mutex_lock(&mlxsw_sp->router->lock);

	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
		goto out;

	router->nve_decap_config.valid = false;

	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 ul_proto, ul_sip,
							 type);
	if (!fib_entry)
		goto out;

	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}
2095
2096static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
2097 u32 ul_tb_id,
2098 enum mlxsw_sp_l3proto ul_proto,
2099 const union mlxsw_sp_l3addr *ul_sip)
2100{
2101 struct mlxsw_sp_router *router = mlxsw_sp->router;
2102
2103 return router->nve_decap_config.valid &&
2104 router->nve_decap_config.ul_tb_id == ul_tb_id &&
2105 router->nve_decap_config.ul_proto == ul_proto &&
2106 !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
2107 sizeof(*ul_sip));
2108}
2109
/* Hash table key for a neighbour entry: the kernel neighbour itself. */
struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};
2113
/* Driver-side representation of a kernel neighbour tracked on a RIF. */
struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;	/* Member of the RIF's neigh_list. */
	struct rhash_head ht_node;	/* Member of router->neigh_ht. */
	struct mlxsw_sp_neigh_key key;	/* The kernel neighbour. */
	u16 rif;			/* Index of the RIF it lives on. */
	bool connected;			/* Set elsewhere in this file when
					 * the neighbour is resolved --
					 * NOTE(review): confirm at users.
					 */
	unsigned char ha[ETH_ALEN];	/* Hardware (MAC) address. */
	struct list_head nexthop_list;	/* Nexthops using this neighbour. */
	/* Node in the router's list of neighbours referenced by nexthops;
	 * managed elsewhere in this file.
	 */
	struct list_head nexthop_neighs_list_node;
	unsigned int counter_index;	/* Flow counter; valid only when
					 * counter_valid is set.
					 */
	bool counter_valid;
};
2128
/* Parameters of router->neigh_ht, mapping mlxsw_sp_neigh_key (the kernel
 * neighbour pointer) to struct mlxsw_sp_neigh_entry.
 */
static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};
2134
2135struct mlxsw_sp_neigh_entry *
2136mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
2137 struct mlxsw_sp_neigh_entry *neigh_entry)
2138{
2139 if (!neigh_entry) {
2140 if (list_empty(&rif->neigh_list))
2141 return NULL;
2142 else
2143 return list_first_entry(&rif->neigh_list,
2144 typeof(*neigh_entry),
2145 rif_list_node);
2146 }
2147 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
2148 return NULL;
2149 return list_next_entry(neigh_entry, rif_list_node);
2150}
2151
/* Return the neighbour's address family (AF_INET or AF_INET6). */
int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->key.n->tbl->family;
}
2156
/* Return the neighbour's cached hardware (MAC) address. */
unsigned char *
mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->ha;
}
2162
2163u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2164{
2165 struct neighbour *n;
2166
2167 n = neigh_entry->key.n;
2168 return ntohl(*((__be32 *) n->primary_key));
2169}
2170
2171struct in6_addr *
2172mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2173{
2174 struct neighbour *n;
2175
2176 n = neigh_entry->key.n;
2177 return (struct in6_addr *) &n->primary_key;
2178}
2179
2180int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2181 struct mlxsw_sp_neigh_entry *neigh_entry,
2182 u64 *p_counter)
2183{
2184 if (!neigh_entry->counter_valid)
2185 return -EINVAL;
2186
2187 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2188 p_counter, NULL);
2189}
2190
2191static struct mlxsw_sp_neigh_entry *
2192mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2193 u16 rif)
2194{
2195 struct mlxsw_sp_neigh_entry *neigh_entry;
2196
2197 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2198 if (!neigh_entry)
2199 return NULL;
2200
2201 neigh_entry->key.n = n;
2202 neigh_entry->rif = rif;
2203 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2204
2205 return neigh_entry;
2206}
2207
/* Free a neighbour entry previously allocated by
 * mlxsw_sp_neigh_entry_alloc().
 */
static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}
2212
/* Insert the neighbour entry into the router's neighbour hash table. */
static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}
2221
/* Remove the neighbour entry from the router's neighbour hash table. */
static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}
2230
2231static bool
2232mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2233 struct mlxsw_sp_neigh_entry *neigh_entry)
2234{
2235 struct devlink *devlink;
2236 const char *table_name;
2237
2238 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2239 case AF_INET:
2240 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2241 break;
2242 case AF_INET6:
2243 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2244 break;
2245 default:
2246 WARN_ON(1);
2247 return false;
2248 }
2249
2250 devlink = priv_to_devlink(mlxsw_sp->core);
2251 return devlink_dpipe_table_counter_enabled(devlink, table_name);
2252}
2253
2254static void
2255mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2256 struct mlxsw_sp_neigh_entry *neigh_entry)
2257{
2258 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2259 return;
2260
2261 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2262 return;
2263
2264 neigh_entry->counter_valid = true;
2265}
2266
/* Release the neighbour's flow counter, if one was allocated. */
static void
mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!neigh_entry->counter_valid)
		return;
	mlxsw_sp_flow_counter_free(mlxsw_sp,
				   neigh_entry->counter_index);
	neigh_entry->counter_valid = false;
}
2277
/* Create a driver neighbour entry for kernel neighbour @n: resolve the
 * RIF from the neighbour's netdev, allocate the entry, insert it into
 * the neighbour hash table and link it on the RIF's neighbour list.
 * Returns ERR_PTR on failure.
 */
static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	/* Counter allocation is best-effort and cannot fail the create. */
	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}
2306
/* Tear down a neighbour entry, undoing mlxsw_sp_neigh_entry_create()
 * in reverse order: unlink from the RIF list, free the counter, remove
 * from the hash table and free the memory.
 */
static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}
2316
2317static struct mlxsw_sp_neigh_entry *
2318mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2319{
2320 struct mlxsw_sp_neigh_key key;
2321
2322 key.n = n;
2323 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2324 &key, mlxsw_sp_neigh_ht_params);
2325}
2326
/* Initialize the neighbour activity polling interval from the kernel's
 * DELAY_PROBE_TIME; with IPv6 enabled, use the smaller of the ARP and
 * ND table values so neither family's probes come too late.
 */
static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval;

#if IS_ENABLED(CONFIG_IPV6)
	interval = min_t(unsigned long,
			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
#else
	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
#endif
	/* Stored in milliseconds; converted back to jiffies when scheduling. */
	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}
2341
/* Process one IPv4 entry from a RAUHTD activity dump: validate the RIF
 * reported by the device, find the corresponding kernel ARP entry and
 * feed activity back by triggering a neighbour event.
 */
static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	/* Bound-check before indexing the RIF array; the value comes
	 * straight from the device.
	 */
	if (WARN_ON_ONCE(rif >= max_rifs))
		return;
	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dipn = htonl(dip);
	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n)
		return;

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}
2372
2373#if IS_ENABLED(CONFIG_IPV6)
2374static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2375 char *rauhtd_pl,
2376 int rec_index)
2377{
2378 struct net_device *dev;
2379 struct neighbour *n;
2380 struct in6_addr dip;
2381 u16 rif;
2382
2383 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2384 (char *) &dip);
2385
2386 if (!mlxsw_sp->router->rifs[rif]) {
2387 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2388 return;
2389 }
2390
2391 dev = mlxsw_sp->router->rifs[rif]->dev;
2392 n = neigh_lookup(&nd_tbl, &dip, dev);
2393 if (!n)
2394 return;
2395
2396 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2397 neigh_event_send(n, NULL);
2398 neigh_release(n);
2399}
2400#else
2401static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2402 char *rauhtd_pl,
2403 int rec_index)
2404{
2405}
2406#endif
2407
/* Process all IPv4 entries packed into one RAUHTD record. */
static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	u8 num_entries;
	int i;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								rec_index);

	/* Hardware starts counting at 0, so add 1 (the same zero-based
	 * convention is relied on in mlxsw_sp_router_rauhtd_is_full()).
	 */
	num_entries++;

	/* Each record consists of several neighbour entries. */
	for (i = 0; i < num_entries; i++) {
		int ent_index;

		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
						       ent_index);
	}

}
2430
static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	/* One record contains one entry. */
	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
					       rec_index);
}
2439
/* Dispatch one RAUHTD dump record to the per-family handler. */
static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	}
}
2454
/* Heuristic for "the dump filled the whole response and there may be
 * more records": true when the maximum number of records was returned
 * and the last record is itself full (IPv6 records always are; IPv4
 * records are full when they carry the per-record entry maximum).
 */
static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	/* num_entries is zero-based (see the rec_ipv4_process handler),
	 * hence the pre-increment before comparing to the maximum.
	 */
	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}
2474
/* Repeatedly dump active neighbours of the given type via the RAUHTD
 * register and feed the activity back to the kernel, until a dump comes
 * back that is not full. Holds the router lock for the whole dump so the
 * RIF array cannot change underneath the record handlers.
 */
static int
__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
				       char *rauhtd_pl,
				       enum mlxsw_reg_rauhtd_type type)
{
	int i, num_rec;
	int err;

	mutex_lock(&mlxsw_sp->router->lock);
	do {
		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
				      rauhtd_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
			break;
		}
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
							  i);
	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
	mutex_unlock(&mlxsw_sp->router->lock);

	return err;
}
2502
2503static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2504{
2505 enum mlxsw_reg_rauhtd_type type;
2506 char *rauhtd_pl;
2507 int err;
2508
2509 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2510 if (!rauhtd_pl)
2511 return -ENOMEM;
2512
2513 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2514 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2515 if (err)
2516 goto out;
2517
2518 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2519 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2520out:
2521 kfree(rauhtd_pl);
2522 return err;
2523}
2524
/* Ping the kernel for every neighbour used by a nexthop so its state
 * machine keeps such entries alive while they are offloaded.
 */
static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* The router lock protects the nexthop_neighs_list during the walk. */
	mutex_lock(&mlxsw_sp->router->lock);
	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* A single-statement loop body: each listed neighbour gets
		 * an event so the kernel refreshes/validates it.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	mutex_unlock(&mlxsw_sp->router->lock);
}
2538
/* Re-arm the periodic neighbour activity work; the interval (in ms) is
 * derived from the kernel's DELAY_PROBE_TIME and may be updated at
 * runtime by the netevent handler.
 */
static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router->neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
			       msecs_to_jiffies(interval));
}
2547
/* Periodic work: sync hardware neighbour activity to the kernel, keep
 * nexthop neighbours alive, then reschedule itself.
 */
static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router;
	int err;

	router = container_of(work, struct mlxsw_sp_router,
			      neighs_update.dw.work);
	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);

	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}
2563
/* Periodic work: probe nexthop neighbours that are still unresolved by
 * sending them neighbour events, so the kernel issues ARP/ND for
 * addresses the device needs but no host traffic is resolving.
 */
static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_router *router;

	router = container_of(work, struct mlxsw_sp_router,
			      nexthop_probe_dw.work);

	/* Only neighbours on the nexthop list that are not yet connected
	 * are probed; resolved ones are refreshed by the activity work.
	 * The router lock protects the list during the walk.
	 */
	mutex_lock(&router->lock);
	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	mutex_unlock(&router->lock);

	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}
2587
2588static void
2589mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2590 struct mlxsw_sp_neigh_entry *neigh_entry,
2591 bool removing, bool dead);
2592
2593static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2594{
2595 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2596 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2597}
2598
/* Write (add/delete) an IPv4 neighbour entry to the device's RAUHT
 * table, binding its flow counter when one was allocated.
 */
static int
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	if (neigh_entry->counter_valid)
		mlxsw_reg_rauht_pack_counter(rauht_pl,
					     neigh_entry->counter_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
2615
/* Write (add/delete) an IPv6 neighbour entry to the device's RAUHT
 * table, binding its flow counter when one was allocated.
 */
static int
mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	char rauht_pl[MLXSW_REG_RAUHT_LEN];
	const char *dip = n->primary_key;

	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	if (neigh_entry->counter_valid)
		mlxsw_reg_rauht_pack_counter(rauht_pl,
					     neigh_entry->counter_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
2632
/* Return true for IPv6 neighbours the device should not be programmed
 * with: link-local destinations (handled by the CPU rather than
 * offloaded to the hardware host table).
 */
bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n = neigh_entry->key.n;

	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
	    IPV6_ADDR_LINKLOCAL)
		return true;
	return false;
}
2646
/* Reflect a neighbour's connected state to the device: program the
 * entry when @adding, remove it otherwise, and keep NTF_OFFLOADED on
 * the kernel neighbour in sync. Removing an entry that was never
 * programmed is a no-op.
 */
static void
mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry,
			    bool adding)
{
	enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
	int err;

	if (!adding && !neigh_entry->connected)
		return;
	neigh_entry->connected = adding;
	if (neigh_entry->key.n->tbl->family == AF_INET) {
		err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
						      op);
		if (err)
			return;
	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
		/* Link-local IPv6 neighbours are intentionally skipped. */
		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
			return;
		err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
						      op);
		if (err)
			return;
	} else {
		WARN_ON_ONCE(1);
		return;
	}

	/* Only flip the offload flag once the device write succeeded. */
	if (adding)
		neigh_entry->key.n->flags |= NTF_OFFLOADED;
	else
		neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
}
2680
/* Toggle counter binding for a neighbour entry. The entry is then
 * re-written to the device (adding=true) so the hardware picks up the
 * new counter index (or the absence of one).
 */
void
mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry,
				    bool adding)
{
	if (adding)
		mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
	else
		mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
}
2692
/* Deferred-work context for netevent notifications. */
struct mlxsw_sp_netevent_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;	/* only set for neighbour-update events;
				 * holds a reference dropped by the work
				 */
};
2698
/* Deferred handler for NETEVENT_NEIGH_UPDATE: create/update/remove the
 * driver's neighbour entry to match the kernel's view of @n. Drops the
 * neighbour reference taken by the notifier and frees the work item.
 */
static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);
	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = net_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* Snapshot the neighbour's state under its lock. If it changes
	 * again after we drop the lock, another event will follow and
	 * re-run this work with the fresh state.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	mutex_lock(&mlxsw_sp->router->lock);
	mlxsw_sp_span_respin(mlxsw_sp);

	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	/* Nothing to do for an unconnected neighbour we never tracked. */
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	/* Skip the device write when nothing observable changed. */
	if (neigh_entry->connected && entry_connected &&
	    !memcmp(neigh_entry->ha, ha, ETH_ALEN))
		goto out;

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
				      dead);

	/* Drop entries no longer connected and not referenced by nexthops. */
	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	mutex_unlock(&mlxsw_sp->router->lock);
	neigh_release(n);
	kfree(net_work);
}
2750
2751static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2752
/* Deferred handler for multipath hash policy changes: re-initialize the
 * device's ECMP hash configuration, then free the work item.
 */
static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);
	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;

	mlxsw_sp_mp_hash_init(mlxsw_sp);
	kfree(net_work);
}
2762
2763static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2764
/* Deferred handler for forwarding-priority updates: re-run the basic
 * router initialization so the new setting takes effect, then free the
 * work item.
 */
static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
{
	struct mlxsw_sp_netevent_work *net_work =
		container_of(work, struct mlxsw_sp_netevent_work, work);
	struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;

	__mlxsw_sp_router_init(mlxsw_sp);
	kfree(net_work);
}
2774
/* Queue @cb as deferred work for a netevent, but only when the event's
 * network namespace matches this device's. Called from atomic notifier
 * context, hence GFP_ATOMIC.
 */
static int mlxsw_sp_router_schedule_work(struct net *net,
					 struct notifier_block *nb,
					 void (*cb)(struct work_struct *))
{
	struct mlxsw_sp_netevent_work *net_work;
	struct mlxsw_sp_router *router;

	router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
	if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
		return NOTIFY_DONE;

	net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
	if (!net_work)
		return NOTIFY_BAD;

	/* The work callback owns and frees net_work. */
	INIT_WORK(&net_work->work, cb);
	net_work->mlxsw_sp = router->mlxsw_sp;
	mlxsw_core_schedule_work(&net_work->work);
	return NOTIFY_DONE;
}
2795
/* Netevent notifier: runs in atomic context, so all real work is
 * deferred to process context via the work items above.
 */
static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
					  unsigned long event, void *ptr)
{
	struct mlxsw_sp_netevent_work *net_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* Only per-device IPv4/IPv6 parameter changes are of
		 * interest (p->dev is NULL for default table updates).
		 */
		if (!p->dev || (p->tbl->family != AF_INET &&
				p->tbl->family != AF_INET6))
			return NOTIFY_DONE;

		/* Make sure the parameters belong to a netdev backed by
		 * one of our ports before touching our polling interval.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router->neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
		if (!net_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
		net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		net_work->n = n;

		/* Take a reference so the neighbour cannot be freed
		 * before the deferred work runs; the work releases it.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&net_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_IPV4_MPATH_HASH_UPDATE:
	case NETEVENT_IPV6_MPATH_HASH_UPDATE:
		return mlxsw_sp_router_schedule_work(ptr, nb,
				mlxsw_sp_router_mp_hash_event_work);

	case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
		return mlxsw_sp_router_schedule_work(ptr, nb,
				mlxsw_sp_router_update_priority_work);
	}

	return NOTIFY_DONE;
}
2868
/* Set up neighbour tracking: the hash table, the initial polling
 * interval, and the two periodic works (activity sync and unresolved
 * nexthop probing), both kicked off immediately.
 */
static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval from the kernel's current
	 * DELAY_PROBE_TIME values.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
	return 0;
}
2892
/* Tear down neighbour tracking; cancels the periodic works before
 * destroying the hash table they operate on.
 */
static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}
2899
/* A RIF is going away: remove all of its neighbour entries from the
 * device and destroy them. _safe iteration because destroy unlinks the
 * entry from rif->neigh_list.
 */
static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
				 rif_list_node) {
		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
	}
}
2911
/* Kind of object a nexthop resolves through. */
enum mlxsw_sp_nexthop_type {
	MLXSW_SP_NEXTHOP_TYPE_ETH,
	MLXSW_SP_NEXTHOP_TYPE_IPIP,
};

enum mlxsw_sp_nexthop_action {
	/* Nexthop forwards packets to an egress RIF. */
	MLXSW_SP_NEXTHOP_ACTION_FORWARD,
	/* Nexthop discards packets. */
	MLXSW_SP_NEXTHOP_ACTION_DISCARD,
	/* Nexthop traps packets to the CPU. */
	MLXSW_SP_NEXTHOP_ACTION_TRAP,
};

/* Hash key for IPv4 nexthops: the kernel's fib_nh pointer. */
struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};
2929
/* Driver representation of a single nexthop (group member). */
struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head rif_list_node;
	struct list_head router_list_node;
	struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
						   * this nexthop belongs to
						   */
	struct rhash_head ht_node;
	struct neigh_table *neigh_tbl;
	struct mlxsw_sp_nexthop_key key;
	unsigned char gw_addr[sizeof(struct in6_addr)];
	int ifindex;
	int nh_weight;
	int norm_nh_weight;
	int num_adj_entries;
	struct mlxsw_sp_rif *rif;
	u8 should_offload:1, /* set indicates this nexthop should be written
			      * to the adjacency table.
			      */
	   offloaded:1, /* set indicates this nexthop was written to the
			 * adjacency table.
			 */
	   update:1; /* set indicates this nexthop should be updated in the
		      * adjacency table (f.e., its MAC changed).
		      */
	enum mlxsw_sp_nexthop_action action;
	enum mlxsw_sp_nexthop_type type;
	union {
		struct mlxsw_sp_neigh_entry *neigh_entry;
		struct mlxsw_sp_ipip_entry *ipip_entry;
	};
	unsigned int counter_index;
	bool counter_valid;
};
2964
/* How a nexthop group is keyed: legacy IPv4 (fib_info), legacy IPv6
 * (per-route nexthop list) or a nexthop object (by object ID).
 */
enum mlxsw_sp_nexthop_group_type {
	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
	MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
	MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
};
2970
2971struct mlxsw_sp_nexthop_group_info {
2972 struct mlxsw_sp_nexthop_group *nh_grp;
2973 u32 adj_index;
2974 u16 ecmp_size;
2975 u16 count;
2976 int sum_norm_weight;
2977 u8 adj_index_valid:1,
2978 gateway:1,
2979 is_resilient:1;
2980 struct list_head list;
2981 struct mlxsw_sp_nexthop nexthops[0];
2982#define nh_rif nexthops[0].rif
2983};
2984
/* Hash key identifying a virtual router (and protocol) a nexthop group
 * is used in. NOTE: hashed bytewise by rhashtable, so instances must be
 * fully zeroed (memset) before the fields are filled in.
 */
struct mlxsw_sp_nexthop_group_vr_key {
	u16 vr_id;
	enum mlxsw_sp_l3proto proto;
};

/* Reference-counted record of a nexthop group's use in one VR. */
struct mlxsw_sp_nexthop_group_vr_entry {
	struct list_head list; /* member in vr_list */
	struct rhash_head ht_node; /* member in vr_ht */
	refcount_t ref_count;
	struct mlxsw_sp_nexthop_group_vr_key key;
};
2996
/* A nexthop group shared by one or more FIB entries. The union holds
 * the type-specific identity used for hashing/comparison (see
 * mlxsw_sp_nexthop_group_cmp()).
 */
struct mlxsw_sp_nexthop_group {
	struct rhash_head ht_node;
	struct list_head fib_list; /* list of fib entries that use this group */
	union {
		struct {
			struct fib_info *fi;
		} ipv4;
		struct {
			u32 id;
		} obj;
	};
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct list_head vr_list;
	struct rhashtable vr_ht; /* VRs this group is linked to */
	enum mlxsw_sp_nexthop_group_type type;
	bool can_destroy;
};
3014
3015void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
3016 struct mlxsw_sp_nexthop *nh)
3017{
3018 struct devlink *devlink;
3019
3020 devlink = priv_to_devlink(mlxsw_sp->core);
3021 if (!devlink_dpipe_table_counter_enabled(devlink,
3022 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
3023 return;
3024
3025 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
3026 return;
3027
3028 nh->counter_valid = true;
3029}
3030
/* Release the nexthop's flow counter, if one was allocated. */
void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	if (!nh->counter_valid)
		return;
	mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
	nh->counter_valid = false;
}
3039
3040int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
3041 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
3042{
3043 if (!nh->counter_valid)
3044 return -EINVAL;
3045
3046 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
3047 p_counter, NULL);
3048}
3049
3050struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
3051 struct mlxsw_sp_nexthop *nh)
3052{
3053 if (!nh) {
3054 if (list_empty(&router->nexthop_list))
3055 return NULL;
3056 else
3057 return list_first_entry(&router->nexthop_list,
3058 typeof(*nh), router_list_node);
3059 }
3060 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
3061 return NULL;
3062 return list_next_entry(nh, router_list_node);
3063}
3064
/* True when the nexthop is in the adjacency table and forwards. */
bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
{
	return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
}
3069
3070unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
3071{
3072 if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
3073 !mlxsw_sp_nexthop_is_forward(nh))
3074 return NULL;
3075 return nh->neigh_entry->ha;
3076}
3077
/* Report the adjacency-table location of a nexthop: the group's base
 * index and size, plus this nexthop's offset within the group (the sum
 * of adjacency entries of the offloaded members preceding it).
 * Returns -EINVAL when the nexthop or its group is not in the table.
 */
int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
			     u32 *p_adj_size, u32 *p_adj_hash_index)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
	u32 adj_hash_index = 0;
	int i;

	if (!nh->offloaded || !nhgi->adj_index_valid)
		return -EINVAL;

	*p_adj_index = nhgi->adj_index;
	*p_adj_size = nhgi->ecmp_size;

	for (i = 0; i < nhgi->count; i++) {
		struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];

		if (nh_iter == nh)
			break;
		/* Only offloaded members occupy adjacency entries. */
		if (nh_iter->offloaded)
			adj_hash_index += nh_iter->num_adj_entries;
	}

	*p_adj_hash_index = adj_hash_index;
	return 0;
}
3103
/* Accessor for the nexthop's egress router interface (may be NULL). */
struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
{
	return nh->rif;
}
3108
3109bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3110{
3111 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3112 int i;
3113
3114 for (i = 0; i < nhgi->count; i++) {
3115 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3116
3117 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3118 return true;
3119 }
3120 return false;
3121}
3122
/* Hash parameters for the per-group VR table; the key is compared
 * bytewise over key_len, hence the memset requirement noted on
 * struct mlxsw_sp_nexthop_group_vr_key.
 */
static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
	.automatic_shrinking = true,
};
3129
/* Find the VR entry linking @nh_grp to @fib's virtual router, or NULL. */
static struct mlxsw_sp_nexthop_group_vr_entry *
mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
				       const struct mlxsw_sp_fib *fib)
{
	struct mlxsw_sp_nexthop_group_vr_key key;

	/* The key is hashed bytewise; memset zeroes padding so lookups
	 * match entries created the same way.
	 */
	memset(&key, 0, sizeof(key));
	key.vr_id = fib->vr->id;
	key.proto = fib->proto;
	return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
				      mlxsw_sp_nexthop_group_vr_ht_params);
}
3142
/* Create a VR entry (initial refcount 1) recording that @nh_grp is used
 * in @fib's virtual router; inserts it into the group's VR hash table
 * and list. kzalloc zeroes key padding, matching the memset'd lookup key.
 */
static int
mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
				       const struct mlxsw_sp_fib *fib)
{
	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
	int err;

	vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
	if (!vr_entry)
		return -ENOMEM;

	vr_entry->key.vr_id = fib->vr->id;
	vr_entry->key.proto = fib->proto;
	refcount_set(&vr_entry->ref_count, 1);

	err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
				     mlxsw_sp_nexthop_group_vr_ht_params);
	if (err)
		goto err_hashtable_insert;

	list_add(&vr_entry->list, &nh_grp->vr_list);

	return 0;

err_hashtable_insert:
	kfree(vr_entry);
	return err;
}
3171
/* Unlink and free a VR entry; counterpart of _create(). */
static void
mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
				       struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
{
	list_del(&vr_entry->list);
	rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
			       mlxsw_sp_nexthop_group_vr_ht_params);
	kfree(vr_entry);
}
3181
3182static int
3183mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3184 const struct mlxsw_sp_fib *fib)
3185{
3186 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3187
3188 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3189 if (vr_entry) {
3190 refcount_inc(&vr_entry->ref_count);
3191 return 0;
3192 }
3193
3194 return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3195}
3196
3197static void
3198mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3199 const struct mlxsw_sp_fib *fib)
3200{
3201 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3202
3203 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3204 if (WARN_ON_ONCE(!vr_entry))
3205 return;
3206
3207 if (!refcount_dec_and_test(&vr_entry->ref_count))
3208 return;
3209
3210 mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3211}
3212
/* Lookup key for the nexthop group hash table; which union member is
 * valid is determined by @type (see mlxsw_sp_nexthop_group_cmp()).
 */
struct mlxsw_sp_nexthop_group_cmp_arg {
	enum mlxsw_sp_nexthop_group_type type;
	union {
		struct fib_info *fi;
		struct mlxsw_sp_fib6_entry *fib6_entry;
		u32 id;
	};
};
3221
3222static bool
3223mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3224 const struct in6_addr *gw, int ifindex,
3225 int weight)
3226{
3227 int i;
3228
3229 for (i = 0; i < nh_grp->nhgi->count; i++) {
3230 const struct mlxsw_sp_nexthop *nh;
3231
3232 nh = &nh_grp->nhgi->nexthops[i];
3233 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3234 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3235 return true;
3236 }
3237
3238 return false;
3239}
3240
/* Compare a nexthop group against an IPv6 FIB entry: equal when the
 * member counts match and every route nexthop (device, weight, gateway)
 * is present in the group.
 */
static bool
mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
			    const struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	if (nh_grp->nhgi->count != fib6_entry->nrt6)
		return false;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
		struct in6_addr *gw;
		int ifindex, weight;

		ifindex = fib6_nh->fib_nh_dev->ifindex;
		weight = fib6_nh->fib_nh_weight;
		gw = &fib6_nh->fib_nh_gw6;
		if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
							 weight))
			return false;
	}

	return true;
}
3265
/* rhashtable compare callback: returns 0 on match, non-zero otherwise.
 * Dispatches on the group type to the matching identity comparison.
 */
static int
mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
	const struct mlxsw_sp_nexthop_group *nh_grp = ptr;

	if (nh_grp->type != cmp_arg->type)
		return 1;

	switch (cmp_arg->type) {
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
		return cmp_arg->fi != nh_grp->ipv4.fi;
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
		return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
						    cmp_arg->fib6_entry);
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
		return cmp_arg->id != nh_grp->obj.id;
	default:
		WARN_ON(1);
		return 1;
	}
}
3288
/* rhashtable object-hash callback for stored groups. Must agree with
 * mlxsw_sp_nexthop_group_hash() below so a lookup key and a stored
 * object hash to the same bucket.
 */
static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct mlxsw_sp_nexthop_group *nh_grp = data;
	const struct mlxsw_sp_nexthop *nh;
	struct fib_info *fi;
	unsigned int val;
	int i;

	switch (nh_grp->type) {
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
		fi = nh_grp->ipv4.fi;
		return jhash(&fi, sizeof(fi), seed);
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
		/* XOR-fold member (ifindex, gateway) hashes so the result
		 * is order-independent, like the list-based key hash.
		 */
		val = nh_grp->nhgi->count;
		for (i = 0; i < nh_grp->nhgi->count; i++) {
			nh = &nh_grp->nhgi->nexthops[i];
			val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
			val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
		}
		return jhash(&val, sizeof(val), seed);
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
		return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
	default:
		WARN_ON(1);
		return 0;
	}
}
3316
/* Hash an IPv6 FIB entry's nexthop list the same way the stored group's
 * members are hashed in mlxsw_sp_nexthop_group_hash_obj() (nexthop
 * count, then order-independent XOR of per-nexthop hashes).
 */
static u32
mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
{
	unsigned int val = fib6_entry->nrt6;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
		struct net_device *dev = fib6_nh->fib_nh_dev;
		struct in6_addr *gw = &fib6_nh->fib_nh_gw6;

		val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
		val ^= jhash(gw, sizeof(*gw), seed);
	}

	return jhash(&val, sizeof(val), seed);
}
3334
/* rhashtable key-hash callback for lookup arguments; must agree with
 * mlxsw_sp_nexthop_group_hash_obj() for stored objects.
 */
static u32
mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
{
	const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;

	switch (cmp_arg->type) {
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
		return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
		return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
		return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
	default:
		WARN_ON(1);
		return 0;
	}
}
3352
/* Nexthop group table uses custom hash/compare callbacks because the
 * key is type-dependent (fib_info pointer, IPv6 nexthop list, or ID).
 */
static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
	.hashfn = mlxsw_sp_nexthop_group_hash,
	.obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
	.obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
};
3359
/* Insert a nexthop group into the shared-group hash table. IPv6 groups
 * without a gateway are never shared, so they are deliberately not
 * inserted (mirrored in mlxsw_sp_nexthop_group_remove()).
 */
static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
	    !nh_grp->nhgi->gateway)
		return 0;

	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &nh_grp->ht_node,
				      mlxsw_sp_nexthop_group_ht_params);
}
3371
3372static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3373 struct mlxsw_sp_nexthop_group *nh_grp)
3374{
3375 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3376 !nh_grp->nhgi->gateway)
3377 return;
3378
3379 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3380 &nh_grp->ht_node,
3381 mlxsw_sp_nexthop_group_ht_params);
3382}
3383
3384static struct mlxsw_sp_nexthop_group *
3385mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3386 struct fib_info *fi)
3387{
3388 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3389
3390 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3391 cmp_arg.fi = fi;
3392 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3393 &cmp_arg,
3394 mlxsw_sp_nexthop_group_ht_params);
3395}
3396
3397static struct mlxsw_sp_nexthop_group *
3398mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3399 struct mlxsw_sp_fib6_entry *fib6_entry)
3400{
3401 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3402
3403 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3404 cmp_arg.fib6_entry = fib6_entry;
3405 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3406 &cmp_arg,
3407 mlxsw_sp_nexthop_group_ht_params);
3408}
3409
/* Individual nexthop hash table: keyed by mlxsw_sp_nexthop_key
 * (a fib_nh pointer), using rhashtable's default memcmp comparison.
 */
static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
};
3415
/* Index a nexthop by its fib_nh key so FIB events can find it later. */
static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
}
3422
/* Remove a nexthop from the fib_nh-keyed lookup table. */
static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
			       mlxsw_sp_nexthop_ht_params);
}
3429
/* Look up the driver nexthop tracking a given fib_nh, or NULL. */
static struct mlxsw_sp_nexthop *
mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
			struct mlxsw_sp_nexthop_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
				      mlxsw_sp_nexthop_ht_params);
}
3437
/* Issue a RALEU register write that re-points all routes in one virtual
 * router from the old adjacency index/size to the new one, in a single
 * hardware operation.
 */
static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
					     enum mlxsw_sp_l3proto proto,
					     u16 vr_id,
					     u32 adj_index, u16 ecmp_size,
					     u32 new_adj_index,
					     u16 new_ecmp_size)
{
	char raleu_pl[MLXSW_REG_RALEU_LEN];

	mlxsw_reg_raleu_pack(raleu_pl,
			     (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
			     adj_index, ecmp_size, new_adj_index,
			     new_ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
3453
/* Re-point every virtual router using this group from the old adjacency
 * index to the group's current one. On failure, roll back the VRs that
 * were already updated (walk the list in reverse from the failing entry,
 * swapping old/new arguments) so all VRs end up consistent.
 */
static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp,
					  u32 old_adj_index, u16 old_ecmp_size)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
	int err;

	list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
							vr_entry->key.proto,
							vr_entry->key.vr_id,
							old_adj_index,
							old_ecmp_size,
							nhgi->adj_index,
							nhgi->ecmp_size);
		if (err)
			goto err_mass_update_vr;
	}
	return 0;

err_mass_update_vr:
	/* Best-effort rollback; errors here are ignored. */
	list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
		mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
						  vr_entry->key.vr_id,
						  nhgi->adj_index,
						  nhgi->ecmp_size,
						  old_adj_index, old_ecmp_size);
	return err;
}
3484
/* Write one Ethernet adjacency entry (RATR register) for a nexthop.
 * When 'force' is false the entry is only rewritten if hardware marked
 * it active, which is used by the resilient-group activity mechanism.
 */
static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
					 u32 adj_index,
					 struct mlxsw_sp_nexthop *nh,
					 bool force, char *ratr_pl)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	enum mlxsw_reg_ratr_op op;
	u16 rif_index;

	/* Without a RIF, fall back to the router's loopback RIF. */
	rif_index = nh->rif ? nh->rif->rif_index :
			      mlxsw_sp->router->lb_rif_index;
	op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
		     MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
	mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
			    adj_index, rif_index);
	switch (nh->action) {
	case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
		/* Forward using the resolved neighbour's MAC. */
		mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
		break;
	case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
		mlxsw_reg_ratr_trap_action_set(ratr_pl,
					       MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
		break;
	case MLXSW_SP_NEXTHOP_ACTION_TRAP:
		/* Trap to CPU so the kernel can forward instead. */
		mlxsw_reg_ratr_trap_action_set(ratr_pl,
					       MLXSW_REG_RATR_TRAP_ACTION_TRAP);
		mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	if (nh->counter_valid)
		mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
	else
		mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}
3524
3525int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3526 struct mlxsw_sp_nexthop *nh, bool force,
3527 char *ratr_pl)
3528{
3529 int i;
3530
3531 for (i = 0; i < nh->num_adj_entries; i++) {
3532 int err;
3533
3534 err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3535 nh, force, ratr_pl);
3536 if (err)
3537 return err;
3538 }
3539
3540 return 0;
3541}
3542
3543static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3544 u32 adj_index,
3545 struct mlxsw_sp_nexthop *nh,
3546 bool force, char *ratr_pl)
3547{
3548 const struct mlxsw_sp_ipip_ops *ipip_ops;
3549
3550 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3551 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3552 force, ratr_pl);
3553}
3554
3555static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3556 u32 adj_index,
3557 struct mlxsw_sp_nexthop *nh, bool force,
3558 char *ratr_pl)
3559{
3560 int i;
3561
3562 for (i = 0; i < nh->num_adj_entries; i++) {
3563 int err;
3564
3565 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3566 nh, force, ratr_pl);
3567 if (err)
3568 return err;
3569 }
3570
3571 return 0;
3572}
3573
static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
				   struct mlxsw_sp_nexthop *nh, bool force,
				   char *ratr_pl)
{
	/* Dispatch on nexthop type. A discard or trap action is always
	 * written as an Ethernet adjacency entry, even for tunnel
	 * nexthops, since traps/discards do not need tunnel state.
	 */
	if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
	    nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
	    nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
		return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
						   force, ratr_pl);
	else
		return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
						    force, ratr_pl);
}
3590
3591static int
3592mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3593 struct mlxsw_sp_nexthop_group_info *nhgi,
3594 bool reallocate)
3595{
3596 char ratr_pl[MLXSW_REG_RATR_LEN];
3597 u32 adj_index = nhgi->adj_index;
3598 struct mlxsw_sp_nexthop *nh;
3599 int i;
3600
3601 for (i = 0; i < nhgi->count; i++) {
3602 nh = &nhgi->nexthops[i];
3603
3604 if (!nh->should_offload) {
3605 nh->offloaded = 0;
3606 continue;
3607 }
3608
3609 if (nh->update || reallocate) {
3610 int err = 0;
3611
3612 err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3613 true, ratr_pl);
3614 if (err)
3615 return err;
3616 nh->update = 0;
3617 nh->offloaded = 1;
3618 }
3619 adj_index += nh->num_adj_entries;
3620 }
3621 return 0;
3622}
3623
3624static int
3625mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3626 struct mlxsw_sp_nexthop_group *nh_grp)
3627{
3628 struct mlxsw_sp_fib_entry *fib_entry;
3629 int err;
3630
3631 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3632 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3633 if (err)
3634 return err;
3635 }
3636 return 0;
3637}
3638
/* Inclusive range of adjacency group sizes supported by an ASIC. */
struct mlxsw_sp_adj_grp_size_range {
	u16 start; /* smallest supported size in this range */
	u16 end;   /* largest supported size in this range */
};
3643
3644
/* Adjacency group size ranges for Spectrum-1. Must be sorted in
 * ascending order for the round-up/round-down helpers below.
 */
static const struct mlxsw_sp_adj_grp_size_range
mlxsw_sp1_adj_grp_size_ranges[] = {
	{ .start = 1, .end = 64 },
	{ .start = 512, .end = 512 },
	{ .start = 1024, .end = 1024 },
	{ .start = 2048, .end = 2048 },
	{ .start = 4096, .end = 4096 },
};
3653
3654
/* Adjacency group size ranges for Spectrum-2 and later. Must be sorted
 * in ascending order for the round-up/round-down helpers below.
 */
static const struct mlxsw_sp_adj_grp_size_range
mlxsw_sp2_adj_grp_size_ranges[] = {
	{ .start = 1, .end = 128 },
	{ .start = 256, .end = 256 },
	{ .start = 512, .end = 512 },
	{ .start = 1024, .end = 1024 },
	{ .start = 2048, .end = 2048 },
	{ .start = 4096, .end = 4096 },
};
3664
/* Round the requested group size up to the nearest size the ASIC
 * supports. A size already inside a supported range is left unchanged;
 * otherwise it is bumped to the end of the first range above it.
 */
static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
					   u16 *p_adj_grp_size)
{
	int i;

	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
		const struct mlxsw_sp_adj_grp_size_range *size_range;

		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];

		/* Already a supported size: nothing to do. */
		if (*p_adj_grp_size >= size_range->start &&
		    *p_adj_grp_size <= size_range->end)
			return;

		/* Falls in the gap below this range: round up to it. */
		if (*p_adj_grp_size <= size_range->end) {
			*p_adj_grp_size = size_range->end;
			return;
		}
	}
}
3685
/* Round the group size down to the largest supported size that fits
 * within what the KVD linear allocator can actually provide
 * (alloc_size). Ranges are scanned from largest to smallest.
 */
static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
					     u16 *p_adj_grp_size,
					     unsigned int alloc_size)
{
	int i;

	for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
		const struct mlxsw_sp_adj_grp_size_range *size_range;

		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];

		if (alloc_size >= size_range->end) {
			*p_adj_grp_size = size_range->end;
			return;
		}
	}
}
3703
/* Adjust a requested adjacency group size to one both supported by the
 * ASIC and satisfiable by the KVD linear allocator.
 */
static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
				     u16 *p_adj_grp_size)
{
	unsigned int alloc_size;
	int err;

	/* Round up to the next size supported by the device and ask the
	 * allocator how much it would actually hand out for it.
	 */
	mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
					      *p_adj_grp_size, &alloc_size);
	if (err)
		return err;

	/* The allocation may yield more entries than requested; use as
	 * many of them as possible.
	 */
	mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);

	return 0;
}
3727
3728static void
3729mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3730{
3731 int i, g = 0, sum_norm_weight = 0;
3732 struct mlxsw_sp_nexthop *nh;
3733
3734 for (i = 0; i < nhgi->count; i++) {
3735 nh = &nhgi->nexthops[i];
3736
3737 if (!nh->should_offload)
3738 continue;
3739 if (g > 0)
3740 g = gcd(nh->nh_weight, g);
3741 else
3742 g = nh->nh_weight;
3743 }
3744
3745 for (i = 0; i < nhgi->count; i++) {
3746 nh = &nhgi->nexthops[i];
3747
3748 if (!nh->should_offload)
3749 continue;
3750 nh->norm_nh_weight = nh->nh_weight / g;
3751 sum_norm_weight += nh->norm_nh_weight;
3752 }
3753
3754 nhgi->sum_norm_weight = sum_norm_weight;
3755}
3756
/* Distribute the group's ecmp_size adjacency entries among offloadable
 * nexthops in proportion to their normalized weights. Each nexthop gets
 * the slice [lower_bound, upper_bound) where the bounds are the
 * cumulative weight scaled to ecmp_size, so all entries are assigned
 * exactly once.
 */
static void
mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
{
	int i, weight = 0, lower_bound = 0;
	int total = nhgi->sum_norm_weight;
	u16 ecmp_size = nhgi->ecmp_size;

	for (i = 0; i < nhgi->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
		int upper_bound;

		if (!nh->should_offload)
			continue;
		weight += nh->norm_nh_weight;
		upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
		nh->num_adj_entries = upper_bound - lower_bound;
		lower_bound = upper_bound;
	}
}
3776
3777static struct mlxsw_sp_nexthop *
3778mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3779 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3780
3781static void
3782mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3783 struct mlxsw_sp_nexthop_group *nh_grp)
3784{
3785 int i;
3786
3787 for (i = 0; i < nh_grp->nhgi->count; i++) {
3788 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3789
3790 if (nh->offloaded)
3791 nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3792 else
3793 nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3794 }
3795}
3796
3797static void
3798__mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3799 struct mlxsw_sp_fib6_entry *fib6_entry)
3800{
3801 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3802
3803 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3804 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3805 struct mlxsw_sp_nexthop *nh;
3806
3807 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3808 if (nh && nh->offloaded)
3809 fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3810 else
3811 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3812 }
3813}
3814
static void
mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;

	/* In IPv6 the route and the nexthop are described by the same
	 * struct, so refresh the offload indication of every route
	 * associated with this nexthop group.
	 */
	list_for_each_entry(fib6_entry, &nh_grp->fib_list,
			    common.nexthop_group_node)
		__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
}
3829
3830static void
3831mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3832 const struct mlxsw_sp_nexthop *nh,
3833 u16 bucket_index)
3834{
3835 struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3836 bool offload = false, trap = false;
3837
3838 if (nh->offloaded) {
3839 if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3840 trap = true;
3841 else
3842 offload = true;
3843 }
3844 nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3845 bucket_index, offload, trap);
3846}
3847
static void
mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_nexthop_group *nh_grp)
{
	int i;

	/* Do not touch the kernel's hardware flags for a group that is
	 * marked for destruction: the nexthop object is going away (or
	 * was already replaced), so updating its flags would be at best
	 * pointless and at worst report state for an object the kernel
	 * no longer associates with this group.
	 */
	if (nh_grp->can_destroy)
		return;

	nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
			     nh_grp->nhgi->adj_index_valid, false);

	/* For non-resilient groups the per-group flags above are all
	 * there is; only resilient groups track per-bucket state.
	 */
	if (!nh_grp->nhgi->is_resilient)
		return;

	for (i = 0; i < nh_grp->nhgi->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];

		mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
	}
}
3880
/* Refresh the kernel-visible offload indication of a nexthop group,
 * dispatching to the handler that matches how the group is keyed
 * (IPv4 fib_info, IPv6 FIB entry, or nexthop object).
 */
static void
mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop_group *nh_grp)
{
	switch (nh_grp->type) {
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
		mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
		break;
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
		mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
		break;
	case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
		mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
		break;
	}
}
3897
/* Re-evaluate a nexthop group after its state changed: decide which
 * nexthops can be offloaded, size and (re)allocate the adjacency group,
 * program its entries and re-point all users at it. On any failure the
 * group falls back to trapping traffic to the CPU (set_trap label) so
 * forwarding continues in software.
 */
static int
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	u16 ecmp_size, old_ecmp_size;
	struct mlxsw_sp_nexthop *nh;
	bool offload_change = false;
	u32 adj_index;
	bool old_adj_index_valid;
	u32 old_adj_index;
	int i, err2, err;

	/* Gateway-less groups have no adjacency entries; just refresh
	 * their FIB entries.
	 */
	if (!nhgi->gateway)
		return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);

	for (i = 0; i < nhgi->count; i++) {
		nh = &nhgi->nexthops[i];

		if (nh->should_offload != nh->offloaded) {
			offload_change = true;
			if (nh->should_offload)
				nh->update = 1;
		}
	}
	if (!offload_change) {
		/* Nothing was added or removed, so no need to reallocate.
		 * Just update the MAC addresses on the existing adjacency
		 * indexes.
		 */
		err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
			goto set_trap;
		}
		/* The offload indication may still have changed per
		 * nexthop (e.g. trap vs. forward), so refresh it.
		 */
		mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
		return 0;
	}
	mlxsw_sp_nexthop_group_normalize(nhgi);
	if (!nhgi->sum_norm_weight) {
		/* No neighbour of this group is connected, so just set
		 * the trap and let everything flow through the kernel.
		 */
		err = 0;
		goto set_trap;
	}

	ecmp_size = nhgi->sum_norm_weight;
	err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
	if (err)
		/* No valid allocation size available. */
		goto set_trap;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  ecmp_size, &adj_index);
	if (err) {
		/* We ran out of KVD linear space, so just set the trap
		 * and let everything flow through the kernel.
		 */
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
		goto set_trap;
	}
	old_adj_index_valid = nhgi->adj_index_valid;
	old_adj_index = nhgi->adj_index;
	old_ecmp_size = nhgi->ecmp_size;
	nhgi->adj_index_valid = 1;
	nhgi->adj_index = adj_index;
	nhgi->ecmp_size = ecmp_size;
	mlxsw_sp_nexthop_group_rebalance(nhgi);
	err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
		goto set_trap;
	}

	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);

	if (!old_adj_index_valid) {
		/* The trap was set for the FIB entries, so update them
		 * to unset it and start using the adjacency index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
			goto set_trap;
		}
		return 0;
	}

	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
					     old_adj_index, old_ecmp_size);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   old_ecmp_size, old_adj_index);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
		goto set_trap;
	}

	return 0;

set_trap:
	/* Fall back to software forwarding: invalidate the adjacency
	 * index, clear offload indications, re-trap the FIB entries and
	 * release the adjacency group if one was held.
	 */
	old_adj_index_valid = nhgi->adj_index_valid;
	nhgi->adj_index_valid = 0;
	for (i = 0; i < nhgi->count; i++) {
		nh = &nhgi->nexthops[i];
		nh->offloaded = 0;
	}
	err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
	if (err2)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
	mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
	if (old_adj_index_valid)
		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				   nhgi->ecmp_size, nhgi->adj_index);
	return err;
}
4016
4017static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
4018 bool removing)
4019{
4020 if (!removing) {
4021 nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
4022 nh->should_offload = 1;
4023 } else if (nh->nhgi->is_resilient) {
4024 nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
4025 nh->should_offload = 1;
4026 } else {
4027 nh->should_offload = 0;
4028 }
4029 nh->update = 1;
4030}
4031
/* Replace a dead neighbour entry with a live (or freshly created)
 * neighbour for the same gateway, re-keying the driver's neigh entry
 * and transferring one reference per attached nexthop from the old
 * neighbour to the new one.
 */
static int
mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n, *old_n = neigh_entry->key.n;
	struct mlxsw_sp_nexthop *nh;
	bool entry_connected;
	u8 nud_state, dead;
	int err;

	/* Any attached nexthop has the same gateway/device; use the
	 * first one to find or create the replacement neighbour.
	 */
	nh = list_first_entry(&neigh_entry->nexthop_list,
			      struct mlxsw_sp_nexthop, neigh_list_node);

	n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
	if (!n) {
		n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		/* Kick off resolution of the new neighbour. */
		neigh_event_send(n, NULL);
	}

	/* Re-key the driver entry under the new neighbour. */
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	neigh_entry->key.n = n;
	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);
	entry_connected = nud_state & NUD_VALID && !dead;

	/* Each nexthop held a reference on the old neighbour; drop it
	 * and take one on the new neighbour instead, then refresh the
	 * nexthop and its group.
	 */
	list_for_each_entry(nh, &neigh_entry->nexthop_list,
			    neigh_list_node) {
		neigh_release(old_n);
		neigh_clone(n);
		__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
	}

	/* Drop the lookup/create reference taken above. */
	neigh_release(n);

	return 0;

err_neigh_entry_insert:
	/* Restore the old key so the entry stays consistent. */
	neigh_entry->key.n = old_n;
	mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	neigh_release(n);
	return err;
}
4083
4084static void
4085mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4086 struct mlxsw_sp_neigh_entry *neigh_entry,
4087 bool removing, bool dead)
4088{
4089 struct mlxsw_sp_nexthop *nh;
4090
4091 if (list_empty(&neigh_entry->nexthop_list))
4092 return;
4093
4094 if (dead) {
4095 int err;
4096
4097 err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4098 neigh_entry);
4099 if (err)
4100 dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4101 return;
4102 }
4103
4104 list_for_each_entry(nh, &neigh_entry->nexthop_list,
4105 neigh_list_node) {
4106 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4107 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4108 }
4109}
4110
4111static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
4112 struct mlxsw_sp_rif *rif)
4113{
4114 if (nh->rif)
4115 return;
4116
4117 nh->rif = rif;
4118 list_add(&nh->rif_list_node, &rif->nexthop_list);
4119}
4120
4121static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
4122{
4123 if (!nh->rif)
4124 return;
4125
4126 list_del(&nh->rif_list_node);
4127 nh->rif = NULL;
4128}
4129
4130static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4131 struct mlxsw_sp_nexthop *nh)
4132{
4133 struct mlxsw_sp_neigh_entry *neigh_entry;
4134 struct neighbour *n;
4135 u8 nud_state, dead;
4136 int err;
4137
4138 if (!nh->nhgi->gateway || nh->neigh_entry)
4139 return 0;
4140
4141
4142
4143
4144
4145
4146 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4147 if (!n) {
4148 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4149 if (IS_ERR(n))
4150 return PTR_ERR(n);
4151 neigh_event_send(n, NULL);
4152 }
4153 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4154 if (!neigh_entry) {
4155 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4156 if (IS_ERR(neigh_entry)) {
4157 err = -EINVAL;
4158 goto err_neigh_entry_create;
4159 }
4160 }
4161
4162
4163
4164
4165 if (list_empty(&neigh_entry->nexthop_list))
4166 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4167 &mlxsw_sp->router->nexthop_neighs_list);
4168
4169 nh->neigh_entry = neigh_entry;
4170 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4171 read_lock_bh(&n->lock);
4172 nud_state = n->nud_state;
4173 dead = n->dead;
4174 read_unlock_bh(&n->lock);
4175 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4176
4177 return 0;
4178
4179err_neigh_entry_create:
4180 neigh_release(n);
4181 return err;
4182}
4183
/* Detach a nexthop from its neigh entry, undoing
 * mlxsw_sp_nexthop_neigh_init(): mark the nexthop non-offloadable,
 * unlink it, possibly retire/destroy the neigh entry, and release the
 * neighbour reference taken at init time.
 */
static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	struct neighbour *n;

	if (!neigh_entry)
		return;
	n = neigh_entry->key.n;

	__mlxsw_sp_nexthop_neigh_update(nh, true);
	list_del(&nh->neigh_list_node);
	nh->neigh_entry = NULL;

	/* If that was the last nexthop using this neigh entry, stop
	 * tracking it on the router's nexthop-neighs list.
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_del(&neigh_entry->nexthop_neighs_list_node);

	/* Destroy the entry only if nothing else (a connected
	 * neighbour) still needs it.
	 */
	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

	neigh_release(n);
}
4209
4210static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4211{
4212 struct net_device *ul_dev;
4213 bool is_up;
4214
4215 rcu_read_lock();
4216 ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4217 is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4218 rcu_read_unlock();
4219
4220 return is_up;
4221}
4222
4223static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4224 struct mlxsw_sp_nexthop *nh,
4225 struct mlxsw_sp_ipip_entry *ipip_entry)
4226{
4227 bool removing;
4228
4229 if (!nh->nhgi->gateway || nh->ipip_entry)
4230 return;
4231
4232 nh->ipip_entry = ipip_entry;
4233 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4234 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4235 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
4236}
4237
4238static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4239 struct mlxsw_sp_nexthop *nh)
4240{
4241 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4242
4243 if (!ipip_entry)
4244 return;
4245
4246 __mlxsw_sp_nexthop_neigh_update(nh, true);
4247 nh->ipip_entry = NULL;
4248}
4249
4250static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4251 const struct fib_nh *fib_nh,
4252 enum mlxsw_sp_ipip_type *p_ipipt)
4253{
4254 struct net_device *dev = fib_nh->fib_nh_dev;
4255
4256 return dev &&
4257 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4258 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4259}
4260
/* Classify a nexthop by its egress device and set up the matching
 * state: an offloadable IP-in-IP device makes it a tunnel nexthop;
 * otherwise it is an Ethernet nexthop bound to the device's RIF (if one
 * exists) with neighbour resolution started.
 */
static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_nexthop *nh,
				      const struct net_device *dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
	if (ipip_entry) {
		ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
		if (ipip_ops->can_offload(mlxsw_sp, dev)) {
			nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
			mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
			return 0;
		}
	}

	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	/* No RIF yet: leave the nexthop unbound; it will be picked up
	 * when a RIF is created for the device.
	 */
	if (!rif)
		return 0;

	mlxsw_sp_nexthop_rif_init(nh, rif);
	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
	if (err)
		goto err_neigh_init;

	return 0;

err_neigh_init:
	mlxsw_sp_nexthop_rif_fini(nh);
	return err;
}
4296
/* Tear down the type-specific state set up by
 * mlxsw_sp_nexthop_type_init(). Note the teardown order differs per
 * type: Ethernet releases the neighbour before the RIF, while IP-in-IP
 * releases the RIF before the tunnel entry.
 */
static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	switch (nh->type) {
	case MLXSW_SP_NEXTHOP_TYPE_ETH:
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		break;
	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
		mlxsw_sp_nexthop_rif_fini(nh);
		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
		break;
	}
}
4311
4312static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4313 struct mlxsw_sp_nexthop_group *nh_grp,
4314 struct mlxsw_sp_nexthop *nh,
4315 struct fib_nh *fib_nh)
4316{
4317 struct net_device *dev = fib_nh->fib_nh_dev;
4318 struct in_device *in_dev;
4319 int err;
4320
4321 nh->nhgi = nh_grp->nhgi;
4322 nh->key.fib_nh = fib_nh;
4323#ifdef CONFIG_IP_ROUTE_MULTIPATH
4324 nh->nh_weight = fib_nh->fib_nh_weight;
4325#else
4326 nh->nh_weight = 1;
4327#endif
4328 memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4329 nh->neigh_tbl = &arp_tbl;
4330 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4331 if (err)
4332 return err;
4333
4334 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4335 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4336
4337 if (!dev)
4338 return 0;
4339 nh->ifindex = dev->ifindex;
4340
4341 rcu_read_lock();
4342 in_dev = __in_dev_get_rcu(dev);
4343 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4344 fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4345 rcu_read_unlock();
4346 return 0;
4347 }
4348 rcu_read_unlock();
4349
4350 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4351 if (err)
4352 goto err_nexthop_neigh_init;
4353
4354 return 0;
4355
4356err_nexthop_neigh_init:
4357 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4358 return err;
4359}
4360
/* Tear down an IPv4 nexthop, in reverse order of
 * mlxsw_sp_nexthop4_init(): type-specific state, router list linkage,
 * flow counter, and finally the lookup-table entry.
 */
static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}
4369
4370static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4371 unsigned long event, struct fib_nh *fib_nh)
4372{
4373 struct mlxsw_sp_nexthop_key key;
4374 struct mlxsw_sp_nexthop *nh;
4375
4376 key.fib_nh = fib_nh;
4377 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4378 if (!nh)
4379 return;
4380
4381 switch (event) {
4382 case FIB_EVENT_NH_ADD:
4383 mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4384 break;
4385 case FIB_EVENT_NH_DEL:
4386 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4387 break;
4388 }
4389
4390 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4391}
4392
4393static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4394 struct mlxsw_sp_rif *rif)
4395{
4396 struct mlxsw_sp_nexthop *nh;
4397 bool removing;
4398
4399 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
4400 switch (nh->type) {
4401 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4402 removing = false;
4403 break;
4404 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4405 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
4406 break;
4407 default:
4408 WARN_ON(1);
4409 continue;
4410 }
4411
4412 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4413 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4414 }
4415}
4416
4417static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
4418 struct mlxsw_sp_rif *old_rif,
4419 struct mlxsw_sp_rif *new_rif)
4420{
4421 struct mlxsw_sp_nexthop *nh;
4422
4423 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
4424 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
4425 nh->rif = new_rif;
4426 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
4427}
4428
4429static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4430 struct mlxsw_sp_rif *rif)
4431{
4432 struct mlxsw_sp_nexthop *nh, *tmp;
4433
4434 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
4435 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4436 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4437 }
4438}
4439
/* Allocate and program the single shared adjacency entry that traps
 * packets to the CPU; it is referenced by nexthops whose action is
 * trap. The entry's index is stored in router->adj_trap_index.
 */
static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
{
	enum mlxsw_reg_ratr_trap_action trap_action;
	char ratr_pl[MLXSW_REG_RATR_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
				  &mlxsw_sp->router->adj_trap_index);
	if (err)
		return err;

	trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
			    MLXSW_REG_RATR_TYPE_ETHERNET,
			    mlxsw_sp->router->adj_trap_index,
			    mlxsw_sp->router->lb_rif_index);
	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
	mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
	if (err)
		goto err_ratr_write;

	return 0;

err_ratr_write:
	/* Release the KVD entry if programming it failed. */
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   mlxsw_sp->router->adj_trap_index);
	return err;
}
4469
/* Release the shared trap adjacency entry allocated by
 * mlxsw_sp_adj_trap_entry_init().
 */
static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   mlxsw_sp->router->adj_trap_index);
}
4475
/* Account one more nexthop group. The first group triggers creation of
 * the shared trap adjacency entry; subsequent groups only bump the
 * refcount (refcount_inc_not_zero succeeds once the count is non-zero).
 */
static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
		return 0;

	err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
	if (err)
		return err;

	/* First group: initialize the refcount from zero. */
	refcount_set(&mlxsw_sp->router->num_groups, 1);

	return 0;
}
4491
4492static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
4493{
4494 if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
4495 return;
4496
4497 mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
4498}
4499
/* Query hardware (RATRAD register) for the activity of each adjacency
 * entry in a resilient group and set the corresponding bits in the
 * caller-provided bitmap. Best-effort: allocation or query failures
 * leave the bitmap untouched.
 */
static void
mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_nexthop_group *nh_grp,
			     unsigned long *activity)
{
	char *ratrad_pl;
	int i, err;

	ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
	if (!ratrad_pl)
		return;

	mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
			      nh_grp->nhgi->count);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
	if (err)
		goto out;

	for (i = 0; i < nh_grp->nhgi->count; i++) {
		if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
			continue;
		bitmap_set(activity, i, 1);
	}

out:
	kfree(ratrad_pl);
}
4527
4528#define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000
4529
/* Query bucket activity for one resilient nexthop group and report it to
 * the core nexthop code, which uses it for idle-timer accounting.
 * Best-effort: silently returns if the temporary bitmap cannot be
 * allocated.
 */
static void
mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
				const struct mlxsw_sp_nexthop_group *nh_grp)
{
	unsigned long *activity;

	activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
	if (!activity)
		return;

	mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
	nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
					nh_grp->nhgi->count, activity);

	bitmap_free(activity);
}
4546
/* (Re-)arm the delayed work that periodically polls resilient nexthop
 * group activity, using the fixed update interval.
 */
static void
mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;

	mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
			       msecs_to_jiffies(interval));
}
4555
/* Delayed work handler: report bucket activity for every resilient nexthop
 * group. Reschedules itself only while at least one resilient group exists;
 * otherwise the polling naturally stops until a new group arms it again.
 */
static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
{
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_router *router;
	bool reschedule = false;

	router = container_of(work, struct mlxsw_sp_router,
			      nh_grp_activity_dw.work);

	/* The list and the groups on it are protected by the router lock. */
	mutex_lock(&router->lock);

	list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
		mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
		reschedule = true;
	}

	mutex_unlock(&router->lock);

	if (!reschedule)
		return;
	mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
}
4578
4579static int
4580mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
4581 const struct nh_notifier_single_info *nh,
4582 struct netlink_ext_ack *extack)
4583{
4584 int err = -EINVAL;
4585
4586 if (nh->is_fdb)
4587 NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
4588 else if (nh->has_encap)
4589 NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
4590 else
4591 err = 0;
4592
4593 return err;
4594}
4595
/* Validate a nexthop that is a member of a nexthop group (regular or
 * resilient). In addition to the single-nexthop checks, group members must
 * provide a gateway, unless they are blackhole nexthops or egress an IPIP
 * netdev.
 */
static int
mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
					  const struct nh_notifier_single_info *nh,
					  struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
	if (err)
		return err;

	/* Device-only nexthops are only supported when the netdev is an
	 * IPIP device or the nexthop is a blackhole; otherwise a gateway
	 * address is required.
	 */
	if (!nh->gw_family && !nh->is_reject &&
	    !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
		NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
		return -EINVAL;
	}

	return 0;
}
4618
/* Validate a regular (non-resilient) nexthop group object: FDB groups are
 * rejected and every member nexthop must pass the group-entry checks.
 */
static int
mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
				    const struct nh_notifier_grp_info *nh_grp,
				    struct netlink_ext_ack *extack)
{
	int i;

	if (nh_grp->is_fdb) {
		NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
		return -EINVAL;
	}

	for (i = 0; i < nh_grp->num_nh; i++) {
		const struct nh_notifier_single_info *nh;
		int err;

		nh = &nh_grp->nh_entries[i].nh;
		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
								extack);
		if (err)
			return err;
	}

	return 0;
}
4644
/* Validate the bucket count of a resilient nexthop group. The count must be
 * at least 32, fall within one of the device's supported adjacency group
 * size ranges, and exactly match a KVDL allocation size so that no
 * adjacency entries are wasted.
 */
static int
mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
					     const struct nh_notifier_res_table_info *nh_res_table,
					     struct netlink_ext_ack *extack)
{
	unsigned int alloc_size;
	bool valid_size = false;
	int err, i;

	if (nh_res_table->num_nh_buckets < 32) {
		NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
		return -EINVAL;
	}

	/* The bucket count must fit one of the ASIC's adjacency group size
	 * ranges.
	 */
	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
		const struct mlxsw_sp_adj_grp_size_range *size_range;

		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];

		if (nh_res_table->num_nh_buckets >= size_range->start &&
		    nh_res_table->num_nh_buckets <= size_range->end) {
			valid_size = true;
			break;
		}
	}

	if (!valid_size) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
		return -EINVAL;
	}

	/* The count must also correspond exactly to a KVDL partition
	 * allocation size, since resilient groups never migrate to a
	 * different size.
	 */
	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
					      nh_res_table->num_nh_buckets,
					      &alloc_size);
	if (err || nh_res_table->num_nh_buckets != alloc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
		return -EINVAL;
	}

	return 0;
}
4687
4688static int
4689mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
4690 const struct nh_notifier_res_table_info *nh_res_table,
4691 struct netlink_ext_ack *extack)
4692{
4693 int err;
4694 u16 i;
4695
4696 err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
4697 nh_res_table,
4698 extack);
4699 if (err)
4700 return err;
4701
4702 for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
4703 const struct nh_notifier_single_info *nh;
4704 int err;
4705
4706 nh = &nh_res_table->nhs[i];
4707 err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
4708 extack);
4709 if (err)
4710 return err;
4711 }
4712
4713 return 0;
4714}
4715
/* Dispatch nexthop object validation according to the notification type.
 * Only events that install or replace state are validated; other events
 * (e.g. deletion) are always accepted.
 */
static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
					 unsigned long event,
					 struct nh_notifier_info *info)
{
	struct nh_notifier_single_info *nh;

	if (event != NEXTHOP_EVENT_REPLACE &&
	    event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
	    event != NEXTHOP_EVENT_BUCKET_REPLACE)
		return 0;

	switch (info->type) {
	case NH_NOTIFIER_INFO_TYPE_SINGLE:
		return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
							    info->extack);
	case NH_NOTIFIER_INFO_TYPE_GRP:
		return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
							   info->nh_grp,
							   info->extack);
	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
		return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
							       info->nh_res_table,
							       info->extack);
	case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
		/* A bucket replacement carries a single new nexthop that must
		 * satisfy the group-entry constraints.
		 */
		nh = &info->nh_res_bucket->new_nh;
		return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
								 info->extack);
	default:
		NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
		return -EOPNOTSUPP;
	}
}
4748
/* Return true if the nexthop object routes packets towards a gateway
 * (including blackhole and IPIP nexthops), false for device-only nexthops.
 */
static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
					    const struct nh_notifier_info *info)
{
	const struct net_device *dev;

	switch (info->type) {
	case NH_NOTIFIER_INFO_TYPE_SINGLE:
		dev = info->nh->dev;
		return info->nh->gw_family || info->nh->is_reject ||
		       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
	case NH_NOTIFIER_INFO_TYPE_GRP:
	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
		/* Group members were already checked to be gateway nexthops
		 * by mlxsw_sp_nexthop_obj_group_entry_validate().
		 */
		return true;
	default:
		return false;
	}
}
4767
/* Program a blackhole nexthop: packets hitting it are discarded. */
static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_nexthop *nh)
{
	u16 lb_rif_index = mlxsw_sp->router->lb_rif_index;

	nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
	nh->should_offload = 1;
	/* The adjacency entry still needs an egress RIF, so point it at the
	 * router's loopback RIF even though traffic is discarded.
	 * NOTE(review): presumably the loopback RIF is used precisely because
	 * discarded packets never egress it — confirm against spec.
	 */
	nh->rif = mlxsw_sp->router->rifs[lb_rif_index];
}
4781
/* Undo mlxsw_sp_nexthop_obj_blackhole_init(): detach the loopback RIF and
 * clear the offload indication.
 */
static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_nexthop *nh)
{
	nh->rif = NULL;
	nh->should_offload = 0;
}
4788
/* Initialize a driver nexthop from a nexthop object notification entry:
 * record the gateway address and neighbour table, allocate a counter, link
 * the nexthop into the router's list and resolve its type (ETH/IPIP).
 * Returns 0 on success or a negative errno; on failure the nexthop is fully
 * unwound.
 */
static int
mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_nexthop_group *nh_grp,
			  struct mlxsw_sp_nexthop *nh,
			  struct nh_notifier_single_info *nh_obj, int weight)
{
	struct net_device *dev = nh_obj->dev;
	int err;

	nh->nhgi = nh_grp->nhgi;
	nh->nh_weight = weight;

	/* Device-only nexthops have no gw_family and leave gw_addr /
	 * neigh_tbl untouched.
	 */
	switch (nh_obj->gw_family) {
	case AF_INET:
		memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
		nh->neigh_tbl = &arp_tbl;
		break;
	case AF_INET6:
		memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
#if IS_ENABLED(CONFIG_IPV6)
		nh->neigh_tbl = &nd_tbl;
#endif
		break;
	}

	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
	nh->ifindex = dev->ifindex;

	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
	if (err)
		goto err_type_init;

	if (nh_obj->is_reject)
		mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);

	/* In a resilient nexthop group, all the nexthops must be written to
	 * the adjacency table, even when they currently cannot forward:
	 * unresolved ones are programmed to trap instead.
	 */
	if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
		nh->should_offload = 1;
	}

	return 0;

err_type_init:
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
	return err;
}
4841
/* Tear down a nexthop initialized by mlxsw_sp_nexthop_obj_init(), in
 * reverse order of initialization.
 */
static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_nexthop *nh)
{
	if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
		mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
	nh->should_offload = 0;
}
4852
/* Build the group info (nhgi) for a nexthop object group: allocate the
 * per-nexthop array sized by the notification type, initialize each member,
 * program the adjacency entries and, for resilient groups, start activity
 * polling. On failure everything is unwound in reverse order.
 */
static int
mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_nexthop_group *nh_grp,
				     struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_nexthop *nh;
	bool is_resilient = false;
	unsigned int nhs;
	int err, i;

	/* The number of driver nexthops depends on the object type: one for
	 * a single nexthop, one per member for a group, one per bucket for a
	 * resilient group.
	 */
	switch (info->type) {
	case NH_NOTIFIER_INFO_TYPE_SINGLE:
		nhs = 1;
		break;
	case NH_NOTIFIER_INFO_TYPE_GRP:
		nhs = info->nh_grp->num_nh;
		break;
	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
		nhs = info->nh_res_table->num_nh_buckets;
		is_resilient = true;
		break;
	default:
		return -EINVAL;
	}

	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
	if (!nhgi)
		return -ENOMEM;
	nh_grp->nhgi = nhgi;
	nhgi->nh_grp = nh_grp;
	nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
	nhgi->is_resilient = is_resilient;
	nhgi->count = nhs;
	for (i = 0; i < nhgi->count; i++) {
		struct nh_notifier_single_info *nh_obj;
		int weight;

		nh = &nhgi->nexthops[i];
		switch (info->type) {
		case NH_NOTIFIER_INFO_TYPE_SINGLE:
			nh_obj = info->nh;
			weight = 1;
			break;
		case NH_NOTIFIER_INFO_TYPE_GRP:
			nh_obj = &info->nh_grp->nh_entries[i].nh;
			weight = info->nh_grp->nh_entries[i].weight;
			break;
		case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
			/* Buckets carry no weight of their own. */
			nh_obj = &info->nh_res_table->nhs[i];
			weight = 1;
			break;
		default:
			err = -EINVAL;
			goto err_nexthop_obj_init;
		}
		err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
						weight);
		if (err)
			goto err_nexthop_obj_init;
	}
	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
	if (err)
		goto err_group_inc;
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err) {
		NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
		goto err_group_refresh;
	}

	/* Track resilient groups so that the delayed work can periodically
	 * report their bucket activity; the first group arms the work.
	 */
	if (nhgi->is_resilient) {
		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
			mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
		list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
	}

	return 0;

err_group_refresh:
	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
err_group_inc:
	i = nhgi->count;
err_nexthop_obj_init:
	/* Unwind only the nexthops that were successfully initialized. */
	for (i--; i >= 0; i--) {
		nh = &nhgi->nexthops[i];
		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
	}
	kfree(nhgi);
	return err;
}
4946
4947static void
4948mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
4949 struct mlxsw_sp_nexthop_group *nh_grp)
4950{
4951 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
4952 struct mlxsw_sp_router *router = mlxsw_sp->router;
4953 int i;
4954
4955 if (nhgi->is_resilient) {
4956 list_del(&nhgi->list);
4957 if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
4958 cancel_delayed_work(&router->nh_grp_activity_dw);
4959 }
4960
4961 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
4962 for (i = nhgi->count - 1; i >= 0; i--) {
4963 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
4964
4965 mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
4966 }
4967 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4968 WARN_ON_ONCE(nhgi->adj_index_valid);
4969 kfree(nhgi);
4970}
4971
/* Allocate and initialize a nexthop group for a nexthop object
 * notification. The group is created with can_destroy = false; the caller
 * decides when it may be destroyed. Returns the group or ERR_PTR().
 */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
				  struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	int err;

	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->vr_list);
	err = rhashtable_init(&nh_grp->vr_ht,
			      &mlxsw_sp_nexthop_group_vr_ht_params);
	if (err)
		goto err_nexthop_group_vr_ht_init;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
	nh_grp->obj.id = info->id;

	err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
	if (err)
		goto err_nexthop_group_info_init;

	nh_grp->can_destroy = false;

	return nh_grp;

err_nexthop_group_info_init:
	rhashtable_destroy(&nh_grp->vr_ht);
err_nexthop_group_vr_ht_init:
	kfree(nh_grp);
	return ERR_PTR(err);
}
5005
/* Destroy a nexthop object group, but only once its owner has marked it
 * destroyable; otherwise the call is a no-op (destruction is deferred).
 */
static void
mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (!nh_grp->can_destroy)
		return;
	mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
	/* No FIB entries or virtual routers may still reference the group. */
	WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
	rhashtable_destroy(&nh_grp->vr_ht);
	kfree(nh_grp);
}
5018
5019static struct mlxsw_sp_nexthop_group *
5020mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
5021{
5022 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
5023
5024 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
5025 cmp_arg.id = id;
5026 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
5027 &cmp_arg,
5028 mlxsw_sp_nexthop_group_ht_params);
5029}
5030
/* Insert a freshly created nexthop object group into the router's group
 * hash table.
 */
static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp)
{
	return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
}
5036
/* Replace the contents of an existing nexthop object group with those of a
 * newly created one by swapping their group-info (nhgi) pointers, then
 * steering traffic according to the validity of the old and new adjacency
 * indexes. On success the shell of the new group (now holding the old
 * nhgi) is destroyed; on failure the swap is reverted.
 */
static int
mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop_group *nh_grp,
				   struct mlxsw_sp_nexthop_group *old_nh_grp,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
	struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
	int err;

	/* Swap the group-info structures so that the long-lived group object
	 * (referenced by FIB entries) now carries the new nexthops.
	 */
	old_nh_grp->nhgi = new_nhgi;
	new_nhgi->nh_grp = old_nh_grp;
	nh_grp->nhgi = old_nhgi;
	old_nhgi->nh_grp = nh_grp;

	if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
		/* Both the old and the new group are programmed in the
		 * adjacency table; atomically re-point all users of the old
		 * adjacency index to the new one.
		 */
		err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
						     old_nhgi->adj_index,
						     old_nhgi->ecmp_size);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
			goto err_out;
		}
	} else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
		/* The new group has no valid adjacency entries; rewrite the
		 * routes using the group so they trap packets to the CPU.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
			goto err_out;
		}
	} else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
		/* The old group was trapping; rewrite the routes so they
		 * forward using the new adjacency entries.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
			goto err_out;
		}
	}

	/* Refresh the offload indication of the group now carrying the new
	 * nexthops.
	 */
	mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);

	/* The temporary group now owns the old nhgi; destroying it releases
	 * the replaced nexthops and adjacency entries.
	 */
	nh_grp->can_destroy = true;
	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);

	return 0;

err_out:
	/* Revert the nhgi swap so both groups are back to their original
	 * state.
	 */
	old_nhgi->nh_grp = old_nh_grp;
	nh_grp->nhgi = new_nhgi;
	new_nhgi->nh_grp = nh_grp;
	old_nh_grp->nhgi = old_nhgi;
	return err;
}
5107
/* Handle NEXTHOP_EVENT_REPLACE: create a group from the notification and
 * either insert it (new ID) or replace the contents of the existing group
 * with the same ID. The temporary group is destroyed on any failure.
 */
static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
				    struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
	struct netlink_ext_ack *extack = info->extack;
	int err;

	nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
	if (IS_ERR(nh_grp))
		return PTR_ERR(nh_grp);

	old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!old_nh_grp)
		err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
	else
		err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
							 old_nh_grp, extack);

	if (err) {
		nh_grp->can_destroy = true;
		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
	}

	return err;
}
5133
/* Handle NEXTHOP_EVENT_DEL: remove the group from the hash table and mark
 * it destroyable.
 */
static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
				     struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!nh_grp)
		return;

	nh_grp->can_destroy = true;
	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);

	/* If FIB entries still reference the group, defer destruction until
	 * the last entry drops its reference (see the group put path).
	 */
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
}
5153
/* Read the current state of one adjacency entry (nexthop bucket) from the
 * device into 'ratr_pl' via a RATR query.
 */
static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
					     u32 adj_index, char *ratr_pl)
{
	MLXSW_REG_ZERO(ratr, ratr_pl);
	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
	/* The adjacency index is split across two register fields. */
	mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
	mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);

	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}
5164
/* Compare the RATR payload written during bucket replacement with the one
 * read back from the device. Returns 0 if they match, -EINVAL otherwise
 * (meaning the non-forced replacement did not take effect).
 */
static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
{
	/* Normalize the fields that legitimately differ between the two
	 * payloads — the opcode and the activity bit — so they do not
	 * affect the comparison.
	 */
	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
	mlxsw_reg_ratr_a_set(ratr_pl, 0);
	mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
	mlxsw_reg_ratr_a_set(ratr_pl_new, 0);

	/* If the payloads are now identical, the device accepted the
	 * replacement.
	 */
	if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
		return 0;

	return -EINVAL;
}
5183
/* Write a replaced nexthop bucket to its adjacency entry. A non-forced
 * replacement only succeeds if the bucket was idle; in that case the entry
 * is read back and compared to verify the write took effect.
 */
static int
mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh,
				       struct nh_notifier_info *info)
{
	u16 bucket_index = info->nh_res_bucket->bucket_index;
	struct netlink_ext_ack *extack = info->extack;
	bool force = info->nh_res_bucket->force;
	char ratr_pl_new[MLXSW_REG_RATR_LEN];
	char ratr_pl[MLXSW_REG_RATR_LEN];
	u32 adj_index;
	int err;

	/* With an idle timer shorter than our activity polling interval we
	 * cannot tell whether the bucket is really idle, so force the
	 * replacement.
	 */
	if (!force && info->nh_res_bucket->idle_timer_ms <
	    MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
		force = true;

	adj_index = nh->nhgi->adj_index + bucket_index;
	err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
		return err;
	}

	if (!force) {
		/* A non-forced write is silently ignored by the device if the
		 * bucket was active; read back and compare to detect that.
		 */
		err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
							ratr_pl_new);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
			return err;
		}

		err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
			return err;
		}
	}

	nh->update = 0;
	nh->offloaded = 1;
	mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);

	return 0;
}
5232
/* Handle NEXTHOP_EVENT_BUCKET_REPLACE: swap the nexthop occupying one
 * bucket of a resilient group for the new nexthop carried in the
 * notification. On failure the old nexthop is restored and the bucket is
 * reported as offloaded so the state stays consistent.
 */
static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
					       struct nh_notifier_info *info)
{
	u16 bucket_index = info->nh_res_bucket->bucket_index;
	struct netlink_ext_ack *extack = info->extack;
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct nh_notifier_single_info *nh_obj;
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	int err;

	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!nh_grp) {
		NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
		return -EINVAL;
	}

	nhgi = nh_grp->nhgi;

	if (bucket_index >= nhgi->count) {
		NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
		return -EINVAL;
	}

	/* Release the nexthop currently occupying the bucket before
	 * initializing the replacement in its place.
	 */
	nh = &nhgi->nexthops[bucket_index];
	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);

	nh_obj = &info->nh_res_bucket->new_nh;
	err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
		goto err_nexthop_obj_init;
	}

	err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
	if (err)
		goto err_nexthop_obj_bucket_adj_update;

	return 0;

err_nexthop_obj_bucket_adj_update:
	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
err_nexthop_obj_init:
	/* Re-install the old nexthop so the bucket is not left empty. */
	nh_obj = &info->nh_res_bucket->old_nh;
	mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
	/* The old adjacency entry was never overwritten, so the bucket is
	 * still offloaded with its previous contents.
	 */
	nh->update = 0;
	nh->offloaded = 1;
	return err;
}
5283
/* Notifier callback for nexthop object events. Validation runs before the
 * router lock is taken; the actual processing is serialized by it. The
 * error (if any) is translated into notifier return-value form.
 */
static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
				      unsigned long event, void *ptr)
{
	struct nh_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;
	int err = 0;

	router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
	err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
	if (err)
		goto out;

	mutex_lock(&router->lock);

	switch (event) {
	case NEXTHOP_EVENT_REPLACE:
		err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
		break;
	case NEXTHOP_EVENT_DEL:
		mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
		break;
	case NEXTHOP_EVENT_BUCKET_REPLACE:
		err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
							  info);
		break;
	default:
		break;
	}

	mutex_unlock(&router->lock);

out:
	return notifier_from_errno(err);
}
5318
5319static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5320 struct fib_info *fi)
5321{
5322 const struct fib_nh *nh = fib_info_nh(fi, 0);
5323
5324 return nh->fib_nh_scope == RT_SCOPE_LINK ||
5325 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
5326}
5327
/* Build the group info for an IPv4 (fib_info based) nexthop group: one
 * driver nexthop per fib_info path, then program the adjacency entries.
 * On failure all successfully initialized nexthops are unwound in reverse.
 */
static int
mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_nexthop *nh;
	int err, i;

	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
	if (!nhgi)
		return -ENOMEM;
	nh_grp->nhgi = nhgi;
	nhgi->nh_grp = nh_grp;
	nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
	nhgi->count = nhs;
	for (i = 0; i < nhgi->count; i++) {
		struct fib_nh *fib_nh;

		nh = &nhgi->nexthops[i];
		fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop4_init;
	}
	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
	if (err)
		goto err_group_inc;
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err)
		goto err_group_refresh;

	return 0;

err_group_refresh:
	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
err_group_inc:
	i = nhgi->count;
err_nexthop4_init:
	/* Unwind only the nexthops that were successfully initialized. */
	for (i--; i >= 0; i--) {
		nh = &nhgi->nexthops[i];
		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	kfree(nhgi);
	return err;
}
5374
/* Tear down IPv4 group info in reverse order of
 * mlxsw_sp_nexthop4_group_info_init().
 */
static void
mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	int i;

	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];

		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	/* All nexthops are gone, so the refresh must have released the
	 * adjacency index.
	 */
	WARN_ON_ONCE(nhgi->adj_index_valid);
	kfree(nhgi);
}
5392
/* Create an IPv4 nexthop group keyed by its fib_info (a reference to which
 * is held for the group's lifetime) and insert it into the group hash
 * table. Returns the group or ERR_PTR().
 */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	int err;

	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->vr_list);
	err = rhashtable_init(&nh_grp->vr_ht,
			      &mlxsw_sp_nexthop_group_vr_ht_params);
	if (err)
		goto err_nexthop_group_vr_ht_init;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
	nh_grp->ipv4.fi = fi;
	/* The group keeps the fib_info alive until it is destroyed. */
	fib_info_hold(fi);

	err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_info_init;

	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;

	nh_grp->can_destroy = true;

	return nh_grp;

err_nexthop_group_insert:
	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
err_nexthop_group_info_init:
	fib_info_put(fi);
	rhashtable_destroy(&nh_grp->vr_ht);
err_nexthop_group_vr_ht_init:
	kfree(nh_grp);
	return ERR_PTR(err);
}
5433
/* Destroy an IPv4 nexthop group (no-op unless marked destroyable) and
 * release its fib_info reference.
 */
static void
mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (!nh_grp->can_destroy)
		return;
	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
	fib_info_put(nh_grp->ipv4.fi);
	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
	rhashtable_destroy(&nh_grp->vr_ht);
	kfree(nh_grp);
}
5447
/* Attach an IPv4 FIB entry to its nexthop group. If the route uses a
 * nexthop object, the object group must already exist; otherwise an IPv4
 * group keyed by the fib_info is looked up or created.
 */
static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	if (fi->nh) {
		/* Nexthop-object routes reuse the group created from the
		 * nexthop notification; its absence is a driver bug.
		 */
		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
							   fi->nh->id);
		if (WARN_ON_ONCE(!nh_grp))
			return -EINVAL;
		goto out;
	}

	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}
out:
	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
	fib_entry->nh_group = nh_grp;
	return 0;
}
5473
/* Detach an IPv4 FIB entry from its nexthop group. The last entry to leave
 * destroys the group — via the object path for nexthop-object groups
 * (which may have deferred destruction), or the IPv4 path otherwise.
 */
static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;

	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
		return;
	}

	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
}
5490
/* Only IPv4 routes with the default (zero) TOS are candidates for
 * offloading.
 */
static bool
mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	return !fib4_entry->tos;
}
5500
/* Decide whether a FIB entry can be offloaded to the device, based on the
 * protocol-specific check and the entry's type-specific prerequisites.
 */
static bool
mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
			return false;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		break;
	}

	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		/* Remote routes need programmed adjacency entries. */
		return !!nh_group->nhgi->adj_index_valid;
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		/* Local routes need a RIF to forward through. */
		return !!nh_group->nhgi->nh_rif;
	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
		return true;
	default:
		return false;
	}
}
5528
/* Find the driver nexthop within 'nh_grp' that corresponds to the given
 * IPv6 route, matching on both the egress netdev and the gateway address.
 * Returns NULL if no member matches.
 */
static struct mlxsw_sp_nexthop *
mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	int i;

	for (i = 0; i < nh_grp->nhgi->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
				    &rt->fib6_nh->fib_nh_gw6))
			return nh;
	}

	return NULL;
}
5547
5548static void
5549mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
5550 struct fib_entry_notifier_info *fen_info)
5551{
5552 u32 *p_dst = (u32 *) &fen_info->dst;
5553 struct fib_rt_info fri;
5554
5555 fri.fi = fen_info->fi;
5556 fri.tb_id = fen_info->tb_id;
5557 fri.dst = cpu_to_be32(*p_dst);
5558 fri.dst_len = fen_info->dst_len;
5559 fri.tos = fen_info->tos;
5560 fri.type = fen_info->type;
5561 fri.offload = false;
5562 fri.trap = false;
5563 fri.offload_failed = true;
5564 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
5565}
5566
/* Refresh the IPv4 FIB core's hardware flags for an installed entry:
 * offloaded entries forward in hardware, non-offloaded ones trap to the
 * CPU.
 */
static void
mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry)
{
	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
	int dst_len = fib_entry->fib_node->key.prefix_len;
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct fib_rt_info fri;
	bool should_offload;

	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	fri.fi = fib4_entry->fi;
	fri.tb_id = fib4_entry->tb_id;
	fri.dst = cpu_to_be32(*p_dst);
	fri.dst_len = dst_len;
	fri.tos = fib4_entry->tos;
	fri.type = fib4_entry->type;
	/* Offload and trap are mutually exclusive. */
	fri.offload = should_offload;
	fri.trap = !should_offload;
	fri.offload_failed = false;
	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}
5591
/* Clear all hardware flags of an IPv4 FIB entry in the FIB core, e.g. when
 * the entry is removed from the device.
 */
static void
mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
	int dst_len = fib_entry->fib_node->key.prefix_len;
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct fib_rt_info fri;

	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	fri.fi = fib4_entry->fi;
	fri.tb_id = fib4_entry->tb_id;
	fri.dst = cpu_to_be32(*p_dst);
	fri.dst_len = dst_len;
	fri.tos = fib4_entry->tos;
	fri.type = fib4_entry->type;
	fri.offload = false;
	fri.trap = false;
	fri.offload_failed = false;
	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}
5614
#if IS_ENABLED(CONFIG_IPV6)
/* Report to the IPv6 FIB core that offloading failed for each of the given
 * routes: offload and trap are cleared, offload_failed is set.
 */
static void
mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
				      struct fib6_info **rt_arr,
				      unsigned int nrt6)
{
	int i;

	/* flags: offload = false, trap = false, offload_failed = true */
	for (i = 0; i < nrt6; i++)
		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
				       false, false, true);
}
#else
/* IPv6 disabled: nothing to report. */
static void
mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
				      struct fib6_info **rt_arr,
				      unsigned int nrt6)
{
}
#endif
5638
#if IS_ENABLED(CONFIG_IPV6)
/* Refresh the IPv6 FIB core's hardware flags for every route backing this
 * entry: offloaded entries forward in hardware, others trap to the CPU.
 */
static void
mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	bool should_offload;

	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);

	/* An IPv6 entry aggregates several fib6_info routes; all of them
	 * share the same hardware state.
	 */
	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);
	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
				       should_offload, !should_offload, false);
}
#else
/* IPv6 disabled: nothing to report. */
static void
mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry)
{
}
#endif
5666
5667#if IS_ENABLED(CONFIG_IPV6)
5668static void
5669mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5670 struct mlxsw_sp_fib_entry *fib_entry)
5671{
5672 struct mlxsw_sp_fib6_entry *fib6_entry;
5673 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5674
5675 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
5676 common);
5677 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
5678 fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
5679 false, false, false);
5680}
5681#else
5682static void
5683mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5684 struct mlxsw_sp_fib_entry *fib_entry)
5685{
5686}
5687#endif
5688
5689static void
5690mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
5691 struct mlxsw_sp_fib_entry *fib_entry)
5692{
5693 switch (fib_entry->fib_node->fib->proto) {
5694 case MLXSW_SP_L3_PROTO_IPV4:
5695 mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
5696 break;
5697 case MLXSW_SP_L3_PROTO_IPV6:
5698 mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
5699 break;
5700 }
5701}
5702
5703static void
5704mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
5705 struct mlxsw_sp_fib_entry *fib_entry)
5706{
5707 switch (fib_entry->fib_node->fib->proto) {
5708 case MLXSW_SP_L3_PROTO_IPV4:
5709 mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5710 break;
5711 case MLXSW_SP_L3_PROTO_IPV6:
5712 mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5713 break;
5714 }
5715}
5716
5717static void
5718mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
5719 struct mlxsw_sp_fib_entry *fib_entry,
5720 enum mlxsw_sp_fib_entry_op op)
5721{
5722 switch (op) {
5723 case MLXSW_SP_FIB_ENTRY_OP_WRITE:
5724 case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
5725 mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
5726 break;
5727 case MLXSW_SP_FIB_ENTRY_OP_DELETE:
5728 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
5729 break;
5730 default:
5731 break;
5732 }
5733}
5734
/* Private state of the basic (RALUE register) low-level interface, stored
 * in the operation context's ll_priv area: the packed register payload
 * built up by the *_pack() callbacks and written by the commit callback.
 */
struct mlxsw_sp_fib_entry_op_ctx_basic {
	char ralue_pl[MLXSW_REG_RALUE_LEN];
};
5738
/* Encode the route key (virtual router, prefix and prefix length) and the
 * requested operation into the RALUE register payload held in the op
 * context. The action is packed separately by the act_*_pack() callbacks.
 */
static void
mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					enum mlxsw_sp_l3proto proto,
					enum mlxsw_sp_fib_entry_op op,
					u16 virtual_router, u8 prefix_len,
					unsigned char *addr,
					struct mlxsw_sp_fib_entry_priv *priv)
{
	struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
	enum mlxsw_reg_ralxx_protocol ralxx_proto;
	char *ralue_pl = op_ctx_basic->ralue_pl;
	enum mlxsw_reg_ralue_op ralue_op;

	/* mlxsw_sp_l3proto values are cast directly to the register's
	 * protocol encoding.
	 */
	ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;

	/* Translate the abstract FIB operation into a RALUE register op;
	 * both write and in-place update map to a register write.
	 */
	switch (op) {
	case MLXSW_SP_FIB_ENTRY_OP_WRITE:
	case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
		ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
		break;
	case MLXSW_SP_FIB_ENTRY_OP_DELETE:
		ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
				      virtual_router, prefix_len, (u32 *) addr);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
				      virtual_router, prefix_len, addr);
		break;
	}
}
5778
5779static void
5780mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5781 enum mlxsw_reg_ralue_trap_action trap_action,
5782 u16 trap_id, u32 adjacency_index, u16 ecmp_size)
5783{
5784 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5785
5786 mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
5787 trap_id, adjacency_index, ecmp_size);
5788}
5789
5790static void
5791mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5792 enum mlxsw_reg_ralue_trap_action trap_action,
5793 u16 trap_id, u16 local_erif)
5794{
5795 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5796
5797 mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
5798 trap_id, local_erif);
5799}
5800
5801static void
5802mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
5803{
5804 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5805
5806 mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
5807}
5808
5809static void
5810mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5811 u32 tunnel_ptr)
5812{
5813 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5814
5815 mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
5816}
5817
5818static int
5819mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
5820 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5821 bool *postponed_for_bulk)
5822{
5823 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
5824
5825 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
5826 op_ctx_basic->ralue_pl);
5827}
5828
/* With the basic interface every operation is committed synchronously, so
 * an existing entry is by definition already in the device.
 */
static bool
mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
{
	return true;
}
5834
5835static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5836 struct mlxsw_sp_fib_entry *fib_entry,
5837 enum mlxsw_sp_fib_entry_op op)
5838{
5839 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
5840
5841 mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
5842 fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
5843 fib_entry->fib_node->key.prefix_len,
5844 fib_entry->fib_node->key.addr,
5845 fib_entry->priv);
5846}
5847
5848static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
5849 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5850 const struct mlxsw_sp_router_ll_ops *ll_ops)
5851{
5852 bool postponed_for_bulk = false;
5853 int err;
5854
5855 err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
5856 if (!postponed_for_bulk)
5857 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
5858 return err;
5859}
5860
/* Program a route whose action is forwarding via a nexthop group. */
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_sp_fib_entry_op op)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with the provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = nhgi->adj_index;
		ecmp_size = nhgi->ecmp_size;
	} else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
		/* Group has nexthops and a RIF but no valid adjacency
		 * index: point at the router's dedicated trap adjacency
		 * entry instead.
		 */
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = mlxsw_sp->router->adj_trap_index;
		ecmp_size = 1;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
	ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
					  adjacency_index, ecmp_size);
	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
5896
/* Program a route whose action is local forwarding on the nexthop RIF. */
static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_sp_fib_entry_op op)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u16 rif_index = 0;

	/* When the entry can be offloaded, forward on the nexthop RIF;
	 * otherwise trap matching packets to the CPU.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif_index = rif->rif_index;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
	ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
5920
5921static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
5922 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5923 struct mlxsw_sp_fib_entry *fib_entry,
5924 enum mlxsw_sp_fib_entry_op op)
5925{
5926 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5927
5928 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5929 ll_ops->fib_entry_act_ip2me_pack(op_ctx);
5930 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5931}
5932
5933static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
5934 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5935 struct mlxsw_sp_fib_entry *fib_entry,
5936 enum mlxsw_sp_fib_entry_op op)
5937{
5938 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5939 enum mlxsw_reg_ralue_trap_action trap_action;
5940
5941 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
5942 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5943 ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
5944 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5945}
5946
5947static int
5948mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
5949 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5950 struct mlxsw_sp_fib_entry *fib_entry,
5951 enum mlxsw_sp_fib_entry_op op)
5952{
5953 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5954 enum mlxsw_reg_ralue_trap_action trap_action;
5955 u16 trap_id;
5956
5957 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
5958 trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
5959
5960 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5961 ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
5962 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
5963}
5964
/* Program a route that decapsulates IP-in-IP traffic: configure the
 * tunnel's decap parameters, then point the route at the tunnel's decap
 * entry via an IP2ME-tunnel action.
 */
static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_sp_fib_entry_op op)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	int err;

	/* Decap entries are only created together with an IPIP entry;
	 * a missing one indicates a driver bug.
	 */
	if (WARN_ON(!ipip_entry))
		return -EINVAL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	/* Configure the decap parameters at the tunnel index before the
	 * route starts referencing it.
	 */
	err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
				     fib_entry->decap.tunnel_index);
	if (err)
		return err;

	mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
	ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
					     fib_entry->decap.tunnel_index);
	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
5990
5991static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
5992 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5993 struct mlxsw_sp_fib_entry *fib_entry,
5994 enum mlxsw_sp_fib_entry_op op)
5995{
5996 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
5997
5998 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
5999 ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
6000 fib_entry->decap.tunnel_index);
6001 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
6002}
6003
/* Dispatch a FIB operation to the handler matching the entry's type.
 * Returns -EINVAL for an unknown type (should not happen — all enum
 * values are covered).
 */
static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				   struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_sp_fib_entry_op op)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
	}
	return -EINVAL;
}
6027
6028static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
6029 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6030 struct mlxsw_sp_fib_entry *fib_entry,
6031 enum mlxsw_sp_fib_entry_op op)
6032{
6033 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
6034
6035 if (err)
6036 return err;
6037
6038 mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
6039
6040 return err;
6041}
6042
6043static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6044 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6045 struct mlxsw_sp_fib_entry *fib_entry,
6046 bool is_new)
6047{
6048 return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
6049 is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
6050 MLXSW_SP_FIB_ENTRY_OP_UPDATE);
6051}
6052
6053static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
6054 struct mlxsw_sp_fib_entry *fib_entry)
6055{
6056 struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
6057
6058 mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
6059 return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
6060}
6061
6062static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
6063 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6064 struct mlxsw_sp_fib_entry *fib_entry)
6065{
6066 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
6067
6068 if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
6069 return 0;
6070 return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
6071 MLXSW_SP_FIB_ENTRY_OP_DELETE);
6072}
6073
/* Derive the device entry type from the kernel route type in the
 * notification. May acquire extra state (e.g. IPIP decap resources);
 * released by mlxsw_sp_fib4_entry_type_unset().
 */
static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
	int ifindex = nhgi->nexthops[0].ifindex;
	struct mlxsw_sp_ipip_entry *ipip_entry;

	switch (fen_info->type) {
	case RTN_LOCAL:
		/* A local route may actually be the decap destination of an
		 * IP-in-IP tunnel whose overlay device is up.
		 */
		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
							       MLXSW_SP_L3_PROTO_IPV4, dip);
		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
							     fib_entry,
							     ipip_entry);
		}
		/* ...or the decap destination of an NVE tunnel. */
		if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
						 MLXSW_SP_L3_PROTO_IPV4,
						 &dip)) {
			u32 tunnel_index;

			tunnel_index = router->nve_decap_config.tunnel_index;
			fib_entry->decap.tunnel_index = tunnel_index;
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
			return 0;
		}
		/* Otherwise treat it like broadcast: trap to CPU. */
		fallthrough;
	case RTN_BROADCAST:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_BLACKHOLE:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
		return 0;
	case RTN_UNREACHABLE:
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
		return 0;
	case RTN_UNICAST:
		/* Gateway routes are forwarded via an adjacency (remote);
		 * directly connected ones via the RIF (local).
		 */
		if (nhgi->gateway)
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	default:
		return -EINVAL;
	}
}
6131
6132static void
6133mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6134 struct mlxsw_sp_fib_entry *fib_entry)
6135{
6136 switch (fib_entry->type) {
6137 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
6138 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
6139 break;
6140 default:
6141 break;
6142 }
6143}
6144
/* IPv4 flavor of entry type teardown; delegates to the generic helper. */
static void
mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib4_entry *fib4_entry)
{
	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
}
6151
/* Allocate and initialize an IPv4 FIB entry for the given node from a FIB
 * notification: low-level private state, nexthop group (linked to the
 * node's virtual router) and entry type. Takes a reference on the
 * kernel's fib_info. On error, unwinds in reverse order and returns
 * ERR_PTR().
 */
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
	if (!fib4_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib4_entry->common;

	fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
	if (IS_ERR(fib_entry->priv)) {
		err = PTR_ERR(fib_entry->priv);
		goto err_fib_entry_priv_create;
	}

	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
	if (err)
		goto err_nexthop4_group_get;

	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
					     fib_node->fib);
	if (err)
		goto err_nexthop_group_vr_link;

	/* Must come after the nexthop group is set up — type selection
	 * inspects nhgi.
	 */
	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
	if (err)
		goto err_fib4_entry_type_set;

	/* Hold the kernel's fib_info so the cached pointer stays valid. */
	fib4_entry->fi = fen_info->fi;
	fib_info_hold(fib4_entry->fi);
	fib4_entry->tb_id = fen_info->tb_id;
	fib4_entry->type = fen_info->type;
	fib4_entry->tos = fen_info->tos;

	fib_entry->fib_node = fib_node;

	return fib4_entry;

err_fib4_entry_type_set:
	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
err_nexthop_group_vr_link:
	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
err_nexthop4_group_get:
	mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
err_fib_entry_priv_create:
	kfree(fib4_entry);
	return ERR_PTR(err);
}
6205
/* Tear down an IPv4 FIB entry: reverse of mlxsw_sp_fib4_entry_create().
 * The caller is responsible for the fib_node itself.
 */
static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib4_entry *fib4_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;

	fib_info_put(fib4_entry->fi);
	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
	mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
					 fib_node->fib);
	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
	mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
	kfree(fib4_entry);
}
6219
/* Find the IPv4 FIB entry matching a notification: locate the virtual
 * router and FIB node by prefix, then verify the node's entry matches the
 * notification's table, TOS, type and fib_info. Returns NULL when no
 * exact match exists.
 */
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node)
		return NULL;

	fib4_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib4_entry, common);
	if (fib4_entry->tb_id == fen_info->tb_id &&
	    fib4_entry->tos == fen_info->tos &&
	    fib4_entry->type == fen_info->type &&
	    fib4_entry->fi == fen_info->fi)
		return fib4_entry;

	return NULL;
}
6250
/* Hash table keyed by {prefix address, prefix length}, mapping to FIB
 * nodes. The key is compared as raw bytes (default rhashtable obj_cmpfn),
 * so keys must be fully zeroed before use.
 */
static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};
6257
/* Insert a FIB node into its FIB's prefix hash table. */
static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}
6264
/* Remove a FIB node from its FIB's prefix hash table. */
static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}
6271
6272static struct mlxsw_sp_fib_node *
6273mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
6274 size_t addr_len, unsigned char prefix_len)
6275{
6276 struct mlxsw_sp_fib_key key;
6277
6278 memset(&key, 0, sizeof(key));
6279 memcpy(key.addr, addr, addr_len);
6280 key.prefix_len = prefix_len;
6281 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
6282}
6283
6284static struct mlxsw_sp_fib_node *
6285mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
6286 size_t addr_len, unsigned char prefix_len)
6287{
6288 struct mlxsw_sp_fib_node *fib_node;
6289
6290 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
6291 if (!fib_node)
6292 return NULL;
6293
6294 list_add(&fib_node->list, &fib->node_list);
6295 memcpy(fib_node->key.addr, addr, addr_len);
6296 fib_node->key.prefix_len = prefix_len;
6297
6298 return fib_node;
6299}
6300
/* Unlink a FIB node from its FIB's node list and free it. */
static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
	list_del(&fib_node->list);
	kfree(fib_node);
}
6306
/* Account the node's prefix length in the protocol's LPM tree. If the
 * prefix length is already used by the current tree, just bump its
 * reference count. Otherwise get (or build) a tree that also covers the
 * new prefix length and rebind all virtual routers to it.
 */
static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_fib *fib = fib_node->fib;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
		goto out;

	/* Request a tree whose prefix usage is the current one plus this
	 * node's prefix length.
	 */
	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 fib->proto);
	if (IS_ERR(lpm_tree))
		return PTR_ERR(lpm_tree);

	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
	if (err)
		goto err_lpm_tree_replace;

out:
	/* Note: after a successful replace, lpm_tree is the new tree. */
	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
	return 0;

err_lpm_tree_replace:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	return err;
}
6338
/* Drop the node's prefix-length reference on the FIB's LPM tree. When the
 * last reference for that prefix length goes away, try to migrate to a
 * smaller tree without it.
 */
static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_fib *fib = fib_node->fib;
	int err;

	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
		return;

	/* Try to construct a new LPM tree from the current prefix usage
	 * minus the no-longer-used prefix length. If that fails, keep
	 * using the current tree.
	 */
	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
				    fib_node->key.prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 fib->proto);
	if (IS_ERR(lpm_tree))
		return;

	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
	if (err)
		goto err_lpm_tree_replace;

	return;

err_lpm_tree_replace:
	/* Drop the reference taken by mlxsw_sp_lpm_tree_get() above. */
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
}
6369
/* Bind a freshly created node to its FIB: insert it into the prefix hash
 * table and account its prefix length in the LPM tree. Unwinds on error.
 */
static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node,
				  struct mlxsw_sp_fib *fib)
{
	int err;

	err = mlxsw_sp_fib_node_insert(fib, fib_node);
	if (err)
		return err;
	fib_node->fib = fib;

	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
	if (err)
		goto err_fib_lpm_tree_link;

	return 0;

err_fib_lpm_tree_link:
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
	return err;
}
6392
/* Reverse of mlxsw_sp_fib_node_init(): drop the LPM tree prefix reference
 * and remove the node from the prefix hash table.
 */
static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}
6402
/* Get (or create) the FIB node for a prefix in the given table. Takes a
 * reference on the virtual router; on the reuse path the existing node's
 * VR reference is implicitly shared. Balanced by mlxsw_sp_fib_node_put().
 */
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
		      size_t addr_len, unsigned char prefix_len,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, proto);

	/* Reuse an existing node for the same prefix if there is one. */
	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}
6440
/* Release a FIB node obtained by mlxsw_sp_fib_node_get(). The node stays
 * alive for as long as it still holds an entry; otherwise it is torn
 * down and the virtual router reference is dropped.
 */
static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_vr *vr = fib_node->fib->vr;

	if (fib_node->fib_entry)
		return;
	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}
6452
6453static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
6454 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6455 struct mlxsw_sp_fib_entry *fib_entry)
6456{
6457 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
6458 bool is_new = !fib_node->fib_entry;
6459 int err;
6460
6461 fib_node->fib_entry = fib_entry;
6462
6463 err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
6464 if (err)
6465 goto err_fib_entry_update;
6466
6467 return 0;
6468
6469err_fib_entry_update:
6470 fib_node->fib_entry = NULL;
6471 return err;
6472}
6473
/* Remove an entry from the device and detach it from its node. The entry
 * is detached even if the hardware deletion failed.
 */
static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					    struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	int err;

	err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
	fib_node->fib_entry = NULL;
	return err;
}
6485
6486static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
6487 struct mlxsw_sp_fib_entry *fib_entry)
6488{
6489 struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
6490
6491 mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
6492 __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
6493}
6494
6495static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
6496{
6497 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
6498 struct mlxsw_sp_fib4_entry *fib4_replaced;
6499
6500 if (!fib_node->fib_entry)
6501 return true;
6502
6503 fib4_replaced = container_of(fib_node->fib_entry,
6504 struct mlxsw_sp_fib4_entry, common);
6505 if (fib4_entry->tb_id == RT_TABLE_MAIN &&
6506 fib4_replaced->tb_id == RT_TABLE_LOCAL)
6507 return false;
6508
6509 return true;
6510}
6511
/* Handle an IPv4 route replace notification: create the new entry, link
 * it to the prefix's node (programming the device) and destroy the entry
 * it replaced, if any.
 */
static int
mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
			     const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
	struct mlxsw_sp_fib_entry *replaced;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	/* Routes underpinned by a nexthop object that the driver does not
	 * track are ignored.
	 */
	if (fen_info->fi->nh &&
	    !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
					 &fen_info->dst, sizeof(fen_info->dst),
					 fen_info->dst_len,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib4_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib4_entry);
		goto err_fib4_entry_create;
	}

	/* Silently drop replacements that are not allowed (e.g. main table
	 * over local table); this is not an error.
	 */
	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		return 0;
	}

	replaced = fib_node->fib_entry;
	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib_node_entry_link;
	}

	/* Nothing to replace */
	if (!replaced)
		return 0;

	/* The old entry was overwritten in the device by the link above;
	 * clear its kernel flags and free it.
	 */
	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
	fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
				     common);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);

	return 0;

err_fib_node_entry_link:
	/* Restore the previously attached entry before unwinding. */
	fib_node->fib_entry = replaced;
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
err_fib4_entry_create:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}
6573
/* Handle an IPv4 route delete notification: remove the matching entry
 * from the device (if tracked), destroy it and release the node. A route
 * the driver never tracked is silently ignored.
 */
static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				    struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (!fib4_entry)
		return 0;
	fib_node = fib4_entry->common.fib_node;

	err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}
6592
/* Return true for IPv6 routes the driver should not offload. */
static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
{
	/* Multicast routes aren't supported, so ignore them. */
	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
		return true;

	/* Cloned (cache) routes are irrelevant in the forwarding path. */
	if (rt->fib6_flags & RTF_CACHE)
		return true;

	return false;
}
6607
/* Wrap a kernel fib6_info in the driver's per-route structure. */
static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
	if (!mlxsw_sp_rt6)
		return ERR_PTR(-ENOMEM);

	/* Take a reference on the route; it can otherwise be freed under
	 * us (e.g. when replaced) while we still point at it. Dropped in
	 * mlxsw_sp_rt6_destroy().
	 */
	mlxsw_sp_rt6->rt = rt;
	fib6_info_hold(rt);

	return mlxsw_sp_rt6;
}
6625
#if IS_ENABLED(CONFIG_IPV6)
/* Drop the reference taken in mlxsw_sp_rt6_create(). */
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
	fib6_info_release(rt);
}
#else
/* Stub for kernels built without IPv6 support. */
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
}
#endif
6636
/* Free the driver's per-route wrapper and drop the fib6_info reference. */
static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;

	/* Routes backed by a nexthop object have their offload indication
	 * managed by the nexthop code; otherwise clear it here.
	 */
	if (!mlxsw_sp_rt6->rt->nh)
		fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
	kfree(mlxsw_sp_rt6);
}
6646
/* Return the first (representative) fib6_info of an IPv6 entry; callers
 * rely on the entry having at least one route on its rt6_list.
 */
static struct fib6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				list)->rt;
}
6653
6654static struct mlxsw_sp_rt6 *
6655mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
6656 const struct fib6_info *rt)
6657{
6658 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6659
6660 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
6661 if (mlxsw_sp_rt6->rt == rt)
6662 return mlxsw_sp_rt6;
6663 }
6664
6665 return NULL;
6666}
6667
6668static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
6669 const struct fib6_info *rt,
6670 enum mlxsw_sp_ipip_type *ret)
6671{
6672 return rt->fib6_nh->fib_nh_dev &&
6673 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
6674}
6675
/* Initialize one nexthop of an IPv6 nexthop group from the route's fib6_nh:
 * copy weight and gateway address, attach it to the router's nexthop list
 * and, if a nexthop device exists, resolve the nexthop type (ETH/IPIP).
 *
 * Returns 0 on success or a negative errno from the type initialization.
 */
static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  const struct fib6_info *rt)
{
	struct net_device *dev = rt->fib6_nh->fib_nh_dev;

	nh->nhgi = nh_grp->nhgi;
	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
#if IS_ENABLED(CONFIG_IPV6)
	/* IPv6 neighbours are resolved through the ND table. */
	nh->neigh_tbl = &nd_tbl;
#endif
	/* Best-effort counter allocation; the result is not checked here. */
	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);

	/* Link before the dev check so mlxsw_sp_nexthop6_fini() can always
	 * unconditionally unlink.
	 */
	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);

	if (!dev)
		return 0;
	nh->ifindex = dev->ifindex;

	return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
}
6699
/* Tear down a nexthop initialized by mlxsw_sp_nexthop6_init(), in reverse
 * order: release the type, unlink from the router list, free the counter.
 */
static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
}
6707
6708static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
6709 const struct fib6_info *rt)
6710{
6711 return rt->fib6_nh->fib_nh_gw_family ||
6712 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
6713}
6714
6715static int
6716mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
6717 struct mlxsw_sp_nexthop_group *nh_grp,
6718 struct mlxsw_sp_fib6_entry *fib6_entry)
6719{
6720 struct mlxsw_sp_nexthop_group_info *nhgi;
6721 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6722 struct mlxsw_sp_nexthop *nh;
6723 int err, i;
6724
6725 nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
6726 GFP_KERNEL);
6727 if (!nhgi)
6728 return -ENOMEM;
6729 nh_grp->nhgi = nhgi;
6730 nhgi->nh_grp = nh_grp;
6731 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
6732 struct mlxsw_sp_rt6, list);
6733 nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
6734 nhgi->count = fib6_entry->nrt6;
6735 for (i = 0; i < nhgi->count; i++) {
6736 struct fib6_info *rt = mlxsw_sp_rt6->rt;
6737
6738 nh = &nhgi->nexthops[i];
6739 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
6740 if (err)
6741 goto err_nexthop6_init;
6742 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
6743 }
6744 nh_grp->nhgi = nhgi;
6745 err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
6746 if (err)
6747 goto err_group_inc;
6748 err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
6749 if (err)
6750 goto err_group_refresh;
6751
6752 return 0;
6753
6754err_group_refresh:
6755 mlxsw_sp_nexthop_group_dec(mlxsw_sp);
6756err_group_inc:
6757 i = nhgi->count;
6758err_nexthop6_init:
6759 for (i--; i >= 0; i--) {
6760 nh = &nhgi->nexthops[i];
6761 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
6762 }
6763 kfree(nhgi);
6764 return err;
6765}
6766
/* Destroy the nexthop group info: tear down the nexthops in reverse order,
 * refresh the (now empty) group so the device releases its adjacency
 * entries, then free the structure.
 */
static void
mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	int i;

	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];

		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	/* After the refresh no adjacency index may remain in use. */
	WARN_ON_ONCE(nhgi->adj_index_valid);
	kfree(nhgi);
}
6784
/* Create an IPv6 nexthop group for the given FIB entry: allocate the group,
 * set up its VR hash table and nexthop info, and insert it into the router's
 * group table so later entries with identical nexthops can share it.
 *
 * Returns the group or an ERR_PTR; on failure all intermediate state is
 * unwound via the goto chain.
 */
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	int err;

	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->vr_list);
	err = rhashtable_init(&nh_grp->vr_ht,
			      &mlxsw_sp_nexthop_group_vr_ht_params);
	if (err)
		goto err_nexthop_group_vr_ht_init;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;

	err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
	if (err)
		goto err_nexthop_group_info_init;

	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;

	/* Groups created here (as opposed to nexthop-object groups) may be
	 * destroyed once their last FIB entry is gone.
	 */
	nh_grp->can_destroy = true;

	return nh_grp;

err_nexthop_group_insert:
	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
err_nexthop_group_info_init:
	rhashtable_destroy(&nh_grp->vr_ht);
err_nexthop_group_vr_ht_init:
	kfree(nh_grp);
	return ERR_PTR(err);
}
6823
/* Destroy an IPv6 nexthop group created by mlxsw_sp_nexthop6_group_create().
 * Groups with can_destroy unset (e.g. backed by a nexthop object) are left
 * alone and torn down elsewhere.
 */
static void
mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (!nh_grp->can_destroy)
		return;
	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
	/* All VR links must have been dropped by the group's FIB entries. */
	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
	rhashtable_destroy(&nh_grp->vr_ht);
	kfree(nh_grp);
}
6836
/* Attach the FIB entry to a nexthop group: a nexthop-object group if the
 * route references one, otherwise a shared (or newly created) IPv6 group
 * matching the entry's nexthops.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
	struct mlxsw_sp_nexthop_group *nh_grp;

	if (rt->nh) {
		/* The route uses a nexthop object; its group must already
		 * have been created by the nexthop notifier.
		 */
		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
							   rt->nh->id);
		if (WARN_ON_ONCE(!nh_grp))
			return -EINVAL;
		goto out;
	}

	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}

	/* Update the offload indication of the routes now that they are
	 * associated with a (possibly pre-existing) group.
	 */
	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);

out:
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &nh_grp->fib_list);
	fib6_entry->common.nh_group = nh_grp;

	return 0;
}
6870
/* Detach a FIB entry from its nexthop group and destroy the group if this
 * was its last user, dispatching on the group type.
 */
static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;

	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
		return;
	}

	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}
6887
/* Re-associate a FIB entry with a nexthop group matching its current route
 * list (after routes were added or removed), update the device, and destroy
 * the old group if it became unused.
 *
 * On any failure the entry is rolled back to its old group so the caller
 * observes no state change. Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					  struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
	int err;

	mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
	fib6_entry->common.nh_group = NULL;
	list_del(&fib6_entry->common.nexthop_group_node);

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
					     fib_node->fib);
	if (err)
		goto err_nexthop_group_vr_link;

	/* In case this entry is offloaded, then the adjacency index
	 * currently associated with it in the device's table is that
	 * of the old group. Start using the new one instead.
	 */
	err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
					  &fib6_entry->common, false);
	if (err)
		goto err_fib_entry_update;

	if (list_empty(&old_nh_grp->fib_list))
		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);

	return 0;

err_fib_entry_update:
	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
					 fib_node->fib);
err_nexthop_group_vr_link:
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
err_nexthop6_group_get:
	/* Restore the association with the old group. */
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &old_nh_grp->fib_list);
	fib6_entry->common.nh_group = old_nh_grp;
	mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
	return err;
}
6935
/* Append 'nrt6' routes to an existing FIB entry and refresh its nexthop
 * group accordingly. On failure the routes added here are removed again in
 * reverse (tail-first) order. Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err, i;

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
		if (IS_ERR(mlxsw_sp_rt6)) {
			err = PTR_ERR(mlxsw_sp_rt6);
			goto err_rt6_create;
		}

		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
		fib6_entry->nrt6++;
	}

	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
	if (err)
		goto err_nexthop6_group_update;

	return 0;

err_nexthop6_group_update:
	i = nrt6;
err_rt6_create:
	/* The new routes were appended at the tail, so popping from the tail
	 * removes exactly the ones added above.
	 */
	for (i--; i >= 0; i--) {
		fib6_entry->nrt6--;
		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
					       struct mlxsw_sp_rt6, list);
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
	return err;
}
6974
/* Remove 'nrt6' routes from a FIB entry and refresh its nexthop group.
 * A route that is not found on the entry triggers a warning and is skipped.
 * The group update's return value is intentionally not propagated — this is
 * a best-effort removal path.
 */
static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int i;

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
							   rt_arr[i]);
		if (WARN_ON_ONCE(!mlxsw_sp_rt6))
			continue;

		fib6_entry->nrt6--;
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}

	mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
}
6997
/* Set the type of a local (RTF_LOCAL) IPv6 entry: default to trapping to
 * the CPU, but if the prefix matches an active IP-in-IP decap underlay
 * address, program the entry as a decap entry instead.
 *
 * Returns 0 on success or a negative errno from the decap initialization.
 */
static int
mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry,
				   const struct fib6_info *rt)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
	union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
	int ifindex = nhgi->nexthops[0].ifindex;
	struct mlxsw_sp_ipip_entry *ipip_entry;

	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
						       MLXSW_SP_L3_PROTO_IPV6,
						       dip);

	if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
		return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
						     ipip_entry);
	}

	return 0;
}
7021
7022static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
7023 struct mlxsw_sp_fib_entry *fib_entry,
7024 const struct fib6_info *rt)
7025{
7026 if (rt->fib6_flags & RTF_LOCAL)
7027 return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
7028 rt);
7029 if (rt->fib6_flags & RTF_ANYCAST)
7030 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
7031 else if (rt->fib6_type == RTN_BLACKHOLE)
7032 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
7033 else if (rt->fib6_flags & RTF_REJECT)
7034 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
7035 else if (fib_entry->nh_group->nhgi->gateway)
7036 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
7037 else
7038 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
7039
7040 return 0;
7041}
7042
7043static void
7044mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
7045{
7046 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
7047
7048 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
7049 list) {
7050 fib6_entry->nrt6--;
7051 list_del(&mlxsw_sp_rt6->list);
7052 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
7053 }
7054}
7055
/* Create an IPv6 FIB entry under the given FIB node from an array of 'nrt6'
 * routes: wrap each route, obtain a matching nexthop group, link the group
 * to the node's VR and classify the entry type.
 *
 * Returns the entry or an ERR_PTR; on failure all intermediate state is
 * unwound via the goto chain (route wrappers are popped tail-first).
 */
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err, i;

	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
	if (!fib6_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib6_entry->common;

	/* Low-level-ops private data; freed with the entry. */
	fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
	if (IS_ERR(fib_entry->priv)) {
		err = PTR_ERR(fib_entry->priv);
		goto err_fib_entry_priv_create;
	}

	INIT_LIST_HEAD(&fib6_entry->rt6_list);

	for (i = 0; i < nrt6; i++) {
		mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
		if (IS_ERR(mlxsw_sp_rt6)) {
			err = PTR_ERR(mlxsw_sp_rt6);
			goto err_rt6_create;
		}
		list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
		fib6_entry->nrt6++;
	}

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
					     fib_node->fib);
	if (err)
		goto err_nexthop_group_vr_link;

	/* Classify using the first route as representative. */
	err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
	if (err)
		goto err_fib6_entry_type_set;

	fib_entry->fib_node = fib_node;

	return fib6_entry;

err_fib6_entry_type_set:
	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
err_nexthop_group_vr_link:
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
err_nexthop6_group_get:
	i = nrt6;
err_rt6_create:
	for (i--; i >= 0; i--) {
		fib6_entry->nrt6--;
		mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
					       struct mlxsw_sp_rt6, list);
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
	mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
err_fib_entry_priv_create:
	kfree(fib6_entry);
	return ERR_PTR(err);
}
7125
/* Release type-specific resources of an IPv6 entry (e.g. the decap entry of
 * an IP-in-IP decap route); thin wrapper over the protocol-agnostic helper.
 */
static void
mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
}
7132
/* Destroy an IPv6 FIB entry, reversing mlxsw_sp_fib6_entry_create(): unset
 * the type, unlink the group from the VR, release the group, destroy all
 * route wrappers and free the entry.
 */
static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;

	mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
					 fib_node->fib);
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
	/* The route count must drop to zero once all wrappers are gone. */
	WARN_ON(fib6_entry->nrt6);
	mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
	kfree(fib6_entry);
}
7147
/* Find the offloaded IPv6 entry matching the given kernel route: resolve
 * the VR by table ID, look up the FIB node by prefix, then verify the
 * node's entry really corresponds to this route (same table, same metric,
 * route present in the entry's list). Returns NULL when not found.
 */
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct fib6_info *cmp_rt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
					    sizeof(rt->fib6_dst.addr),
					    rt->fib6_dst.plen);
	if (!fib_node)
		return NULL;

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
	if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
	    rt->fib6_metric == cmp_rt->fib6_metric &&
	    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
		return fib6_entry;

	return NULL;
}
7179
7180static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
7181{
7182 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7183 struct mlxsw_sp_fib6_entry *fib6_replaced;
7184 struct fib6_info *rt, *rt_replaced;
7185
7186 if (!fib_node->fib_entry)
7187 return true;
7188
7189 fib6_replaced = container_of(fib_node->fib_entry,
7190 struct mlxsw_sp_fib6_entry,
7191 common);
7192 rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7193 rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
7194 if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
7195 rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
7196 return false;
7197
7198 return true;
7199}
7200
/* Handle an IPv6 ENTRY_REPLACE event: install a new entry built from the
 * route array under the matching FIB node and destroy any entry it
 * replaces. Source-specific routes are rejected; ignorable routes and
 * routes whose nexthop object is not offloaded are silently skipped.
 *
 * Returns 0 on success (or skip) or a negative errno.
 */
static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
	struct mlxsw_sp_fib_entry *replaced;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];
	int err;

	/* Source-specific routing is not supported. */
	if (rt->fib6_src.plen)
		return -EINVAL;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	/* A nexthop-object route whose group is not known to the driver
	 * cannot be offloaded.
	 */
	if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
					 &rt->fib6_dst.addr,
					 sizeof(rt->fib6_dst.addr),
					 rt->fib6_dst.plen,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib_node))
		return PTR_ERR(fib_node);

	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
						nrt6);
	if (IS_ERR(fib6_entry)) {
		err = PTR_ERR(fib6_entry);
		goto err_fib6_entry_create;
	}

	if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		return 0;
	}

	replaced = fib_node->fib_entry;
	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
	if (err)
		goto err_fib_node_entry_link;

	/* Nothing to replace */
	if (!replaced)
		return 0;

	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
	fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
				     common);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);

	return 0;

err_fib_node_entry_link:
	fib_node->fib_entry = replaced;
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
err_fib6_entry_create:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}
7264
/* Handle an IPv6 ENTRY_APPEND event: add the routes in the array to the
 * already-installed entry of the matching FIB node (multipath sibling
 * addition). Returns 0 on success (or skip) or a negative errno.
 */
static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				       struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];
	int err;

	/* Source-specific routing is not supported. */
	if (rt->fib6_src.plen)
		return -EINVAL;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
					 &rt->fib6_dst.addr,
					 sizeof(rt->fib6_dst.addr),
					 rt->fib6_dst.plen,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib_node))
		return PTR_ERR(fib_node);

	/* An append event can only follow a replace, so the node must
	 * already carry an entry.
	 */
	if (WARN_ON_ONCE(!fib_node->fib_entry)) {
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		return -EINVAL;
	}

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
	if (err)
		goto err_fib6_entry_nexthop_add;

	return 0;

err_fib6_entry_nexthop_add:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}
7305
/* Handle an IPv6 ENTRY_DEL event: either trim some routes off the entry
 * (when only part of a multipath route is deleted) or unlink and destroy
 * the whole entry when all of its routes are going away.
 *
 * Returns 0 when there is nothing to do, or the unlink status.
 */
static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
				    struct fib6_info **rt_arr, unsigned int nrt6)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct fib6_info *rt = rt_arr[0];
	int err;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	/* Multipath routes are first added to the FIB trie and only then
	 * notified. If we vetoed the addition, we will get a delete
	 * notification for a route we do not have. Therefore, do not warn if
	 * the route is not found.
	 */
	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
	if (!fib6_entry)
		return 0;

	/* If not all the nexthops are deleted, then only reduce the nexthop
	 * group.
	 */
	if (nrt6 != fib6_entry->nrt6) {
		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
		return 0;
	}

	fib_node = fib6_entry->common.fib_node;

	err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}
7342
7343static struct mlxsw_sp_mr_table *
7344mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
7345{
7346 if (family == RTNL_FAMILY_IPMR)
7347 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
7348 else
7349 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
7350}
7351
/* Add (or replace) a multicast route: take a VR reference for the route's
 * table and program the route into the matching MR table. The VR reference
 * is kept while the route exists and dropped in mlxsw_sp_router_fibmr_del().
 *
 * NOTE(review): if mlxsw_sp_mr_route_add() fails, the VR reference taken
 * here is not dropped on this path — verify whether a caller compensates or
 * whether this leaks a reference.
 */
static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
				     struct mfc_entry_notifier_info *men_info,
				     bool replace)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
	return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
}
7366
/* Delete a multicast route and drop the VR reference that was taken when
 * the route was added.
 */
static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
				      struct mfc_entry_notifier_info *men_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
	mlxsw_sp_mr_route_del(mrt, men_info->mfc);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}
7381
/* Add a multicast VIF: take a VR reference for its table and register the
 * VIF (with its RIF, if the device has one) in the matching MR table. The
 * VR reference is dropped in mlxsw_sp_router_fibmr_vif_del().
 *
 * NOTE(review): as with fibmr_add(), a failing mlxsw_sp_mr_vif_add() leaves
 * the VR reference held — confirm this is intentional.
 */
static int
mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
	return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
				   ven_info->vif_index,
				   ven_info->vif_flags, rif);
}
7400
/* Remove a multicast VIF from its MR table and drop the VR reference taken
 * when the VIF was added.
 */
static void
mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
			      struct vif_entry_notifier_info *ven_info)
{
	struct mlxsw_sp_mr_table *mrt;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
	if (WARN_ON(!vr))
		return;

	mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
	mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}
7416
/* Flush an IPv4 FIB node: unlink its entry from the device, destroy the
 * entry and release the node reference.
 */
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib4_entry, common);
	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
7428
/* Flush an IPv6 FIB node: unlink its entry from the device, destroy the
 * entry and release the node reference.
 */
static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;

	fib6_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib6_entry, common);
	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
7440
/* Flush a FIB node, dispatching on the node's L3 protocol. */
static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_node *fib_node)
{
	switch (fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
		break;
	}
}
7453
/* Flush every FIB node of one protocol's FIB within a VR. Flushing a node
 * frees it, and freeing the last node may release the FIB itself, so detect
 * the final iteration up front ('tmp' already pointing back at the list
 * head) and break before list_for_each_entry_safe() would dereference
 * freed memory.
 */
static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vr *vr,
				  enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
	struct mlxsw_sp_fib_node *fib_node, *tmp;

	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
		bool do_break = &tmp->list == &fib->node_list;

		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}
7469
/* Flush all offloaded routes: for every in-use VR, flush its multicast
 * tables, then its IPv4 FIB, then - if the VR is still in use - its IPv6
 * FIB.
 */
static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	int i, j;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
			mlxsw_sp_mr_table_flush(vr->mr_table[j]);
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);

		/* If virtual router was only used for IPv4, then it's no
		 * longer used.
		 */
		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	}
}
7492
/* Deferred IPv6 FIB event payload: the notified route and its multipath
 * siblings, each with a reference held until the event is processed.
 */
struct mlxsw_sp_fib6_event {
	struct fib6_info **rt_arr;	/* kcalloc'ed array of held routes */
	unsigned int nrt6;		/* number of routes in rt_arr */
};
7497
/* A FIB notifier event queued for deferred processing in the ordered work
 * item. 'family' selects which member of the union is valid, together with
 * 'event'.
 */
struct mlxsw_sp_fib_event {
	struct list_head list;		/* node on router->fib_event_queue */
	union {
		struct mlxsw_sp_fib6_event fib6_event;
		struct fib_entry_notifier_info fen_info;
		struct fib_rule_notifier_info fr_info;
		struct fib_nh_notifier_info fnh_info;
		struct mfc_entry_notifier_info men_info;
		struct vif_entry_notifier_info ven_info;
	};
	struct mlxsw_sp *mlxsw_sp;
	unsigned long event;		/* FIB_EVENT_* value */
	int family;			/* AF_INET, AF_INET6 or RTNL_FAMILY_IP*MR */
};
7512
/* Capture an IPv6 entry notification for deferred processing: collect the
 * route and all of its multipath siblings into an array, holding a
 * reference on each. GFP_ATOMIC because this runs in notifier context.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int
mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
				struct fib6_entry_notifier_info *fen6_info)
{
	struct fib6_info *rt = fen6_info->rt;
	struct fib6_info **rt_arr;
	struct fib6_info *iter;
	unsigned int nrt6;
	int i = 0;

	nrt6 = fen6_info->nsiblings + 1;

	rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
	if (!rt_arr)
		return -ENOMEM;

	fib6_event->rt_arr = rt_arr;
	fib6_event->nrt6 = nrt6;

	rt_arr[0] = rt;
	fib6_info_hold(rt);

	if (!fen6_info->nsiblings)
		return 0;

	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
		if (i == fen6_info->nsiblings)
			break;

		rt_arr[i + 1] = iter;
		fib6_info_hold(iter);
		i++;
	}
	/* The sibling list should contain exactly 'nsiblings' routes. */
	WARN_ON_ONCE(i != fen6_info->nsiblings);

	return 0;
}
7550
7551static void
7552mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
7553{
7554 int i;
7555
7556 for (i = 0; i < fib6_event->nrt6; i++)
7557 mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
7558 kfree(fib6_event->rt_arr);
7559}
7560
/* Process one deferred IPv4 FIB event. Each case drops the fib_info
 * reference that was taken when the event was queued; on failure the
 * op_ctx's bulked private data is released and (for replace) the routes are
 * marked as failed-to-offload.
 */
static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					       struct mlxsw_sp_fib_event *fib_event)
{
	int err;

	/* SPAN mirroring sessions may depend on routes; revalidate them. */
	mlxsw_sp_span_respin(mlxsw_sp);

	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
		err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
		if (err) {
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
			mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
							      &fib_event->fen_info);
		}
		fib_info_put(fib_event->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
		if (err)
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
		fib_info_put(fib_event->fen_info.fi);
		break;
	case FIB_EVENT_NH_ADD:
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
		fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
		break;
	}
}
7593
/* Process one deferred IPv6 FIB event. Every case finishes by releasing the
 * route references captured at queue time; on failure the op_ctx's bulked
 * private data is released and (for replace/append) the routes are marked
 * as failed-to-offload.
 */
static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
					       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					       struct mlxsw_sp_fib_event *fib_event)
{
	struct mlxsw_sp_fib6_event *fib6_event = &fib_event->fib6_event;
	int err;

	/* SPAN mirroring sessions may depend on routes; revalidate them. */
	mlxsw_sp_span_respin(mlxsw_sp);

	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
		err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
						   fib_event->fib6_event.nrt6);
		if (err) {
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
			dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
							      fib6_event->rt_arr,
							      fib6_event->nrt6);
		}
		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
		break;
	case FIB_EVENT_ENTRY_APPEND:
		err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
						  fib_event->fib6_event.nrt6);
		if (err) {
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
			dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
			mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
							      fib6_event->rt_arr,
							      fib6_event->nrt6);
		}
		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
		break;
	case FIB_EVENT_ENTRY_DEL:
		err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
					       fib_event->fib6_event.nrt6);
		if (err)
			mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
		mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
		break;
	}
}
7637
/* Process one deferred multicast routing event under RTNL and the router
 * lock (RTNL first — presumably required by the MR code paths; note the
 * caller drops the router lock before calling here to keep lock ordering).
 * Each case releases the reference (mfc cache entry or netdev) taken when
 * the event was queued.
 */
static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_fib_event *fib_event)
{
	bool replace;
	int err;

	rtnl_lock();
	mutex_lock(&mlxsw_sp->router->lock);
	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;

		err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
		if (err)
			dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
		mr_cache_put(fib_event->men_info.mfc);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
		mr_cache_put(fib_event->men_info.mfc);
		break;
	case FIB_EVENT_VIF_ADD:
		err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
						    &fib_event->ven_info);
		if (err)
			dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
		dev_put(fib_event->ven_info.dev);
		break;
	case FIB_EVENT_VIF_DEL:
		mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
		dev_put(fib_event->ven_info.dev);
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);
	rtnl_unlock();
}
7675
/* Work item that drains the deferred FIB event queue. Events are spliced
 * off the queue under its spinlock, then processed under the router lock
 * using a shared operation context that allows bulking consecutive events
 * of the same family and type into fewer device transactions.
 */
static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
	struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
	struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
	struct mlxsw_sp_fib_event *next_fib_event;
	struct mlxsw_sp_fib_event *fib_event;
	int last_family = AF_UNSPEC;
	LIST_HEAD(fib_event_queue);

	spin_lock_bh(&router->fib_event_queue_lock);
	list_splice_init(&router->fib_event_queue, &fib_event_queue);
	spin_unlock_bh(&router->fib_event_queue_lock);

	/* Router lock is held here to make sure per-router operation
	 * context is not used in parallel.
	 */
	mutex_lock(&router->lock);
	mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
	list_for_each_entry_safe(fib_event, next_fib_event,
				 &fib_event_queue, list) {
		/* Check if the next entry in the queue exists and it is
		 * of the same type (family and event) as the currently
		 * processed event. If so, further processing may be bulked.
		 */
		op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
				  fib_event->family == next_fib_event->family &&
				  fib_event->event == next_fib_event->event;
		op_ctx->event = fib_event->event;

		/* Different families use different contexts in the device,
		 * so the context must be re-initialized whenever the family
		 * changes between consecutive events.
		 */
		if (fib_event->family != last_family)
			op_ctx->initialized = false;

		switch (fib_event->family) {
		case AF_INET:
			mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
							   fib_event);
			break;
		case AF_INET6:
			mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
							   fib_event);
			break;
		case RTNL_FAMILY_IP6MR:
		case RTNL_FAMILY_IPMR:
			/* Unlock here as inside FIBMR the lock is taken
			 * again under RTNL lock.
			 */
			mutex_unlock(&router->lock);
			mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
							    fib_event);
			mutex_lock(&router->lock);
			break;
		default:
			WARN_ON_ONCE(1);
		}
		last_family = fib_event->family;
		kfree(fib_event);
		cond_resched();
	}
	/* All bulked private data must have been flushed by now. */
	WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
	mutex_unlock(&router->lock);
}
7746
/* Snapshot an IPv4 FIB notification into @fib_event for deferred
 * processing by the FIB event work item.
 */
static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
				       struct fib_notifier_info *info)
{
	struct fib_entry_notifier_info *fen_info;
	struct fib_nh_notifier_info *fnh_info;

	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_DEL:
		fen_info = container_of(info, struct fib_entry_notifier_info,
					info);
		fib_event->fen_info = *fen_info;
		/* Take a reference on fib_info so it is not freed while
		 * the event sits in the queue; the work item drops it.
		 */
		fib_info_hold(fib_event->fen_info.fi);
		break;
	case FIB_EVENT_NH_ADD:
	case FIB_EVENT_NH_DEL:
		fnh_info = container_of(info, struct fib_nh_notifier_info,
					info);
		fib_event->fnh_info = *fnh_info;
		/* Same lifetime rule for the nexthop's parent fib_info. */
		fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
		break;
	}
}
7773
/* Snapshot an IPv6 FIB notification into @fib_event for deferred
 * processing. Returns 0 on success or a negative errno; on failure the
 * event must not be queued.
 */
static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
				      struct fib_notifier_info *info)
{
	struct fib6_entry_notifier_info *fen6_info;
	int err;

	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_APPEND:
	case FIB_EVENT_ENTRY_DEL:
		fen6_info = container_of(info, struct fib6_entry_notifier_info,
					 info);
		/* Initialize the deferred fib6 event state from the
		 * notifier payload (defined earlier in this file).
		 */
		err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
						      fen6_info);
		if (err)
			return err;
		break;
	}

	return 0;
}
7795
/* Snapshot a multicast-route notification; the references taken here
 * are released by mlxsw_sp_router_fibmr_event_process().
 */
static void
mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
			    struct fib_notifier_info *info)
{
	switch (fib_event->event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
		/* Keep the MFC cache entry alive while the event is queued. */
		mr_cache_hold(fib_event->men_info.mfc);
		break;
	case FIB_EVENT_VIF_ADD:
	case FIB_EVENT_VIF_DEL:
		memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
		/* Keep the VIF's netdev alive while the event is queued. */
		dev_hold(fib_event->ven_info.dev);
		break;
	}
}
7814
7815static int mlxsw_sp_router_fib_rule_event(unsigned long event,
7816 struct fib_notifier_info *info,
7817 struct mlxsw_sp *mlxsw_sp)
7818{
7819 struct netlink_ext_ack *extack = info->extack;
7820 struct fib_rule_notifier_info *fr_info;
7821 struct fib_rule *rule;
7822 int err = 0;
7823
7824
7825 if (event == FIB_EVENT_RULE_DEL)
7826 return 0;
7827
7828 fr_info = container_of(info, struct fib_rule_notifier_info, info);
7829 rule = fr_info->rule;
7830
7831
7832 if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
7833 return 0;
7834
7835 switch (info->family) {
7836 case AF_INET:
7837 if (!fib4_rule_default(rule) && !rule->l3mdev)
7838 err = -EOPNOTSUPP;
7839 break;
7840 case AF_INET6:
7841 if (!fib6_rule_default(rule) && !rule->l3mdev)
7842 err = -EOPNOTSUPP;
7843 break;
7844 case RTNL_FAMILY_IPMR:
7845 if (!ipmr_rule_default(rule) && !rule->l3mdev)
7846 err = -EOPNOTSUPP;
7847 break;
7848 case RTNL_FAMILY_IP6MR:
7849 if (!ip6mr_rule_default(rule) && !rule->l3mdev)
7850 err = -EOPNOTSUPP;
7851 break;
7852 }
7853
7854 if (err < 0)
7855 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
7856
7857 return err;
7858}
7859
7860
/* FIB notifier callback. May run in atomic context (note GFP_ATOMIC),
 * so events are snapshotted — with the required references taken — and
 * queued for the work item to process.
 */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp_fib_event *fib_event;
	struct fib_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;
	int err;

	/* Only the four address families handled below are of interest. */
	if ((info->family != AF_INET && info->family != AF_INET6 &&
	     info->family != RTNL_FAMILY_IPMR &&
	     info->family != RTNL_FAMILY_IP6MR))
		return NOTIFY_DONE;

	router = container_of(nb, struct mlxsw_sp_router, fib_nb);

	switch (event) {
	case FIB_EVENT_RULE_ADD:
	case FIB_EVENT_RULE_DEL:
		/* Rule events are validated synchronously; unsupported
		 * rules are vetoed via the notifier return value.
		 */
		err = mlxsw_sp_router_fib_rule_event(event, info,
						     router->mlxsw_sp);
		return notifier_from_errno(err);
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_APPEND:
		if (info->family == AF_INET) {
			struct fib_entry_notifier_info *fen_info = ptr;

			/* Veto IPv4 routes whose nexthop is an IPv6
			 * gateway — not offloadable by this device.
			 */
			if (fen_info->fi->fib_nh_is_v6) {
				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
				return notifier_from_errno(-EINVAL);
			}
		}
		break;
	}

	fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
	if (!fib_event)
		return NOTIFY_BAD;

	fib_event->mlxsw_sp = router->mlxsw_sp;
	fib_event->event = event;
	fib_event->family = info->family;

	/* Copy the notifier payload and take the references that keep it
	 * alive until the work item runs.
	 */
	switch (info->family) {
	case AF_INET:
		mlxsw_sp_router_fib4_event(fib_event, info);
		break;
	case AF_INET6:
		err = mlxsw_sp_router_fib6_event(fib_event, info);
		if (err)
			goto err_fib_event;
		break;
	case RTNL_FAMILY_IP6MR:
	case RTNL_FAMILY_IPMR:
		mlxsw_sp_router_fibmr_event(fib_event, info);
		break;
	}

	/* Enqueue and kick the processing work item. */
	spin_lock_bh(&router->fib_event_queue_lock);
	list_add_tail(&fib_event->list, &router->fib_event_queue);
	spin_unlock_bh(&router->fib_event_queue_lock);
	mlxsw_core_schedule_work(&router->fib_event_work);

	return NOTIFY_DONE;

err_fib_event:
	kfree(fib_event);
	return NOTIFY_BAD;
}
7931
7932static struct mlxsw_sp_rif *
7933mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
7934 const struct net_device *dev)
7935{
7936 int i;
7937
7938 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
7939 if (mlxsw_sp->router->rifs[i] &&
7940 mlxsw_sp->router->rifs[i]->dev == dev)
7941 return mlxsw_sp->router->rifs[i];
7942
7943 return NULL;
7944}
7945
7946bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
7947 const struct net_device *dev)
7948{
7949 struct mlxsw_sp_rif *rif;
7950
7951 mutex_lock(&mlxsw_sp->router->lock);
7952 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7953 mutex_unlock(&mlxsw_sp->router->lock);
7954
7955 return rif;
7956}
7957
7958u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
7959{
7960 struct mlxsw_sp_rif *rif;
7961 u16 vid = 0;
7962
7963 mutex_lock(&mlxsw_sp->router->lock);
7964 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7965 if (!rif)
7966 goto out;
7967
7968
7969
7970
7971 if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
7972 goto out;
7973
7974 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7975
7976out:
7977 mutex_unlock(&mlxsw_sp->router->lock);
7978 return vid;
7979}
7980
/* Disable RIF @rif in hardware via a read-modify-write of the RITR
 * register, leaving all its other fields intact.
 */
static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
7994
/* React to the netdev behind @rif going away: disable the RIF in
 * hardware first, then flush the nexthops and neighbours that use it.
 */
static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
}
8002
/* Decide whether an address event on @dev should (re)configure or tear
 * down a RIF. @rif is the current RIF for @dev, possibly NULL.
 */
static bool
mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
			   unsigned long event)
{
	struct inet6_dev *inet6_dev;
	bool addr_list_empty = true;
	struct in_device *idev;

	switch (event) {
	case NETDEV_UP:
		/* Only configure when no RIF exists yet. */
		return rif == NULL;
	case NETDEV_DOWN:
		/* Check whether the netdev still carries any IPv4 or
		 * IPv6 address; teardown only happens when none remain.
		 */
		rcu_read_lock();
		idev = __in_dev_get_rcu(dev);
		if (idev && idev->ifa_list)
			addr_list_empty = false;

		inet6_dev = __in6_dev_get(dev);
		if (addr_list_empty && inet6_dev &&
		    !list_empty(&inet6_dev->addr_list))
			addr_list_empty = false;
		rcu_read_unlock();

		/* macvlan uppers have no RIF of their own (rif is NULL
		 * here), but their MAC entries must still be cleaned up
		 * once the last address goes away.
		 */
		if (netif_is_macvlan(dev) && addr_list_empty)
			return true;

		if (rif && addr_list_empty &&
		    !netif_is_l3_slave(rif->dev))
			return true;

		/* It is possible we already removed the RIF ourselves,
		 * e.g. when the netdev became enslaved; in that case do
		 * nothing here.
		 */
		return false;
	}

	return false;
}
8044
8045static enum mlxsw_sp_rif_type
8046mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
8047 const struct net_device *dev)
8048{
8049 enum mlxsw_sp_fid_type type;
8050
8051 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
8052 return MLXSW_SP_RIF_TYPE_IPIP_LB;
8053
8054
8055 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
8056 type = MLXSW_SP_FID_TYPE_8021Q;
8057 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
8058 type = MLXSW_SP_FID_TYPE_8021Q;
8059 else if (netif_is_bridge_master(dev))
8060 type = MLXSW_SP_FID_TYPE_8021D;
8061 else
8062 type = MLXSW_SP_FID_TYPE_RFID;
8063
8064 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
8065}
8066
8067static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
8068{
8069 int i;
8070
8071 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
8072 if (!mlxsw_sp->router->rifs[i]) {
8073 *p_rif_index = i;
8074 return 0;
8075 }
8076 }
8077
8078 return -ENOBUFS;
8079}
8080
8081static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
8082 u16 vr_id,
8083 struct net_device *l3_dev)
8084{
8085 struct mlxsw_sp_rif *rif;
8086
8087 rif = kzalloc(rif_size, GFP_KERNEL);
8088 if (!rif)
8089 return NULL;
8090
8091 INIT_LIST_HEAD(&rif->nexthop_list);
8092 INIT_LIST_HEAD(&rif->neigh_list);
8093 if (l3_dev) {
8094 ether_addr_copy(rif->addr, l3_dev->dev_addr);
8095 rif->mtu = l3_dev->mtu;
8096 rif->dev = l3_dev;
8097 }
8098 rif->vr_id = vr_id;
8099 rif->rif_index = rif_index;
8100
8101 return rif;
8102}
8103
/* Return the RIF occupying slot @rif_index, or NULL if the slot is free. */
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
					   u16 rif_index)
{
	return mlxsw_sp->router->rifs[rif_index];
}
8109
/* Accessor: hardware index of @rif. */
u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
	return rif->rif_index;
}
8114
/* Accessor: RIF index of an IPinIP loopback RIF. */
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	return lb_rif->common.rif_index;
}
8119
/* Resolve the underlay VR ID of an IPinIP loopback RIF; returns 0 on
 * (unexpected) lookup failure.
 */
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
	struct mlxsw_sp_vr *ul_vr;

	/* NOTE(review): mlxsw_sp_vr_get() looks like a get/put pair, but
	 * no put appears here — presumably the underlay VR is already
	 * held by the loopback RIF, so this only looks it up; confirm.
	 */
	ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
	if (WARN_ON(IS_ERR(ul_vr)))
		return 0;

	return ul_vr->id;
}
8131
/* Accessor: underlay RIF ID of an IPinIP loopback RIF. */
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	return lb_rif->ul_rif_id;
}
8136
/* Accessor: ifindex of the netdev backing @rif. */
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
	return rif->dev->ifindex;
}
8141
/* Accessor: netdev backing @rif. */
const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
{
	return rif->dev;
}
8146
/* Allocate and configure a router interface (RIF) for @params->dev:
 * bind it to the netdev's virtual router, derive its FID, program the
 * hardware through the type-specific ops and register it with the MR
 * tables. Returns the new RIF or an ERR_PTR().
 */
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack)
{
	u32 tb_id = l3mdev_fib_table(params->dev);
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp_fid *fid = NULL;
	enum mlxsw_sp_rif_type type;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_vr *vr;
	u16 rif_index;
	int i, err;

	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
	ops = mlxsw_sp->router->rif_ops_arr[type];

	/* Fall back to the main table when the netdev is not part of an
	 * L3 master device (l3mdev_fib_table() returned 0).
	 */
	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	vr->rif_count++;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
		goto err_rif_index_alloc;
	}

	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}
	/* The RIF holds a reference on its netdev until destruction. */
	dev_hold(rif->dev);
	mlxsw_sp->router->rifs[rif_index] = rif;
	rif->mlxsw_sp = mlxsw_sp;
	rif->ops = ops;

	/* Not all RIF types have an associated FID (fid_get is optional). */
	if (ops->fid_get) {
		fid = ops->fid_get(rif, extack);
		if (IS_ERR(fid)) {
			err = PTR_ERR(fid);
			goto err_fid_get;
		}
		rif->fid = fid;
	}

	if (ops->setup)
		ops->setup(rif, params);

	err = ops->configure(rif, extack);
	if (err)
		goto err_configure;

	/* Register the RIF with the MR table of each L3 protocol. */
	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
		err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
		if (err)
			goto err_mr_rif_add;
	}

	mlxsw_sp_rif_counters_alloc(rif);

	return rif;

err_mr_rif_add:
	for (i--; i >= 0; i--)
		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
	ops->deconfigure(rif);
err_configure:
	if (fid)
		mlxsw_sp_fid_put(fid);
err_fid_get:
	mlxsw_sp->router->rifs[rif_index] = NULL;
	dev_put(rif->dev);
	kfree(rif);
err_rif_alloc:
err_rif_index_alloc:
	vr->rif_count--;
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}
8228
/* Tear a RIF down, mirroring mlxsw_sp_rif_create() in reverse order. */
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
{
	const struct mlxsw_sp_rif_ops *ops = rif->ops;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;
	struct mlxsw_sp_vr *vr;
	int i;

	/* Disable the RIF in hardware and flush the nexthops and
	 * neighbours depending on it before dismantling anything else.
	 */
	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
	vr = &mlxsw_sp->router->vrs[rif->vr_id];

	mlxsw_sp_rif_counters_free(rif);
	for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
		mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
	ops->deconfigure(rif);
	if (fid)
		/* Not all RIF types have a FID (see mlxsw_sp_rif_create()). */
		mlxsw_sp_fid_put(fid);
	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
	dev_put(rif->dev);
	kfree(rif);
	vr->rif_count--;
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}
8253
8254void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
8255 struct net_device *dev)
8256{
8257 struct mlxsw_sp_rif *rif;
8258
8259 mutex_lock(&mlxsw_sp->router->lock);
8260 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8261 if (!rif)
8262 goto out;
8263 mlxsw_sp_rif_destroy(rif);
8264out:
8265 mutex_unlock(&mlxsw_sp->router->lock);
8266}
8267
8268static void
8269mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
8270 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8271{
8272 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8273
8274 params->vid = mlxsw_sp_port_vlan->vid;
8275 params->lag = mlxsw_sp_port->lagged;
8276 if (params->lag)
8277 params->lag_id = mlxsw_sp_port->lag_id;
8278 else
8279 params->system_port = mlxsw_sp_port->local_port;
8280}
8281
/* Downcast a generic RIF to its sub-port container. Only valid for
 * RIFs embedded in a struct mlxsw_sp_rif_subport.
 */
static struct mlxsw_sp_rif_subport *
mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_subport, common);
}
8287
8288static struct mlxsw_sp_rif *
8289mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
8290 const struct mlxsw_sp_rif_params *params,
8291 struct netlink_ext_ack *extack)
8292{
8293 struct mlxsw_sp_rif_subport *rif_subport;
8294 struct mlxsw_sp_rif *rif;
8295
8296 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
8297 if (!rif)
8298 return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
8299
8300 rif_subport = mlxsw_sp_rif_subport_rif(rif);
8301 refcount_inc(&rif_subport->ref_count);
8302 return rif;
8303}
8304
8305static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
8306{
8307 struct mlxsw_sp_rif_subport *rif_subport;
8308
8309 rif_subport = mlxsw_sp_rif_subport_rif(rif);
8310 if (!refcount_dec_and_test(&rif_subport->ref_count))
8311 return;
8312
8313 mlxsw_sp_rif_destroy(rif);
8314}
8315
8316static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
8317 struct mlxsw_sp_rif_mac_profile *profile,
8318 struct netlink_ext_ack *extack)
8319{
8320 u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
8321 struct mlxsw_sp_router *router = mlxsw_sp->router;
8322 int id;
8323
8324 id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
8325 max_rif_mac_profiles, GFP_KERNEL);
8326
8327 if (id >= 0) {
8328 profile->id = id;
8329 return 0;
8330 }
8331
8332 if (id == -ENOSPC)
8333 NL_SET_ERR_MSG_MOD(extack,
8334 "Exceeded number of supported router interface MAC profiles");
8335
8336 return id;
8337}
8338
8339static struct mlxsw_sp_rif_mac_profile *
8340mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
8341{
8342 struct mlxsw_sp_rif_mac_profile *profile;
8343
8344 profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
8345 mac_profile);
8346 WARN_ON(!profile);
8347 return profile;
8348}
8349
8350static struct mlxsw_sp_rif_mac_profile *
8351mlxsw_sp_rif_mac_profile_alloc(const char *mac)
8352{
8353 struct mlxsw_sp_rif_mac_profile *profile;
8354
8355 profile = kzalloc(sizeof(*profile), GFP_KERNEL);
8356 if (!profile)
8357 return NULL;
8358
8359 ether_addr_copy(profile->mac_prefix, mac);
8360 refcount_set(&profile->ref_count, 1);
8361 return profile;
8362}
8363
8364static struct mlxsw_sp_rif_mac_profile *
8365mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
8366{
8367 struct mlxsw_sp_router *router = mlxsw_sp->router;
8368 struct mlxsw_sp_rif_mac_profile *profile;
8369 int id;
8370
8371 idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
8372 if (!profile)
8373 continue;
8374
8375 if (ether_addr_equal_masked(profile->mac_prefix, mac,
8376 mlxsw_sp->mac_mask))
8377 return profile;
8378 }
8379
8380 return NULL;
8381}
8382
/* Occupancy callback: number of RIF MAC profiles currently allocated.
 * @priv is the mlxsw_sp instance passed at registration time.
 */
static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
}
8389
8390static struct mlxsw_sp_rif_mac_profile *
8391mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
8392 struct netlink_ext_ack *extack)
8393{
8394 struct mlxsw_sp_rif_mac_profile *profile;
8395 int err;
8396
8397 profile = mlxsw_sp_rif_mac_profile_alloc(mac);
8398 if (!profile)
8399 return ERR_PTR(-ENOMEM);
8400
8401 err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
8402 if (err)
8403 goto profile_index_alloc_err;
8404
8405 atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
8406 return profile;
8407
8408profile_index_alloc_err:
8409 kfree(profile);
8410 return ERR_PTR(err);
8411}
8412
/* Reverse of mlxsw_sp_rif_mac_profile_create(): drop the occupancy
 * count, release the IDR slot and free the profile.
 */
static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
					     u8 mac_profile)
{
	struct mlxsw_sp_rif_mac_profile *profile;

	atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
	profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
	kfree(profile);
}
8422
8423static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
8424 const char *mac, u8 *p_mac_profile,
8425 struct netlink_ext_ack *extack)
8426{
8427 struct mlxsw_sp_rif_mac_profile *profile;
8428
8429 profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
8430 if (profile) {
8431 refcount_inc(&profile->ref_count);
8432 goto out;
8433 }
8434
8435 profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
8436 if (IS_ERR(profile))
8437 return PTR_ERR(profile);
8438
8439out:
8440 *p_mac_profile = profile->id;
8441 return 0;
8442}
8443
8444static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
8445 u8 mac_profile)
8446{
8447 struct mlxsw_sp_rif_mac_profile *profile;
8448
8449 profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8450 mac_profile);
8451 if (WARN_ON(!profile))
8452 return;
8453
8454 if (!refcount_dec_and_test(&profile->ref_count))
8455 return;
8456
8457 mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
8458}
8459
8460static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
8461{
8462 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8463 struct mlxsw_sp_rif_mac_profile *profile;
8464
8465 profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8466 rif->mac_profile_id);
8467 if (WARN_ON(!profile))
8468 return false;
8469
8470 return refcount_read(&profile->ref_count) > 1;
8471}
8472
8473static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
8474 const char *new_mac)
8475{
8476 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8477 struct mlxsw_sp_rif_mac_profile *profile;
8478
8479 profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8480 rif->mac_profile_id);
8481 if (WARN_ON(!profile))
8482 return -EINVAL;
8483
8484 ether_addr_copy(profile->mac_prefix, new_mac);
8485 return 0;
8486}
8487
/* Point @rif at a MAC profile matching @new_mac. If this RIF is the
 * profile's only user and no other profile already matches, the
 * current profile is edited in place; otherwise the RIF migrates to a
 * (possibly new) profile and drops its reference on the old one.
 */
static int
mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *rif,
				 const char *new_mac,
				 struct netlink_ext_ack *extack)
{
	u8 mac_profile;
	int err;

	if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
	    !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
		return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);

	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
					   &mac_profile, extack);
	if (err)
		return err;

	/* Release the old profile only after the new one is secured. */
	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
	rif->mac_profile_id = mac_profile;
	return 0;
}
8510
/* Bind a {port, VID} to the sub-port RIF of @l3_dev: take a RIF
 * reference, map the {port, VID} to the RIF's FID and open the VID for
 * forwarding with learning disabled. Undone by
 * __mlxsw_sp_port_vlan_router_leave().
 */
static int
__mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
				 struct net_device *l3_dev,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_rif_params params = {
		.dev = l3_dev,
	};
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_fid *fid;
	int err;

	mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
	rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
	if (IS_ERR(rif))
		return PTR_ERR(rif);

	/* FID was already created during RIF setup; this only takes a
	 * reference. NOTE(review): IS_ERR(fid) is not checked here —
	 * presumably fid_get cannot fail for an existing RIF; confirm.
	 */
	fid = rif->ops->fid_get(rif, extack);
	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	/* Router VIDs do not learn and are always forwarding. */
	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					BR_STATE_FORWARDING);
	if (err)
		goto err_port_vid_stp_set;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
err_fid_port_vid_map:
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_rif_subport_put(rif);
	return err;
}
8559
/* Reverse of __mlxsw_sp_port_vlan_router_join(): unmap the {port, VID}
 * from the RIF's FID, restore STP/learning state and drop the FID and
 * RIF references.
 */
static void
__mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
	u16 vid = mlxsw_sp_port_vlan->vid;

	/* Only router FIDs (rFIDs) are handled by this path. */
	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
		return;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_rif_subport_put(rif);
}
8578
8579int
8580mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8581 struct net_device *l3_dev,
8582 struct netlink_ext_ack *extack)
8583{
8584 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8585 struct mlxsw_sp_rif *rif;
8586 int err = 0;
8587
8588 mutex_lock(&mlxsw_sp->router->lock);
8589 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8590 if (!rif)
8591 goto out;
8592
8593 err = __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
8594 extack);
8595out:
8596 mutex_unlock(&mlxsw_sp->router->lock);
8597 return err;
8598}
8599
/* Locked wrapper around __mlxsw_sp_port_vlan_router_leave(). */
void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;

	mutex_lock(&mlxsw_sp->router->lock);
	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
	mutex_unlock(&mlxsw_sp->router->lock);
}
8609
8610static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
8611 struct net_device *port_dev,
8612 unsigned long event, u16 vid,
8613 struct netlink_ext_ack *extack)
8614{
8615 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
8616 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
8617
8618 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
8619 if (WARN_ON(!mlxsw_sp_port_vlan))
8620 return -EINVAL;
8621
8622 switch (event) {
8623 case NETDEV_UP:
8624 return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
8625 l3_dev, extack);
8626 case NETDEV_DOWN:
8627 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8628 break;
8629 }
8630
8631 return 0;
8632}
8633
8634static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
8635 unsigned long event,
8636 struct netlink_ext_ack *extack)
8637{
8638 if (netif_is_bridge_port(port_dev) ||
8639 netif_is_lag_port(port_dev) ||
8640 netif_is_ovs_port(port_dev))
8641 return 0;
8642
8643 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
8644 MLXSW_SP_DEFAULT_VID, extack);
8645}
8646
8647static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
8648 struct net_device *lag_dev,
8649 unsigned long event, u16 vid,
8650 struct netlink_ext_ack *extack)
8651{
8652 struct net_device *port_dev;
8653 struct list_head *iter;
8654 int err;
8655
8656 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
8657 if (mlxsw_sp_port_dev_check(port_dev)) {
8658 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
8659 port_dev,
8660 event, vid,
8661 extack);
8662 if (err)
8663 return err;
8664 }
8665 }
8666
8667 return 0;
8668}
8669
8670static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
8671 unsigned long event,
8672 struct netlink_ext_ack *extack)
8673{
8674 if (netif_is_bridge_port(lag_dev))
8675 return 0;
8676
8677 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
8678 MLXSW_SP_DEFAULT_VID, extack);
8679}
8680
/* Address event on a bridge (or a VLAN upper of a VLAN-aware bridge):
 * create the RIF on the first address, destroy it on the last.
 */
static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *l3_dev,
					  unsigned long event,
					  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params params = {
		.dev = l3_dev,
	};
	struct mlxsw_sp_rif *rif;

	switch (event) {
	case NETDEV_UP:
		if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
			u16 proto;

			/* Routing is only supported on 802.1Q bridges;
			 * 802.1ad is rejected with an extack message.
			 */
			br_vlan_get_proto(l3_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
		break;
	case NETDEV_DOWN:
		/* NOTE(review): the lookup result is not NULL-checked —
		 * callers gate NETDEV_DOWN on mlxsw_sp_rif_should_config(),
		 * which presumably guarantees a RIF exists here; confirm.
		 */
		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
		mlxsw_sp_rif_destroy(rif);
		break;
	}

	return 0;
}
8714
8715static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
8716 struct net_device *vlan_dev,
8717 unsigned long event,
8718 struct netlink_ext_ack *extack)
8719{
8720 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
8721 u16 vid = vlan_dev_vlan_id(vlan_dev);
8722
8723 if (netif_is_bridge_port(vlan_dev))
8724 return 0;
8725
8726 if (mlxsw_sp_port_dev_check(real_dev))
8727 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
8728 event, vid, extack);
8729 else if (netif_is_lag_master(real_dev))
8730 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
8731 vid, extack);
8732 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
8733 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
8734 extack);
8735
8736 return 0;
8737}
8738
8739static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
8740{
8741 u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
8742 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
8743
8744 return ether_addr_equal_masked(mac, vrrp4, mask);
8745}
8746
8747static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
8748{
8749 u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
8750 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
8751
8752 return ether_addr_equal_masked(mac, vrrp6, mask);
8753}
8754
/* Program (when @adding) or clear the VRRP ID on RIF @rif_index for a
 * MAC carrying a VRRP virtual address. Non-VRRP MACs are ignored.
 */
static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				const u8 *mac, bool adding)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	/* The last MAC byte is the VRID; 0 clears the setting. */
	u8 vrrp_id = adding ? mac[5] : 0;
	int err;

	if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
	    !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
		return 0;

	/* Read-modify-write RITR so the RIF's other fields survive. */
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
		mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
	else
		mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
8778
/* Offload a macvlan on top of a RIF netdev: install an FDB entry for
 * its MAC on the RIF's FID and, for VRRP virtual MACs, program the
 * VRRP ID on the RIF.
 */
static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
				    const struct net_device *macvlan_dev,
				    struct netlink_ext_ack *extack)
{
	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
	if (!rif) {
		NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
		return -EOPNOTSUPP;
	}

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		return err;

	err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
				   macvlan_dev->dev_addr, true);
	if (err)
		goto err_rif_vrrp_add;

	/* Make sure the bridge driver does not have this MAC pointing at
	 * some other port.
	 */
	if (rif->ops->fdb_del)
		rif->ops->fdb_del(rif, macvlan_dev->dev_addr);

	return 0;

err_rif_vrrp_add:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
	return err;
}
8816
/* Undo mlxsw_sp_rif_macvlan_add() for @macvlan_dev. */
static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
				       const struct net_device *macvlan_dev)
{
	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
	/* If we do not have a RIF, then we already took care of removing
	 * the macvlan's MAC during RIF deletion.
	 */
	if (!rif)
		return;
	mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
			     false);
	mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
}
8834
/* Locked wrapper around __mlxsw_sp_rif_macvlan_del(). */
void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
			      const struct net_device *macvlan_dev)
{
	mutex_lock(&mlxsw_sp->router->lock);
	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
	mutex_unlock(&mlxsw_sp->router->lock);
}
8842
8843static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
8844 struct net_device *macvlan_dev,
8845 unsigned long event,
8846 struct netlink_ext_ack *extack)
8847{
8848 switch (event) {
8849 case NETDEV_UP:
8850 return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
8851 case NETDEV_DOWN:
8852 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
8853 break;
8854 }
8855
8856 return 0;
8857}
8858
/* Route an address event to the handler matching the netdev kind;
 * unrecognized kinds are ignored (success).
 */
static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
				     struct net_device *dev,
				     unsigned long event,
				     struct netlink_ext_ack *extack)
{
	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_inetaddr_port_event(dev, event, extack);

	if (netif_is_lag_master(dev))
		return mlxsw_sp_inetaddr_lag_event(dev, event, extack);

	if (netif_is_bridge_master(dev))
		return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
						      extack);

	if (is_vlan_dev(dev))
		return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
						    extack);

	if (netif_is_macvlan(dev))
		return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
						       extack);

	return 0;
}
8880
/* IPv4 inetaddr notifier: reflect address removal on a netdev into the
 * device's router configuration (RIF teardown and related state).
 */
static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp_router *router;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	/* NETDEV_UP is handled by mlxsw_sp_inetaddr_valid_event(), which
	 * runs at address-validation time and can veto the address.
	 */
	if (event == NETDEV_UP)
		return NOTIFY_DONE;

	router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
	mutex_lock(&router->lock);
	rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
out:
	mutex_unlock(&router->lock);
	return notifier_from_errno(err);
}
8905
/* IPv4 address-validation notifier: create/update the RIF when an address
 * is about to be added. Running at validation time allows the driver to
 * reject the address (via extack) if it cannot be supported.
 */
int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct in_validator_info *ivi = (struct in_validator_info *) ptr;
	struct net_device *dev = ivi->ivi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	/* Ignore devices that are not related to this ASIC instance. */
	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return NOTIFY_DONE;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return notifier_from_errno(err);
}
8929
/* Deferred-work context for IPv6 address events; the inet6addr notifier
 * runs in atomic context, so the actual processing happens from a work item.
 */
struct mlxsw_sp_inet6addr_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *dev;	/* held (dev_hold) until the work runs */
	unsigned long event;	/* NETDEV_* event being deferred */
};
8936
/* Process a deferred IPv6 address event in sleepable context. Takes RTNL
 * before the router lock, matching the locking order used elsewhere, and
 * releases the device reference taken when the work was queued.
 */
static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
{
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
	struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
	struct net_device *dev = inet6addr_work->dev;
	unsigned long event = inet6addr_work->event;
	struct mlxsw_sp_rif *rif;

	rtnl_lock();
	mutex_lock(&mlxsw_sp->router->lock);

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	__mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	rtnl_unlock();
	dev_put(dev);
	kfree(inet6addr_work);
}
8960
8961
/* IPv6 inet6addr notifier. Called in atomic context, so the event is
 * queued to a work item instead of being handled inline.
 */
static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
	struct net_device *dev = if6->idev->dev;
	struct mlxsw_sp_router *router;

	/* NETDEV_UP is handled by mlxsw_sp_inet6addr_valid_event(), which
	 * runs at address-validation time and can veto the address.
	 */
	if (event == NETDEV_UP)
		return NOTIFY_DONE;

	/* GFP_ATOMIC because we may not sleep in notifier context. */
	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
	if (!inet6addr_work)
		return NOTIFY_BAD;

	router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
	inet6addr_work->mlxsw_sp = router->mlxsw_sp;
	inet6addr_work->dev = dev;
	inet6addr_work->event = event;
	/* Reference dropped by the work item (dev_put). */
	dev_hold(dev);
	mlxsw_core_schedule_work(&inet6addr_work->work);

	return NOTIFY_DONE;
}
8988
/* IPv6 address-validation notifier: create/update the RIF when an address
 * is about to be added; may veto the address via extack. Runs in sleepable
 * context, unlike the inet6addr notifier above.
 */
int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
	struct net_device *dev = i6vi->i6vi_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	/* Ignore devices that are not related to this ASIC instance. */
	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return NOTIFY_DONE;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return notifier_from_errno(err);
}
9012
/* Update an existing RIF's MAC, MTU and MAC profile via the RITR register.
 * The register is read-modify-written: query the current contents, patch
 * the changed fields, then write back with the create/update opcode.
 */
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
			     const char *mac, int mtu, u8 mac_profile)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
9030
/* Handle a MAC address or MTU change on a netdev backing a RIF: move the
 * router FDB entry from the old MAC to the new one, swap the MAC profile,
 * re-edit the RIF in hardware and propagate the MTU to multicast routing.
 * On any failure, previously applied steps are rolled back in reverse order.
 */
static int
mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_rif *rif,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = rif->dev;
	u8 old_mac_profile;
	u16 fid_index;
	int err;

	fid_index = mlxsw_sp_fid_index(rif->fid);

	/* Remove the FDB entry for the old MAC before installing the new. */
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
	if (err)
		return err;

	/* Keep the old profile ID so it can be restored on error. */
	old_mac_profile = rif->mac_profile_id;
	err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr,
					       extack);
	if (err)
		goto err_rif_mac_profile_replace;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu, rif->mac_profile_id);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
	if (err)
		goto err_rif_fdb_op;

	if (rif->mtu != dev->mtu) {
		struct mlxsw_sp_vr *vr;
		int i;

		/* The RIF is relevant only to its mr_table instance, as unlike
		 * unicast routing, in multicast routing a RIF cannot be shared
		 * between several multicast routing tables.
		 */
		vr = &mlxsw_sp->router->vrs[rif->vr_id];
		for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
			mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
						   rif, dev->mtu);
	}

	/* Commit the new values to the software RIF state. */
	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu,
			  old_mac_profile);
err_rif_edit:
	mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
err_rif_mac_profile_replace:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
	return err;
}
9092
9093static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
9094 struct netdev_notifier_pre_changeaddr_info *info)
9095{
9096 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9097 struct mlxsw_sp_rif_mac_profile *profile;
9098 struct netlink_ext_ack *extack;
9099 u8 max_rif_mac_profiles;
9100 u64 occ;
9101
9102 extack = netdev_notifier_info_to_extack(&info->info);
9103
9104 profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
9105 if (profile)
9106 return 0;
9107
9108 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
9109 occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
9110 if (occ < max_rif_mac_profiles)
9111 return 0;
9112
9113 if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
9114 return 0;
9115
9116 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
9117 return -ENOBUFS;
9118}
9119
/* Netdev notifier entry point for events on devices backing a RIF:
 * MTU/MAC changes are applied to the RIF, PRE_CHANGEADDR may veto a
 * MAC change. Devices without a RIF are ignored.
 */
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	mutex_lock(&mlxsw_sp->router->lock);
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		goto out;

	switch (event) {
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
		err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
		break;
	case NETDEV_PRE_CHANGEADDR:
		err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
		break;
	}

out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}
9151
/* Handle a device being enslaved to a VRF: rebuild its RIF so that it is
 * created in the VRF's virtual router.
 */
static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *rif;

	/* If the netdev already has a RIF it was configured in a different
	 * (non-VRF) virtual router; destroy it first and then re-create it
	 * via the NETDEV_UP path below.
	 */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (rif)
		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
					  extack);

	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
}
9168
9169static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
9170 struct net_device *l3_dev)
9171{
9172 struct mlxsw_sp_rif *rif;
9173
9174 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
9175 if (!rif)
9176 return;
9177 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
9178}
9179
/* CHANGEUPPER notifier handler for VRF enslavement / release of a device
 * relevant to the router.
 */
int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
				 struct netdev_notifier_changeupper_info *info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	int err = 0;

	/* Ignore devices unrelated to this ASIC. Macvlan uppers are handled
	 * via the inetaddr notifiers (mlxsw_sp_inetaddr_macvlan_event), not
	 * via VRF enslavement, so they are skipped here as well.
	 */
	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
		return 0;

	mutex_lock(&mlxsw_sp->router->lock);
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		break;
	case NETDEV_CHANGEUPPER:
		if (info->linking) {
			struct netlink_ext_ack *extack;

			extack = netdev_notifier_info_to_extack(&info->info);
			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
		} else {
			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
		}
		break;
	}
	mutex_unlock(&mlxsw_sp->router->lock);

	return err;
}
9211
9212static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
9213 struct netdev_nested_priv *priv)
9214{
9215 struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;
9216
9217 if (!netif_is_macvlan(dev))
9218 return 0;
9219
9220 return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
9221 mlxsw_sp_fid_index(rif->fid), false);
9222}
9223
/* Remove the router FDB entries of all macvlan uppers of the RIF's device.
 * Called during RIF destruction; the warning tells the user that traffic
 * destined to those macvlans will no longer be directed to the router.
 */
static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
{
	struct netdev_nested_priv priv = {
		.data = (void *)rif,
	};

	if (!netif_is_macvlan_port(rif->dev))
		return 0;

	netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
	return netdev_walk_all_upper_dev_rcu(rif->dev,
					     __mlxsw_sp_rif_macvlan_flush, &priv);
}
9237
9238static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
9239 const struct mlxsw_sp_rif_params *params)
9240{
9241 struct mlxsw_sp_rif_subport *rif_subport;
9242
9243 rif_subport = mlxsw_sp_rif_subport_rif(rif);
9244 refcount_set(&rif_subport->ref_count, 1);
9245 rif_subport->vid = params->vid;
9246 rif_subport->lag = params->lag;
9247 if (params->lag)
9248 rif_subport->lag_id = params->lag_id;
9249 else
9250 rif_subport->system_port = params->system_port;
9251}
9252
/* Program (or tear down, per 'enable') a subport RIF in hardware via the
 * RITR register, including its MAC, MAC profile and {port|LAG, VID} binding.
 */
static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_subport *rif_subport;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
			    rif->rif_index, rif->vr_id, rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
	/* Bind the RIF to either a LAG or a single system port. */
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
				  rif_subport->lag ? rif_subport->lag_id :
						     rif_subport->system_port,
				  rif_subport->vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
9271
/* Configure a subport RIF in hardware: acquire a MAC profile, create the
 * RIF and install the FDB entry that directs the RIF MAC to the router.
 * On failure, previously acquired resources are released in reverse order.
 */
static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
					  struct netlink_ext_ack *extack)
{
	u8 mac_profile;
	int err;

	err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
					   &mac_profile, extack);
	if (err)
		return err;
	rif->mac_profile_id = mac_profile;

	err = mlxsw_sp_rif_subport_op(rif, true);
	if (err)
		goto err_rif_subport_op;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_subport_op(rif, false);
err_rif_subport_op:
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
	return err;
}
9302
/* Tear down a subport RIF, releasing resources in the reverse order of
 * mlxsw_sp_rif_subport_configure(), plus flushing macvlan FDB entries.
 */
static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_rif_subport_op(rif, false);
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}
9314
/* A subport RIF uses a per-RIF FID (rFID) keyed by its RIF index. */
static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
			     struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}
9321
/* Ops for RIFs on top of a single {port, VID} or {LAG, VID} (no .fdb_del:
 * subport RIFs are not bridged, so there is no bridge FDB to clean).
 */
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
	.setup			= mlxsw_sp_rif_subport_setup,
	.configure		= mlxsw_sp_rif_subport_configure,
	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
	.fid_get		= mlxsw_sp_rif_subport_fid_get,
};
9330
/* Program (or tear down) a FID- or VLAN-interface RIF in hardware via RITR,
 * binding it to the given FID/VID according to 'type'.
 */
static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
				    enum mlxsw_reg_ritr_if_type type,
				    u16 vid_fid, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
9346
/* Return the router port number: one past the highest local port, used as
 * the flood-table member representing the router.
 */
u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}
9351
/* Configure a FID RIF in hardware: acquire a MAC profile, create the RIF,
 * enable MC/BC flooding towards the router port and install the FDB entry
 * for the RIF MAC. On failure, steps are rolled back in reverse order.
 */
static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	u8 mac_profile;
	int err;

	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
					   &mac_profile, extack);
	if (err)
		return err;
	rif->mac_profile_id = mac_profile;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
				       true);
	if (err)
		goto err_rif_vlan_fid_op;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_fid_rif_set(rif->fid, rif);
	return 0;

err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
err_rif_vlan_fid_op:
	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
	return err;
}
9401
/* Tear down a FID RIF, releasing resources in the reverse order of
 * mlxsw_sp_rif_fid_configure(), plus flushing macvlan FDB entries.
 */
static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}
9419
/* A FID RIF uses an 802.1D FID keyed by the bridge device's ifindex. */
static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
			 struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}
9426
9427static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
9428{
9429 struct switchdev_notifier_fdb_info info = {};
9430 struct net_device *dev;
9431
9432 dev = br_fdb_find_port(rif->dev, mac, 0);
9433 if (!dev)
9434 return;
9435
9436 info.addr = mac;
9437 info.vid = 0;
9438 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
9439 NULL);
9440}
9441
/* Ops for RIFs on top of a VLAN-unaware bridge (802.1D FID). */
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type			= MLXSW_SP_RIF_TYPE_FID,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_fid_configure,
	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
	.fid_get		= mlxsw_sp_rif_fid_fid_get,
	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
};
9450
9451static struct mlxsw_sp_fid *
9452mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
9453 struct netlink_ext_ack *extack)
9454{
9455 struct net_device *br_dev;
9456 u16 vid;
9457 int err;
9458
9459 if (is_vlan_dev(rif->dev)) {
9460 vid = vlan_dev_vlan_id(rif->dev);
9461 br_dev = vlan_dev_real_dev(rif->dev);
9462 if (WARN_ON(!netif_is_bridge_master(br_dev)))
9463 return ERR_PTR(-EINVAL);
9464 } else {
9465 err = br_vlan_get_pvid(rif->dev, &vid);
9466 if (err < 0 || !vid) {
9467 NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
9468 return ERR_PTR(-EINVAL);
9469 }
9470 }
9471
9472 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
9473}
9474
9475static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
9476{
9477 struct switchdev_notifier_fdb_info info = {};
9478 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
9479 struct net_device *br_dev;
9480 struct net_device *dev;
9481
9482 br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
9483 dev = br_fdb_find_port(br_dev, mac, vid);
9484 if (!dev)
9485 return;
9486
9487 info.addr = mac;
9488 info.vid = vid;
9489 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
9490 NULL);
9491}
9492
/* Ops for VLAN RIFs, emulated on top of the FID RIF implementation; only
 * FID resolution and bridge-FDB cleanup differ.
 */
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
	.type			= MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_fid_configure,
	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
};
9501
/* Downcast a generic RIF to its enclosing IP-in-IP loopback RIF. */
static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}
9507
9508static void
9509mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
9510 const struct mlxsw_sp_rif_params *params)
9511{
9512 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
9513 struct mlxsw_sp_rif_ipip_lb *rif_lb;
9514
9515 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
9516 common);
9517 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
9518 rif_lb->lb_config = params_lb->lb_config;
9519}
9520
/* Spectrum-1 IP-in-IP loopback RIF configuration: bind the loopback to the
 * underlay virtual router directly (no underlay RIF on this ASIC).
 */
static int
mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;
	int err;

	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_vr))
		return PTR_ERR(ul_vr);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = ul_vr->id;
	lb_rif->ul_rif_id = 0;	/* unused on Spectrum-1 */
	++ul_vr->rif_count;
	return 0;

err_loopback_op:
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
	return err;
}
9548
/* Spectrum-1 IP-in-IP loopback RIF teardown: disable the loopback and
 * release the underlay virtual router reference.
 */
static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);

	--ul_vr->rif_count;
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}
9561
/* Spectrum-1 ops for IP-in-IP loopback RIFs. */
static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
};
9569
/* RIF ops table for Spectrum-1, indexed by RIF type. */
static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
};
9576
/* Program (or tear down) an underlay loopback RIF in hardware via RITR.
 * IP_MAX_MTU is used because the underlay RIF has no backing netdev.
 */
static int
mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
					     MLXSW_REG_RITR_LOOPBACK_GENERIC);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
9590
/* Allocate and program an underlay RIF in the given virtual router.
 * The new RIF is registered in the router's RIF table; on failure the
 * table slot is cleared and the allocation freed.
 */
static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
		       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *ul_rif;
	u16 rif_index;
	int err;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
		return ERR_PTR(err);
	}

	/* No backing netdev for an underlay RIF, hence the NULL dev. */
	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
	if (!ul_rif)
		return ERR_PTR(-ENOMEM);

	mlxsw_sp->router->rifs[rif_index] = ul_rif;
	ul_rif->mlxsw_sp = mlxsw_sp;
	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
	if (err)
		goto ul_rif_op_err;

	return ul_rif;

ul_rif_op_err:
	mlxsw_sp->router->rifs[rif_index] = NULL;
	kfree(ul_rif);
	return ERR_PTR(err);
}
9622
9623static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
9624{
9625 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
9626
9627 mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
9628 mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
9629 kfree(ul_rif);
9630}
9631
/* Get a reference on the underlay RIF of the virtual router bound to
 * table 'tb_id', creating both on first use. The underlay RIF is shared
 * per-VR and refcounted via ul_rif_refcnt.
 */
static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
		    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	/* Fast path: the VR already has an underlay RIF; just bump its
	 * reference count.
	 */
	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
		return vr->ul_rif;

	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
	if (IS_ERR(vr->ul_rif)) {
		err = PTR_ERR(vr->ul_rif);
		goto err_ul_rif_create;
	}

	/* rif_count keeps the VR alive while the underlay RIF exists. */
	vr->rif_count++;
	refcount_set(&vr->ul_rif_refcnt, 1);

	return vr->ul_rif;

err_ul_rif_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}
9661
/* Drop a reference on an underlay RIF; the last reference destroys the
 * RIF and releases its virtual router.
 */
static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	struct mlxsw_sp_vr *vr;

	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];

	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
		return;

	vr->rif_count--;
	mlxsw_sp_ul_rif_destroy(ul_rif);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}
9676
9677int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
9678 u16 *ul_rif_index)
9679{
9680 struct mlxsw_sp_rif *ul_rif;
9681 int err = 0;
9682
9683 mutex_lock(&mlxsw_sp->router->lock);
9684 ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
9685 if (IS_ERR(ul_rif)) {
9686 err = PTR_ERR(ul_rif);
9687 goto out;
9688 }
9689 *ul_rif_index = ul_rif->rif_index;
9690out:
9691 mutex_unlock(&mlxsw_sp->router->lock);
9692 return err;
9693}
9694
9695void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
9696{
9697 struct mlxsw_sp_rif *ul_rif;
9698
9699 mutex_lock(&mlxsw_sp->router->lock);
9700 ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
9701 if (WARN_ON(!ul_rif))
9702 goto out;
9703
9704 mlxsw_sp_ul_rif_put(ul_rif);
9705out:
9706 mutex_unlock(&mlxsw_sp->router->lock);
9707}
9708
/* Spectrum-2 IP-in-IP loopback RIF configuration: bind the loopback to a
 * shared underlay RIF (rather than directly to a VR as on Spectrum-1).
 */
static int
mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;
	int err;

	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_rif))
		return PTR_ERR(ul_rif);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = 0;	/* unused on Spectrum-2 */
	lb_rif->ul_rif_id = ul_rif->rif_index;

	return 0;

err_loopback_op:
	mlxsw_sp_ul_rif_put(ul_rif);
	return err;
}
9736
/* Spectrum-2 IP-in-IP loopback RIF teardown: disable the loopback and drop
 * the reference on the shared underlay RIF.
 */
static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;

	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
	mlxsw_sp_ul_rif_put(ul_rif);
}
9747
/* Spectrum-2 ops for IP-in-IP loopback RIFs. */
static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup                  = mlxsw_sp_rif_ipip_lb_setup,
	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
};
9755
/* RIF ops table for Spectrum-2 and later, indexed by RIF type. */
static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_emu_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
};
9762
/* Initialize RIF infrastructure: the RIF pointer table (sized by the
 * MAX_RIFS resource), the MAC profile IDR and the devlink occupancy hook
 * for MAC profiles.
 */
static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_core *core = mlxsw_sp->core;

	if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
		return -EIO;
	mlxsw_sp->router->max_rif_mac_profile =
		MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
	atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
	devlink_resource_occ_get_register(devlink,
					  MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
					  mlxsw_sp_rif_mac_profiles_occ_get,
					  mlxsw_sp);

	return 0;
}
9789
/* Tear down RIF infrastructure; by now all RIFs and MAC profiles should
 * have been released, which the WARNs assert.
 */
static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	devlink_resource_occ_get_unregister(devlink,
					    MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
	WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
	idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
	kfree(mlxsw_sp->router->rifs);
}
9804
9805static int
9806mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
9807{
9808 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
9809
9810 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
9811 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
9812}
9813
/* Common IP-in-IP initialization: tunnel list, ECN encap/decap mappings
 * and the global tunneling configuration.
 */
static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);

	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
	if (err)
		return err;
	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
	if (err)
		return err;

	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}
9829
/* Spectrum-1 IP-in-IP initialization: select the SP1 ops table and run the
 * common initialization.
 */
static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
	return mlxsw_sp_ipips_init(mlxsw_sp);
}
9835
/* Spectrum-2+ IP-in-IP initialization: select the SP2 ops table and run the
 * common initialization.
 */
static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
	return mlxsw_sp_ipips_init(mlxsw_sp);
}
9841
/* IP-in-IP teardown; all tunnel entries should already be gone. */
static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}
9846
/* Called by the FIB notifier infrastructure when a dump is inconsistent
 * and must be retried: flush any queued FIB work so no stale events are
 * processed after the flush, then drop all routes from the device so the
 * subsequent dump starts from a clean state.
 */
static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}
9859
9860#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* Accumulated multipath hash configuration, later packed into the RECR2
 * register: which headers participate in the hash and which fields of the
 * outer and inner packet are hashed.
 */
struct mlxsw_sp_mp_hash_config {
	DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
	DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
	DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
	DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
	bool inc_parsing_depth;	/* deeper parsing needed (e.g. inner headers) */
};
9868
/* Helpers for setting RECR2 header/field bits in the config bitmaps; the
 * range variant marks '_nr' consecutive fields starting at '_field'.
 */
#define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
	bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)

#define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)

#define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)
9877
/* Hash on the inner layer-3 addresses (and IPv6 next-header/flow-label) of
 * tunneled packets, for both inner IPv4 and inner IPv6.
 */
static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
{
	unsigned long *inner_headers = config->inner_headers;
	unsigned long *inner_fields = config->inner_fields;

	/* IPv4 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
	/* IPv6 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
}
9898
/* Hash on the outer IPv4 source and destination addresses. */
static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
{
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;

	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
}
9909
9910static void
9911mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
9912 u32 hash_fields)
9913{
9914 unsigned long *inner_headers = config->inner_headers;
9915 unsigned long *inner_fields = config->inner_fields;
9916
9917
9918 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
9919 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
9920 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
9921 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
9922 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
9923 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
9924 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
9925 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
9926
9927 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
9928 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
9929 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
9930 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
9931 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
9932 }
9933 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
9934 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
9935 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
9936 }
9937 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
9938 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
9939 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
9940 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
9941
9942 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
9943 MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
9944 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
9945 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
9946 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
9947 MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
9948}
9949
9950static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
9951 struct mlxsw_sp_mp_hash_config *config)
9952{
9953 struct net *net = mlxsw_sp_net(mlxsw_sp);
9954 unsigned long *headers = config->headers;
9955 unsigned long *fields = config->fields;
9956 u32 hash_fields;
9957
9958 switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
9959 case 0:
9960 mlxsw_sp_mp4_hash_outer_addr(config);
9961 break;
9962 case 1:
9963 mlxsw_sp_mp4_hash_outer_addr(config);
9964 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
9965 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
9966 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
9967 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
9968 break;
9969 case 2:
9970
9971 mlxsw_sp_mp4_hash_outer_addr(config);
9972
9973 mlxsw_sp_mp_hash_inner_l3(config);
9974 break;
9975 case 3:
9976 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
9977
9978 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
9979 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
9980 MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
9981 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
9982 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
9983 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
9984 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
9985 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
9986 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
9987 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
9988 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
9989 if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
9990 MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
9991
9992 mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
9993 break;
9994 }
9995}
9996
9997static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
9998{
9999 unsigned long *headers = config->headers;
10000 unsigned long *fields = config->fields;
10001
10002 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
10003 MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
10004 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
10005 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
10006 MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
10007 MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
10008}
10009
/* Stage the IPv6 ECMP hash configuration according to the net-namespace
 * multipath hash policy (the ip6_multipath_hash_* helpers encapsulate the
 * sysctl reads):
 *  0 - L3 (addresses, next header, flow label)
 *  1 - L4 (addresses, next header, TCP/UDP ports)
 *  2 - L3 outer or inner (encapsulated) L3
 *  3 - custom field selection via fib_multipath_hash_fields
 */
static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_mp_hash_config *config)
{
	u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;

	switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
	case 0:
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		break;
	case 1:
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		break;
	case 2:
		/* Outer */
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		/* Inner: hashing on inner headers requires deeper parsing. */
		mlxsw_sp_mp_hash_inner_l3(config);
		config->inc_parsing_depth = true;
		break;
	case 3:
		/* Outer */
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
		}
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
		}
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		/* Inner: only require deeper parsing when an inner field is
		 * actually selected.
		 */
		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
			config->inc_parsing_depth = true;
		break;
	}
}
10067
10068static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
10069 bool old_inc_parsing_depth,
10070 bool new_inc_parsing_depth)
10071{
10072 int err;
10073
10074 if (!old_inc_parsing_depth && new_inc_parsing_depth) {
10075 err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
10076 if (err)
10077 return err;
10078 mlxsw_sp->router->inc_parsing_depth = true;
10079 } else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
10080 mlxsw_sp_parsing_depth_dec(mlxsw_sp);
10081 mlxsw_sp->router->inc_parsing_depth = false;
10082 }
10083
10084 return 0;
10085}
10086
/* Compute and commit the ECMP hash configuration (IPv4 + IPv6) to the
 * device via the RECR2 register. On register-write failure, the parsing
 * depth adjustment is rolled back to its previous state. Returns 0 on
 * success or a negative errno.
 */
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	bool old_inc_parsing_depth, new_inc_parsing_depth;
	struct mlxsw_sp_mp_hash_config config = {};
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	unsigned long bit;
	u32 seed;
	int err;

	/* Seed derived from the base MAC so different systems hash
	 * differently (avoids polarization across devices).
	 */
	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
	mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);

	/* Adjust parsing depth before the register write; roll back below
	 * if the write fails.
	 */
	old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
	new_inc_parsing_depth = config.inc_parsing_depth;
	err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
						    old_inc_parsing_depth,
						    new_inc_parsing_depth);
	if (err)
		return err;

	/* Translate the staged bitmaps into RECR2 enable bits. */
	for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
		mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
		mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
		mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
		mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
	if (err)
		goto err_reg_write;

	return 0;

err_reg_write:
	/* Undo the parsing depth adjustment (swap old/new to reverse it). */
	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
					      old_inc_parsing_depth);
	return err;
}
10129#else
/* Stub: without CONFIG_IP_ROUTE_MULTIPATH there is no multipath routing,
 * so no ECMP hash configuration is needed.
 */
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}
10134#endif
10135
/* Program the device's DSCP-to-switch-priority map (RDPM register) to
 * mirror the kernel's ToS-based priority mapping.
 */
static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
{
	char rdpm_pl[MLXSW_REG_RDPM_LEN];
	unsigned int i;

	MLXSW_REG_ZERO(rdpm, rdpm_pl);

	/* The record index i is a DSCP value, while rt_tos2priority() takes
	 * a full ToS byte; shifting by 2 places the DSCP in the ToS bits it
	 * expects, leaving the two least-significant (ECN) bits zero.
	 */
	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
}
10153
10154static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
10155{
10156 struct net *net = mlxsw_sp_net(mlxsw_sp);
10157 bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
10158 char rgcr_pl[MLXSW_REG_RGCR_LEN];
10159 u64 max_rifs;
10160
10161 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
10162 return -EIO;
10163 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
10164
10165 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
10166 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
10167 mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
10168 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
10169}
10170
/* Disable the device's router (RGCR register). The write's return value is
 * intentionally ignored: this runs on teardown paths where nothing more can
 * be done on failure.
 */
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
10178
10179static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
10180 .init = mlxsw_sp_router_ll_basic_init,
10181 .ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
10182 .ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
10183 .raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
10184 .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
10185 .fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
10186 .fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
10187 .fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
10188 .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
10189 .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
10190 .fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
10191 .fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
10192};
10193
10194static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
10195{
10196 size_t max_size = 0;
10197 int i;
10198
10199 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
10200 size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;
10201
10202 if (size > max_size)
10203 max_size = size;
10204 }
10205 router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
10206 GFP_KERNEL);
10207 if (!router->ll_op_ctx)
10208 return -ENOMEM;
10209 INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
10210 return 0;
10211}
10212
/* Free the shared FIB-entry operation context. The private list must be
 * empty by now; a leftover entry indicates a leak, hence the WARN_ON.
 */
static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
{
	WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
	kfree(router->ll_op_ctx);
}
10218
10219static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
10220{
10221 u16 lb_rif_index;
10222 int err;
10223
10224
10225
10226
10227
10228 err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN,
10229 &lb_rif_index);
10230 if (err)
10231 return err;
10232
10233 mlxsw_sp->router->lb_rif_index = lb_rif_index;
10234
10235 return 0;
10236}
10237
/* Release the reference on the generic loopback RIF taken in
 * mlxsw_sp_lb_rif_init().
 */
static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index);
}
10242
10243static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
10244{
10245 size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);
10246
10247 mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
10248 mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
10249 mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
10250
10251 return 0;
10252}
10253
/* Spectrum-1 router operations. */
const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
	.init = mlxsw_sp1_router_init,
	.ipips_init = mlxsw_sp1_ipips_init,
};
10258
10259static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
10260{
10261 size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);
10262
10263 mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
10264 mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
10265 mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;
10266
10267 return 0;
10268}
10269
/* Spectrum-2 (and newer) router operations. */
const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
	.init = mlxsw_sp2_router_init,
	.ipips_init = mlxsw_sp2_ipips_init,
};
10274
/* Initialize the router subsystem: allocate the router context, bring up
 * all sub-modules in dependency order and register the notifiers last, so
 * no events arrive before the data structures they touch exist. On error,
 * everything done so far is unwound in reverse order. Returns 0 on success
 * or a negative errno.
 */
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mutex_init(&router->lock);
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	/* ASIC-generation specific parameters (RIF ops, adjacency sizes). */
	err = mlxsw_sp->router_ops->init(mlxsw_sp);
	if (err)
		goto err_router_ops_init;

	err = mlxsw_sp_router_xm_init(mlxsw_sp);
	if (err)
		goto err_xm_init;

	/* IPv4 may use the XM low-level ops when supported; IPv6 always
	 * uses the basic register interface.
	 */
	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = mlxsw_sp_router_xm_ipv4_is_supported(mlxsw_sp) ?
						       &mlxsw_sp_router_ll_xm_ops :
						       &mlxsw_sp_router_ll_basic_ops;
	router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;

	err = mlxsw_sp_router_ll_op_ctx_init(router);
	if (err)
		goto err_ll_op_ctx_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
			  mlxsw_sp_nh_grp_activity_work);

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_lb_rif_init(mlxsw_sp);
	if (err)
		goto err_lb_rif_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
	if (err)
		goto err_mp_hash_init;

	err = mlxsw_sp_dscp_init(mlxsw_sp);
	if (err)
		goto err_dscp_init;

	INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
	INIT_LIST_HEAD(&router->fib_event_queue);
	spin_lock_init(&router->fib_event_queue_lock);

	/* Notifier registration comes last: from here on events may fire
	 * and find the router fully initialized.
	 */
	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
	err = register_inetaddr_notifier(&router->inetaddr_nb);
	if (err)
		goto err_register_inetaddr_notifier;

	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
	err = register_inet6addr_notifier(&router->inet6addr_nb);
	if (err)
		goto err_register_inet6addr_notifier;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	mlxsw_sp->router->nexthop_nb.notifier_call =
		mlxsw_sp_nexthop_obj_event;
	err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
					&mlxsw_sp->router->nexthop_nb,
					extack);
	if (err)
		goto err_register_nexthop_notifier;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush, extack);
	if (err)
		goto err_register_fib_notifier;

	return 0;

	/* Error unwind: strictly the reverse of the order above. */
err_register_fib_notifier:
	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->nexthop_nb);
err_register_nexthop_notifier:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	unregister_inet6addr_notifier(&router->inet6addr_nb);
err_register_inet6addr_notifier:
	unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
	/* Drain ordered workqueue so no queued FIB work touches freed state. */
	mlxsw_core_flush_owq();
	WARN_ON(!list_empty(&router->fib_event_queue));
err_dscp_init:
err_mp_hash_init:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_lb_rif_fini(mlxsw_sp);
err_lb_rif_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
	mlxsw_sp_router_ll_op_ctx_fini(router);
err_ll_op_ctx_init:
	mlxsw_sp_router_xm_fini(mlxsw_sp);
err_xm_init:
err_router_ops_init:
	mutex_destroy(&mlxsw_sp->router->lock);
	kfree(mlxsw_sp->router);
	return err;
}
10442
/* Tear down the router subsystem in the exact reverse order of
 * mlxsw_sp_router_init(): notifiers are unregistered first so no new
 * events arrive, queued work is flushed, and only then are the
 * sub-modules finalized and the router context freed.
 */
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				&mlxsw_sp->router->fib_nb);
	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->nexthop_nb);
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
	/* Drain ordered workqueue; a non-empty event queue here is a leak. */
	mlxsw_core_flush_owq();
	WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_lb_rif_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
	mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
	mlxsw_sp_router_xm_fini(mlxsw_sp);
	mutex_destroy(&mlxsw_sp->router->lock);
	kfree(mlxsw_sp->router);
}
10470