1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35#include <linux/if_bridge.h>
36#include <linux/list.h>
37#include <net/arp.h>
38#include <net/gre.h>
39#include <net/ndisc.h>
40#include <net/ip6_tunnel.h>
41
42#include "spectrum.h"
43#include "spectrum_ipip.h"
44#include "spectrum_span.h"
45#include "spectrum_switchdev.h"
46
47int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
48{
49 int i;
50
51 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
52 return -EIO;
53
54 mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
55 MAX_SPAN);
56 mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
57 sizeof(struct mlxsw_sp_span_entry),
58 GFP_KERNEL);
59 if (!mlxsw_sp->span.entries)
60 return -ENOMEM;
61
62 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
63 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
64
65 INIT_LIST_HEAD(&curr->bound_ports_list);
66 curr->id = i;
67 }
68
69 return 0;
70}
71
72void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
73{
74 int i;
75
76 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
77 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
78
79 WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
80 }
81 kfree(mlxsw_sp->span.entries);
82}
83
/* Parms resolution for mirroring to a physical port: the destination is
 * simply the mlxsw port behind @to_dev. Never fails.
 */
static int
mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}
91
/* Enable HW port-analyzer entry 'pa_id' as a plain local-Ethernet SPAN
 * agent pointing at the destination port. Returns the register-write
 * status (0 on success).
 */
static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
108
/* Disable the HW port-analyzer entry of the given span type. Shared by
 * all per-type deconfigure callbacks; the register-write status is
 * intentionally ignored on this teardown path.
 */
static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
122
/* Deconfigure callback for mirroring to a local Ethernet port. */
static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}
129
/* Ops for mirroring straight to a front-panel (mlxsw) port. */
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.can_handle = mlxsw_sp_port_dev_check,
	.parms = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};
137
/* Resolve the destination MAC for @pkey on @dev via neighbour table @tbl
 * (ARP or ND). Creates the neighbour entry if absent and kicks resolution
 * with neigh_event_send(). Copies the hardware address into @dmac only if
 * the entry is currently valid; otherwise returns -ENOENT (the caller may
 * retry later via respin). Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	/* Trigger resolution; the result may only land asynchronously. */
	neigh_event_send(neigh, NULL);

	/* neigh->ha and nud_state are protected by neigh->lock. */
	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	/* Drop the reference taken by lookup/create. */
	neigh_release(neigh);
	return err;
}
164
/* Mark the parms as "not offloadable": dest_port == NULL means mirrored
 * packets are dropped in HW rather than forwarded. Always returns 0 so
 * callers can tail-call this from their parms callbacks.
 */
static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}
171
/* Resolve the egress device for @dmac through a VLAN-aware bridge.
 * On entry *p_vid is the VLAN the packet arrives with (0 = untagged, in
 * which case the bridge PVID is used). On success *p_vid is updated to
 * the VID the mirrored packet should carry on egress (0 if the egress
 * port transmits it untagged). Returns the bridge member device, or NULL
 * if the lookup cannot be completed.
 */
static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	/* Untagged traffic follows the PVID; a failing PVID lookup is
	 * unexpected, hence the WARN_ON.
	 */
	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	/* The VLAN must actually exist on the bridge itself. */
	if (!vid ||
	    br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	/* The egress port must be a member of the VLAN as well. */
	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}
200
/* Resolve the egress device for @dmac through a VLAN-unaware bridge:
 * a plain FDB lookup with VID 0. Returns NULL when there is no entry.
 */
static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}
207
/* Walk through bridge @br_dev towards the device that will transmit the
 * mirrored packet to @dmac, honoring VLAN awareness and STP state.
 * Returns NULL if the packet cannot be offloaded through this bridge
 * (no FDB hit, egress not an mlxsw lower, or port not forwarding).
 */
static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		/* A tagged packet into a VLAN-unaware bridge is not
		 * something we can resolve; only handle *p_vid == 0.
		 */
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	/* Only mirror through a port that STP currently allows to
	 * forward traffic.
	 */
	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}
241
/* Peel one 802.1Q layer: report the device's VID in *p_vid and return
 * the underlying real device.
 */
static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}
249
250static struct net_device *
251mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
252{
253 struct net_device *dev;
254 struct list_head *iter;
255
256 netdev_for_each_lower_dev(lag_dev, dev, iter)
257 if ((dev->flags & IFF_UP) && mlxsw_sp_port_dev_check(dev))
258 return dev;
259
260 return NULL;
261}
262
/* Common parms resolution for gretap4/gretap6 tunnels. Starting from the
 * L3 egress device @edev, resolve the neighbour for the gateway (or the
 * tunnel remote if there is no gateway), then walk down through any
 * VLAN / bridge / LAG layering until an mlxsw port is reached. The order
 * of the checks below mirrors the possible upper-device stackings and is
 * significant. On any obstacle, fall back to an unoffloadable entry.
 * Returns 0 (including in the unoffloadable case) or a negative errno.
 */
static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	/* No next-hop gateway means the remote endpoint is on-link. */
	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	/* The route may egress a VLAN upper; note its VID and descend. */
	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		/* A second VLAN tag cannot be offloaded. */
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}
319
320#if IS_ENABLED(CONFIG_NET_IPGRE)
/* Route the underlay of an IPv4 gretap tunnel: find the device that the
 * encapsulated packet would leave through. On success, update *saddrp
 * with the chosen source address and *daddrp with the next-hop gateway
 * (may be 0 for an on-link route; the caller handles that). Returns the
 * egress device or NULL on lookup failure / non-unicast route.
 */
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* Tunnel parms are protected by RTNL. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, parms.link, tun->fwmark);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	*daddrp = rt->rt_gateway;

out:
	/* Drop the route reference in both the success and failure case. */
	ip_rt_put(rt);
	return dev;
}
353
/* Parms resolution for mirroring into an IPv4 gretap tunnel. The tunnel
 * is only offloadable when it is up, uses no GRE options (key/csum/seq),
 * has a fixed TTL, inherits TOS, and has a concrete remote address;
 * otherwise fall back to an unoffloadable entry.
 */
static int
mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and the "inherit" TOS. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}
380
/* Program HW SPAN agent 'pa_id' to encapsulate mirrored packets in an
 * L2+IPv4 GRE header per the resolved parms. Returns the register-write
 * status (0 on success).
 */
static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
405
/* Deconfigure callback for mirroring into an IPv4 gretap tunnel. */
static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}
412
/* Ops for mirroring into an IPv4 gretap netdevice. */
static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = is_gretap_dev,
	.parms = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
419#endif
420
421#if IS_ENABLED(CONFIG_IPV6_GRE)
/* Route the underlay of an IPv6 gretap tunnel: find the device that the
 * encapsulated packet would leave through. On success, update *saddrp
 * with the chosen source address and *daddrp with the next-hop gateway.
 * Returns the egress device or NULL on failure.
 */
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* Tunnel parms are protected by RTNL. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	/* dst_release() tolerates NULL, so a single exit path suffices. */
	dst_release(dst);
	return dev;
}
454
/* Parms resolution for mirroring into an IPv6 gretap tunnel. Mirrors the
 * gretap4 logic: the tunnel must be up, carry no GRE options, have a
 * fixed hop limit, inherit the traffic class, and have a concrete remote
 * address; otherwise fall back to an unoffloadable entry.
 */
static int
mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed hop limit and inherited traffic class. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}
481
/* Program HW SPAN agent 'pa_id' to encapsulate mirrored packets in an
 * L2+IPv6 GRE header per the resolved parms. Returns the register-write
 * status (0 on success).
 */
static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
505
/* Deconfigure callback for mirroring into an IPv6 gretap tunnel. */
static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}
512
/* Ops for mirroring into an IPv6 gretap netdevice. */
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = is_ip6gretap_dev,
	.parms = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
520#endif
521
522static bool
523mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
524{
525 return is_vlan_dev(dev) &&
526 mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
527}
528
/* Parms resolution for mirroring to a VLAN upper of an mlxsw port: the
 * destination is the real device, and the mirrored copy carries the
 * VLAN's VID. The VLAN device must be up, else fall back to an
 * unoffloadable entry.
 */
static int
mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}
544
/* Program HW SPAN agent 'pa_id' as a remote-Ethernet (VLAN-tagged)
 * mirror towards the destination port. Returns the register-write
 * status (0 on success).
 */
static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
561
/* Deconfigure callback for mirroring to a VLAN upper. */
static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}
568
/* Ops for mirroring to a VLAN device on top of an mlxsw port. */
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};
576
/* All supported mirror-target types, probed in order by
 * mlxsw_sp_span_entry_ops(); tunnel types depend on kernel config.
 */
static const
struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};
588
/* Parms callback for an invalidated entry: always unoffloadable. */
static int
mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}
595
/* Configure callback for an invalidated entry: nothing to program. */
static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}
602
/* Deconfigure callback for an invalidated entry: nothing to undo. */
static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}
607
/* No-op ops installed by mlxsw_sp_span_entry_invalidate() when the
 * target netdevice goes away; .can_handle is deliberately left unset
 * since this type is never selected by the lookup.
 */
static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};
613
614static void
615mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
616 struct mlxsw_sp_span_entry *span_entry,
617 struct mlxsw_sp_span_parms sparms)
618{
619 if (sparms.dest_port) {
620 if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
621 netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
622 sparms.dest_port->dev->name);
623 sparms.dest_port = NULL;
624 } else if (span_entry->ops->configure(span_entry, sparms)) {
625 netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
626 sparms.dest_port->dev->name);
627 sparms.dest_port = NULL;
628 }
629 }
630
631 span_entry->parms = sparms;
632}
633
/* Undo the HW configuration of @span_entry, if it was actually
 * offloaded (dest_port != NULL).
 */
static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}
640
641static struct mlxsw_sp_span_entry *
642mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
643 const struct net_device *to_dev,
644 const struct mlxsw_sp_span_entry_ops *ops,
645 struct mlxsw_sp_span_parms sparms)
646{
647 struct mlxsw_sp_span_entry *span_entry = NULL;
648 int i;
649
650
651 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
652 if (!mlxsw_sp->span.entries[i].ref_count) {
653 span_entry = &mlxsw_sp->span.entries[i];
654 break;
655 }
656 }
657 if (!span_entry)
658 return NULL;
659
660 span_entry->ops = ops;
661 span_entry->ref_count = 1;
662 span_entry->to_dev = to_dev;
663 mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);
664
665 return span_entry;
666}
667
/* Release an entry whose last reference was dropped; the entry memory
 * itself lives in the table and is only deconfigured, not freed.
 */
static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
}
672
673struct mlxsw_sp_span_entry *
674mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
675 const struct net_device *to_dev)
676{
677 int i;
678
679 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
680 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
681
682 if (curr->ref_count && curr->to_dev == to_dev)
683 return curr;
684 }
685 return NULL;
686}
687
/* The mirror target went away: tear down the HW state and swap in the
 * no-op ops so later respins keep the entry unoffloaded.
 */
void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}
694
695static struct mlxsw_sp_span_entry *
696mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
697{
698 int i;
699
700 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
701 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
702
703 if (curr->ref_count && curr->id == span_id)
704 return curr;
705 }
706 return NULL;
707}
708
709static struct mlxsw_sp_span_entry *
710mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
711 const struct net_device *to_dev,
712 const struct mlxsw_sp_span_entry_ops *ops,
713 struct mlxsw_sp_span_parms sparms)
714{
715 struct mlxsw_sp_span_entry *span_entry;
716
717 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
718 if (span_entry) {
719
720 span_entry->ref_count++;
721 return span_entry;
722 }
723
724 return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
725}
726
727static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
728 struct mlxsw_sp_span_entry *span_entry)
729{
730 WARN_ON(!span_entry->ref_count);
731 if (--span_entry->ref_count == 0)
732 mlxsw_sp_span_entry_destroy(span_entry);
733 return 0;
734}
735
736static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
737{
738 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
739 struct mlxsw_sp_span_inspected_port *p;
740 int i;
741
742 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
743 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
744
745 list_for_each_entry(p, &curr->bound_ports_list, list)
746 if (p->local_port == port->local_port &&
747 p->type == MLXSW_SP_SPAN_EGRESS)
748 return true;
749 }
750
751 return false;
752}
753
/* Size (in cells) of the internal mirroring buffer for a port with the
 * given MTU. NOTE(review): the mtu * 5 / 2 + 1 factor is a device
 * headroom heuristic; rationale not visible here — confirm against HW
 * documentation before changing.
 */
static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
759
/* React to an MTU change on @port: if the port is an egress-mirror
 * source, resize its internal mirroring (SBIB) buffer accordingly.
 * Returns 0 on success or the register-write error.
 */
int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value.
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}
782
783static struct mlxsw_sp_span_inspected_port *
784mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
785 enum mlxsw_sp_span_type type,
786 struct mlxsw_sp_port *port,
787 bool bind)
788{
789 struct mlxsw_sp_span_inspected_port *p;
790
791 list_for_each_entry(p, &span_entry->bound_ports_list, list)
792 if (type == p->type &&
793 port->local_port == p->local_port &&
794 bind == p->bound)
795 return p;
796 return NULL;
797}
798
/* Bind or unbind (per @bind) @port to SPAN agent 'pa_id' in the given
 * direction via the MPAR register. Returns the register-write status.
 */
static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type,
				  bool bind)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	int pa_id = span_entry->id;

	/* mlxsw_sp_span_type maps directly onto the MPAR ingress/egress
	 * selector.
	 */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}
814
/* Attach @port as a mirror source to @span_entry. Allocates the egress
 * mirroring buffer if needed, performs the HW bind (when @bind), and
 * records the binding on the entry's list. The error path unwinds these
 * steps in reverse order — keep that ordering intact. Returns 0 or a
 * negative errno (-EEXIST when the port is already bound somewhere).
 */
static int
mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int i;
	int err;

	/* A given (port, direction) may be bound to at most one SPAN
	 * agent across all entries.
	 */
	if (bind)
		for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
			struct mlxsw_sp_span_entry *curr =
				&mlxsw_sp->span.entries[i];

			if (mlxsw_sp_span_entry_bound_port_find(curr, type,
								port, bind))
				return -EEXIST;
		}

	/* Egress mirroring needs an internal buffer sized by the MTU. */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	if (bind) {
		err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
							true);
		if (err)
			goto err_port_bind;
	}

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	inspected_port->bound = bind;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_inspected_port_alloc:
	/* Undo the HW bind performed above. */
	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
err_port_bind:
	/* Release the egress mirroring buffer (size 0 frees it). */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}
883
/* Detach @port from @span_entry: undo the HW bind (when @bind), release
 * the egress mirroring buffer, drop the entry reference, and free the
 * bound-port record. No-op when no matching record exists.
 */
static void
mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
							     port, bind);
	if (!inspected_port)
		return;

	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
	/* Remove the SBIB buffer if it was egress mirroring (size 0
	 * frees it).
	 */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}
913
914static const struct mlxsw_sp_span_entry_ops *
915mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
916 const struct net_device *to_dev)
917{
918 size_t i;
919
920 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
921 if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
922 return mlxsw_sp_span_entry_types[i];
923
924 return NULL;
925}
926
927int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
928 const struct net_device *to_dev,
929 enum mlxsw_sp_span_type type, bool bind,
930 int *p_span_id)
931{
932 struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
933 const struct mlxsw_sp_span_entry_ops *ops;
934 struct mlxsw_sp_span_parms sparms = {NULL};
935 struct mlxsw_sp_span_entry *span_entry;
936 int err;
937
938 ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
939 if (!ops) {
940 netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
941 return -EOPNOTSUPP;
942 }
943
944 err = ops->parms(to_dev, &sparms);
945 if (err)
946 return err;
947
948 span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
949 if (!span_entry)
950 return -ENOBUFS;
951
952 netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
953 span_entry->id);
954
955 err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
956 if (err)
957 goto err_port_bind;
958
959 *p_span_id = span_entry->id;
960 return 0;
961
962err_port_bind:
963 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
964 return err;
965}
966
/* Tear down the mirroring set up by mlxsw_sp_span_mirror_add() for the
 * given SPAN agent id and direction. Logs an error and returns if the
 * id does not name an in-use entry.
 */
void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
			      enum mlxsw_sp_span_type type, bool bind)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}
982
/* Re-resolve every in-use SPAN entry after a topology change (route,
 * neighbour, FDB, ...) and reprogram the hardware for entries whose
 * resolved parameters changed. Must run under RTNL.
 */
void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	ASSERT_RTNL();
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!curr->ref_count)
			continue;

		/* A resolution failure keeps the previous state. */
		err = curr->ops->parms(curr->to_dev, &sparms);
		if (err)
			continue;

		/* NOTE(review): memcmp over whole structs relies on the
		 * padding of both sides being identically (zero-)
		 * initialized; sparms starts from {NULL} here and in
		 * mirror_add, which appears to make this safe — confirm
		 * if parms ever gets built differently.
		 */
		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
}
1006