1
2
3
4#include <linux/if_bridge.h>
5#include <linux/list.h>
6#include <linux/mutex.h>
7#include <linux/refcount.h>
8#include <linux/rtnetlink.h>
9#include <linux/workqueue.h>
10#include <net/arp.h>
11#include <net/gre.h>
12#include <net/lag.h>
13#include <net/ndisc.h>
14#include <net/ip6_tunnel.h>
15
16#include "spectrum.h"
17#include "spectrum_ipip.h"
18#include "spectrum_span.h"
19#include "spectrum_switchdev.h"
20
/* Per-ASIC SPAN (port mirroring) state. Allocated with a flexible array of
 * agent entries, sized by the device's MAX_SPAN resource.
 */
struct mlxsw_sp_span {
	struct work_struct work;	/* Deferred respin of dynamic entries */
	struct mlxsw_sp *mlxsw_sp;
	const struct mlxsw_sp_span_trigger_ops **span_trigger_ops_arr;
	const struct mlxsw_sp_span_entry_ops **span_entry_ops_arr;
	size_t span_entry_ops_arr_size;
	struct list_head analyzed_ports_list;
	struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
	struct list_head trigger_entries_list;
	u16 policer_id_base;	/* Valid while policer_id_base_ref_count != 0 */
	refcount_t policer_id_base_ref_count;
	atomic_t active_entries_count;	/* In-use entries, for devlink occupancy */
	int entries_count;
	struct mlxsw_sp_span_entry entries[];
};
36
/* A (local port, direction) pair whose traffic is being mirrored.
 * Reference-counted so multiple mirroring users can share one port.
 */
struct mlxsw_sp_span_analyzed_port {
	struct list_head list;	/* Member of analyzed_ports_list */
	refcount_t ref_count;
	u8 local_port;
	bool ingress;		/* Direction being analyzed */
};
43
/* Binding between a mirroring trigger (e.g. ingress/egress on a port) and a
 * SPAN agent. Reference-counted so identical bindings are shared.
 */
struct mlxsw_sp_span_trigger_entry {
	struct list_head list;	/* Member of trigger_entries_list */
	struct mlxsw_sp_span *span;
	const struct mlxsw_sp_span_trigger_ops *ops;
	refcount_t ref_count;
	u8 local_port;
	enum mlxsw_sp_span_trigger trigger;
	struct mlxsw_sp_span_trigger_parms parms;
};
53
/* Whether a trigger is tied to a specific port or applies device-wide. */
enum mlxsw_sp_span_trigger_type {
	MLXSW_SP_SPAN_TRIGGER_TYPE_PORT,
	MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL,
};
58
/* Per-trigger-type operations: bind/unbind a trigger to its SPAN agent,
 * match an existing entry, and enable/disable mirroring per port and TC.
 */
struct mlxsw_sp_span_trigger_ops {
	int (*bind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
	void (*unbind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
	bool (*matches)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
			enum mlxsw_sp_span_trigger trigger,
			struct mlxsw_sp_port *mlxsw_sp_port);
	int (*enable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
		      struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
	void (*disable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
			struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
};
70
71static void mlxsw_sp_span_respin_work(struct work_struct *work);
72
73static u64 mlxsw_sp_span_occ_get(void *priv)
74{
75 const struct mlxsw_sp *mlxsw_sp = priv;
76
77 return atomic_read(&mlxsw_sp->span->active_entries_count);
78}
79
/* Allocate and initialize per-ASIC SPAN state, register the devlink
 * occupancy getter and the respin work. Returns 0 or -errno.
 */
int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_span *span;
	int i, entries_count, err;

	/* The number of SPAN agents is a device resource; bail out if the
	 * firmware does not expose it.
	 */
	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
	span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
	if (!span)
		return -ENOMEM;
	/* Zero means "no policer ID base set yet"; see
	 * mlxsw_sp_span_policer_id_base_set().
	 */
	refcount_set(&span->policer_id_base_ref_count, 0);
	span->entries_count = entries_count;
	atomic_set(&span->active_entries_count, 0);
	mutex_init(&span->analyzed_ports_lock);
	INIT_LIST_HEAD(&span->analyzed_ports_list);
	INIT_LIST_HEAD(&span->trigger_entries_list);
	span->mlxsw_sp = mlxsw_sp;
	mlxsw_sp->span = span;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++)
		mlxsw_sp->span->entries[i].id = i;

	/* ASIC-generation specific initialization (sets the ops arrays). */
	err = mlxsw_sp->span_ops->init(mlxsw_sp);
	if (err)
		goto err_init;

	devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
					  mlxsw_sp_span_occ_get, mlxsw_sp);
	INIT_WORK(&span->work, mlxsw_sp_span_respin_work);

	return 0;

err_init:
	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
	kfree(mlxsw_sp->span);
	return err;
}
120
/* Tear down per-ASIC SPAN state. By this point all triggers and analyzed
 * ports must already have been released (WARNs otherwise).
 */
void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);

	/* Stop the respin work before freeing the structure it lives in. */
	cancel_work_sync(&mlxsw_sp->span->work);
	devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);

	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
	kfree(mlxsw_sp->span);
}
133
134static bool mlxsw_sp1_span_cpu_can_handle(const struct net_device *dev)
135{
136 return !dev;
137}
138
/* Mirroring to the CPU port is not supported on Spectrum-1. */
static int mlxsw_sp1_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
					  const struct net_device *to_dev,
					  struct mlxsw_sp_span_parms *sparmsp)
{
	return -EOPNOTSUPP;
}
145
/* Never reached in practice: parms_set() above already rejects CPU
 * mirroring on Spectrum-1.
 */
static int
mlxsw_sp1_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	return -EOPNOTSUPP;
}
152
/* Nothing to undo: configure() always fails on Spectrum-1. */
static void
mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}
157
/* Spectrum-1 CPU-port agent ops: static (never respun) and unsupported. */
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
	.is_static = true,
	.can_handle = mlxsw_sp1_span_cpu_can_handle,
	.parms_set = mlxsw_sp1_span_entry_cpu_parms,
	.configure = mlxsw_sp1_span_entry_cpu_configure,
	.deconfigure = mlxsw_sp1_span_entry_cpu_deconfigure,
};
166
/* Mirroring to a physical port needs no encapsulation parameters; the
 * destination is simply the port's private data.
 */
static int
mlxsw_sp_span_entry_phys_parms(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}
175
/* Program the MPAT register to create a local-ethernet analyzer port for
 * this agent, including optional policer binding and session ID.
 */
static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
	mlxsw_reg_mpat_session_id_set(mpat_pl, sparms.session_id);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
195
/* Shared teardown: rewrite the MPAT entry with the enable bit cleared,
 * using the same SPAN type it was configured with.
 */
static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_mpat_session_id_set(mpat_pl, span_entry->parms.session_id);
	/* Best effort; there is no meaningful recovery from a failed write. */
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
210
/* Tear down a local-ethernet analyzer-port agent. */
static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}
217
/* Physical-port agent ops. Static: the destination cannot change under us,
 * so these entries are skipped by the respin work.
 */
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.is_static = true,
	.can_handle = mlxsw_sp_port_dev_check,
	.parms_set = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};
226
/* Resolve the destination MAC for @pkey on @dev via the neighbour table
 * @tbl (ARP or ND). Creates the neighbour and kicks resolution if missing;
 * returns -ENOENT when no valid hardware address is available yet.
 */
static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	/* Trigger resolution so a subsequent respin can pick up the result. */
	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}
253
/* Mark the parameters as "cannot be offloaded": a NULL dest_port makes the
 * entry a no-op until conditions change. Returns 0 — this is not an error.
 */
static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}
260
/* Resolve the egress device for @dmac through a VLAN-aware bridge.
 * On entry *p_vid is the VLAN hint (0 = use the bridge PVID); on success
 * *p_vid is updated to the VLAN the mirrored packets should carry
 * (0 when egress is untagged). Returns the bridge port device or NULL.
 */
static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	/* The VLAN must actually exist on the bridge itself. */
	if (!vid ||
	    br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}
289
/* VLAN-unaware bridge: a plain FDB lookup with VID 0. */
static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}
296
/* Resolve @dmac through bridge @br_dev to a lower device, and verify the
 * corresponding bridge port is in the forwarding STP state. Returns the
 * egress device or NULL when mirroring cannot be offloaded through it.
 */
static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		/* A tagged packet cannot egress a VLAN-unaware bridge. */
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}
330
/* Peel one VLAN device: report its VID and return the underlying device. */
static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}
338
339static struct net_device *
340mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
341{
342 struct net_device *dev;
343 struct list_head *iter;
344
345 netdev_for_each_lower_dev(lag_dev, dev, iter)
346 if (netif_carrier_ok(dev) &&
347 net_lag_port_dev_txable(dev) &&
348 mlxsw_sp_port_dev_check(dev))
349 return dev;
350
351 return NULL;
352}
353
/* Common tail of tunnel (gretap4/6) parameter resolution: starting from the
 * route egress device @edev, resolve the next-hop MAC and walk down through
 * VLAN / bridge / LAG uppers until a front-panel port is reached, filling
 * @sparmsp. Falls back to "unoffloadable" (dest_port == NULL, return 0)
 * whenever the topology cannot be offloaded.
 */
static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	/* On-link destination: the gateway is the destination itself. */
	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		/* At most one VLAN tag can be offloaded. */
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}
410
411#if IS_ENABLED(CONFIG_NET_IPGRE)
/* Look up the IPv4 route the gretap tunnel would use and return its egress
 * device, updating *saddrp (chosen source) and *daddrp (next-hop gateway).
 * Returns NULL when no suitable unicast route exists.
 */
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* Route lookups below rely on a stable tunnel configuration. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, parms.link, tun->fwmark);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	*daddrp = rt->rt_gateway;

out:
	ip_rt_put(rt);
	return dev;
}
444
/* Compute SPAN parameters for an ERSPAN-like gretap (IPv4) destination.
 * Tunnels whose configuration the ASIC cannot mirror are reported as
 * unoffloadable rather than as errors.
 */
static int
mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored frame. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}
472
/* Program an MPAT remote-L3 (encapsulated mirroring) entry for gretap4:
 * L2 header, optional VLAN tag, and the IPv4/GRE encapsulation.
 */
static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
499
/* Tear down a gretap4 remote-L3 agent. */
static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}
506
/* gretap4 agent ops. Dynamic (no .is_static): re-resolved by respin when
 * routes or neighbours change.
 */
static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = netif_is_gretap,
	.parms_set = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
513#endif
514
515#if IS_ENABLED(CONFIG_IPV6_GRE)
/* IPv6 counterpart of mlxsw_sp_span_gretap4_route(): resolve the route the
 * ip6gretap tunnel would use and return its egress device, updating
 * *saddrp / *daddrp. Returns NULL when no usable route exists.
 */
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* Route lookups below rely on a stable tunnel configuration. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}
548
/* Compute SPAN parameters for an ip6gretap destination; unsupported tunnel
 * configurations are reported as unoffloadable, not as errors.
 */
static int
mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored frame. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}
576
/* Program an MPAT remote-L3 entry for ip6gretap: L2 header, optional VLAN
 * tag, and the IPv6/GRE encapsulation.
 */
static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
602
/* Tear down an ip6gretap remote-L3 agent. */
static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}
609
/* ip6gretap agent ops; dynamic, so respun when routing state changes. */
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = netif_is_ip6gretap,
	.parms_set = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
617#endif
618
619static bool
620mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
621{
622 return is_vlan_dev(dev) &&
623 mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
624}
625
/* Compute SPAN parameters for a VLAN-device destination: the real port plus
 * the VID to tag mirrored packets with. A down device is unoffloadable.
 */
static int
mlxsw_sp_span_entry_vlan_parms(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}
642
/* Program an MPAT remote-ethernet (RSPAN-style, VLAN-tagged) entry. */
static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
661
/* Tear down a remote-ethernet (VLAN) agent. */
static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}
668
/* VLAN-device agent ops; dynamic, so respun when device state changes. */
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms_set = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};
676
/* Spectrum-1 agent-type dispatch table; probed in order by
 * mlxsw_sp_span_entry_ops(), so more specific handlers come first.
 */
static const
struct mlxsw_sp_span_entry_ops *mlxsw_sp1_span_entry_ops_arr[] = {
	&mlxsw_sp1_span_entry_ops_cpu,
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};
689
690static bool mlxsw_sp2_span_cpu_can_handle(const struct net_device *dev)
691{
692 return !dev;
693}
694
/* Spectrum-2+: the CPU port is a regular destination port. */
static int mlxsw_sp2_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
					  const struct net_device *to_dev,
					  struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
	return 0;
}
702
static int
mlxsw_sp2_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	/* Mirroring to the CPU port is like mirroring to any other physical
	 * port: the CPU port's local port number is simply used as the
	 * analyzer port.
	 */
	return mlxsw_sp_span_entry_phys_configure(span_entry, sparms);
}
712
713static void
714mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
715{
716 enum mlxsw_reg_mpat_span_type span_type;
717
718 span_type = MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH;
719 mlxsw_sp_span_entry_deconfigure_common(span_entry, span_type);
720}
721
/* Spectrum-2+ CPU-port agent ops; static since the CPU port never moves. */
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
	.is_static = true,
	.can_handle = mlxsw_sp2_span_cpu_can_handle,
	.parms_set = mlxsw_sp2_span_entry_cpu_parms,
	.configure = mlxsw_sp2_span_entry_cpu_configure,
	.deconfigure = mlxsw_sp2_span_entry_cpu_deconfigure,
};
730
/* Spectrum-2+ agent-type dispatch table; probed in order by
 * mlxsw_sp_span_entry_ops(), so more specific handlers come first.
 */
static const
struct mlxsw_sp_span_entry_ops *mlxsw_sp2_span_entry_ops_arr[] = {
	&mlxsw_sp2_span_entry_ops_cpu,
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};
743
/* Placeholder ops used after an entry is invalidated: always unoffloadable. */
static int
mlxsw_sp_span_entry_nop_parms(struct mlxsw_sp *mlxsw_sp,
			      const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}
751
/* Nothing to program for an invalidated entry. */
static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}
758
/* Nothing to undo for an invalidated entry. */
static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}
763
/* Ops installed by mlxsw_sp_span_entry_invalidate(); the entry stays
 * allocated but does nothing until released.
 */
static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms_set = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};
769
/* Apply @sparms to @span_entry. On any failure (or an unoffloadable /
 * foreign destination) the entry is recorded with dest_port == NULL so a
 * later respin can retry; span_entry->parms is always updated.
 */
static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	int err;

	if (!sparms.dest_port)
		goto set_parms;

	if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
		dev_err(mlxsw_sp->bus_info->dev,
			"Cannot mirror to a port which belongs to a different mlxsw instance\n");
		sparms.dest_port = NULL;
		goto set_parms;
	}

	err = span_entry->ops->configure(span_entry, sparms);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to offload mirror\n");
		sparms.dest_port = NULL;
		goto set_parms;
	}

set_parms:
	span_entry->parms = sparms;
}
797
/* Undo hardware configuration; a NULL dest_port means the entry was never
 * actually offloaded, so there is nothing to deconfigure.
 */
static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}
804
/* Account a reference on the SPAN policer ID base. The hardware binds SPAN
 * agent N to policer (policer_id_base + N), so every policer used for
 * mirroring must fall within entries_count IDs of one common base. The
 * first user establishes the base; later users must fit inside the window.
 */
static int mlxsw_sp_span_policer_id_base_set(struct mlxsw_sp_span *span,
					     u16 policer_id)
{
	struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
	u16 policer_id_base;
	int err;

	/* Policers set on SPAN agents must be in the range of
	 * policer_id_base..(policer_id_base + entries_count - 1). If the
	 * base is already set, only check the new ID against the window.
	 */
	if (refcount_read(&span->policer_id_base_ref_count)) {
		if (policer_id < span->policer_id_base ||
		    policer_id >= span->policer_id_base + span->entries_count)
			return -EINVAL;

		refcount_inc(&span->policer_id_base_ref_count);
		return 0;
	}

	/* Base must be even; round the first referenced policer ID down. */
	policer_id_base = policer_id % 2 == 0 ? policer_id : policer_id - 1;
	err = mlxsw_sp->span_ops->policer_id_base_set(mlxsw_sp,
						      policer_id_base);
	if (err)
		return err;

	span->policer_id_base = policer_id_base;
	refcount_set(&span->policer_id_base_ref_count, 1);

	return 0;
}
838
/* Drop a reference on the policer ID base; clearing it on the last put lets
 * the next user establish a fresh base.
 */
static void mlxsw_sp_span_policer_id_base_unset(struct mlxsw_sp_span *span)
{
	if (refcount_dec_and_test(&span->policer_id_base_ref_count))
		span->policer_id_base = 0;
}
844
/* Allocate a free agent entry from the fixed table, bind a policer base if
 * requested, and configure the hardware. Returns NULL when the table is
 * full or the policer ID does not fit the established base window.
 */
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* Find a free entry to use. */
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		if (!refcount_read(&mlxsw_sp->span->entries[i].ref_count)) {
			span_entry = &mlxsw_sp->span->entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	if (sparms.policer_enable) {
		int err;

		err = mlxsw_sp_span_policer_id_base_set(mlxsw_sp->span,
							sparms.policer_id);
		if (err)
			return NULL;
	}

	atomic_inc(&mlxsw_sp->span->active_entries_count);
	span_entry->ops = ops;
	refcount_set(&span_entry->ref_count, 1);
	span_entry->to_dev = to_dev;
	/* Best effort: on failure the entry stays allocated with
	 * dest_port == NULL and may be respun later.
	 */
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}
881
/* Release an agent entry: undo hardware state, drop the active count and
 * the policer base reference taken at creation time.
 */
static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	atomic_dec(&mlxsw_sp->span->active_entries_count);
	if (span_entry->parms.policer_enable)
		mlxsw_sp_span_policer_id_base_unset(mlxsw_sp->span);
}
890
891struct mlxsw_sp_span_entry *
892mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
893 const struct net_device *to_dev)
894{
895 int i;
896
897 for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
898 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
899
900 if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev)
901 return curr;
902 }
903 return NULL;
904}
905
/* Neutralize an entry whose destination has gone away: undo the hardware
 * state and install no-op ops. The entry remains allocated until its last
 * reference is put.
 */
void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}
912
913static struct mlxsw_sp_span_entry *
914mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
915{
916 int i;
917
918 for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
919 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
920
921 if (refcount_read(&curr->ref_count) && curr->id == span_id)
922 return curr;
923 }
924 return NULL;
925}
926
/* Find an in-use entry matching both the destination device and the
 * sharing-relevant parameters (policer and session), or NULL.
 */
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  const struct mlxsw_sp_span_parms *sparms)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev &&
		    curr->parms.policer_enable == sparms->policer_enable &&
		    curr->parms.policer_id == sparms->policer_id &&
		    curr->parms.session_id == sparms->session_id)
			return curr;
	}
	return NULL;
}
945
/* Get-or-create: reuse a matching entry (bumping its refcount) or allocate
 * a new one. Returns NULL on failure to create.
 */
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev,
			const struct mlxsw_sp_span_entry_ops *ops,
			struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_parms(mlxsw_sp, to_dev,
						       &sparms);
	if (span_entry) {
		/* Already exists, just take a reference. */
		refcount_inc(&span_entry->ref_count);
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
}
964
/* Drop a reference; destroy the entry when the last one is put. */
static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	if (refcount_dec_and_test(&span_entry->ref_count))
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}
972
/* Enable or disable the internal (egress-mirror) buffer in the port's
 * headroom configuration and push the new sizes to hardware.
 */
static int mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp_hdroom hdroom;

	/* Work on a copy so hardware and software state stay consistent if
	 * the configure call fails.
	 */
	hdroom = *mlxsw_sp_port->hdroom;
	hdroom.int_buf.enable = enable;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
}
983
/* Allocate the egress-mirror buffer on @mlxsw_sp_port. */
static int
mlxsw_sp_span_port_buffer_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, true);
}
989
/* Release the egress-mirror buffer on @mlxsw_sp_port (best effort). */
static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, false);
}
994
995static struct mlxsw_sp_span_analyzed_port *
996mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port,
997 bool ingress)
998{
999 struct mlxsw_sp_span_analyzed_port *analyzed_port;
1000
1001 list_for_each_entry(analyzed_port, &span->analyzed_ports_list, list) {
1002 if (analyzed_port->local_port == local_port &&
1003 analyzed_port->ingress == ingress)
1004 return analyzed_port;
1005 }
1006
1007 return NULL;
1008}
1009
/* Pick the agent ops for a destination device: first entry in the
 * generation-specific dispatch table whose can_handle() matches.
 */
static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev)
{
	struct mlxsw_sp_span *span = mlxsw_sp->span;
	size_t i;

	for (i = 0; i < span->span_entry_ops_arr_size; ++i)
		if (span->span_entry_ops_arr[i]->can_handle(to_dev))
			return span->span_entry_ops_arr[i];

	return NULL;
}
1023
/* Re-resolve all dynamic (non-static) in-use agents under RTNL and
 * reprogram any whose parameters changed (e.g. routes or neighbours moved).
 */
static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
	struct mlxsw_sp_span *span;
	struct mlxsw_sp *mlxsw_sp;
	int i, err;

	span = container_of(work, struct mlxsw_sp_span, work);
	mlxsw_sp = span->mlxsw_sp;

	rtnl_lock();
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!refcount_read(&curr->ref_count))
			continue;

		/* Static destinations (physical/CPU port) never change. */
		if (curr->ops->is_static)
			continue;

		err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
		if (err)
			continue;

		/* Only touch hardware when the resolved parameters differ. */
		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
	rtnl_unlock();
}
1055
/* Schedule a respin of dynamic agents; cheap no-op when none are active. */
void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
		return;
	mlxsw_core_schedule_work(&mlxsw_sp->span->work);
}
1062
/* Acquire a SPAN agent for the destination described by @parms and return
 * its ID via @p_span_id. Must be called under RTNL. Returns 0 or -errno
 * (-EOPNOTSUPP for an unsupported destination, -ENOBUFS when the agent
 * table is exhausted).
 */
int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id,
			    const struct mlxsw_sp_span_agent_parms *parms)
{
	const struct net_device *to_dev = parms->to_dev;
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp_span_parms sparms;
	int err;

	ASSERT_RTNL();

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		dev_err(mlxsw_sp->bus_info->dev, "Cannot mirror to requested destination\n");
		return -EOPNOTSUPP;
	}

	memset(&sparms, 0, sizeof(sparms));
	err = ops->parms_set(mlxsw_sp, to_dev, &sparms);
	if (err)
		return err;

	sparms.policer_id = parms->policer_id;
	sparms.policer_enable = parms->policer_enable;
	sparms.session_id = parms->session_id;
	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	*p_span_id = span_entry->id;

	return 0;
}
1096
/* Release a SPAN agent previously acquired with mlxsw_sp_span_agent_get().
 * Must be called under RTNL; WARNs if the ID is not in use.
 */
void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	struct mlxsw_sp_span_entry *span_entry;

	ASSERT_RTNL();

	span_entry = mlxsw_sp_span_entry_find_by_id(mlxsw_sp, span_id);
	if (WARN_ON_ONCE(!span_entry))
		return;

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
}
1109
/* Allocate a tracking entry for a port that is about to be analyzed
 * (mirrored) and link it on the span's analyzed-ports list. Callers in
 * this file hold analyzed_ports_lock around this. Returns the new entry
 * or an ERR_PTR().
 */
static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
				   struct mlxsw_sp_port *mlxsw_sp_port,
				   bool ingress)
{
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	int err;

	analyzed_port = kzalloc(sizeof(*analyzed_port), GFP_KERNEL);
	if (!analyzed_port)
		return ERR_PTR(-ENOMEM);

	refcount_set(&analyzed_port->ref_count, 1);
	analyzed_port->local_port = mlxsw_sp_port->local_port;
	analyzed_port->ingress = ingress;
	list_add_tail(&analyzed_port->list, &span->analyzed_ports_list);

	/* A mirror buffer is only set up for ports analyzed in the egress
	 * direction; presumably it absorbs the egress mirroring overhead —
	 * TODO confirm against the port-buffer code.
	 */
	if (!ingress) {
		err = mlxsw_sp_span_port_buffer_enable(mlxsw_sp_port);
		if (err)
			goto err_buffer_update;
	}

	return analyzed_port;

err_buffer_update:
	list_del(&analyzed_port->list);
	kfree(analyzed_port);
	return ERR_PTR(err);
}
1143
/* Undo mlxsw_sp_span_analyzed_port_create(): release the egress mirror
 * buffer (egress-analyzed ports only), unlink and free the entry.
 */
static void
mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct mlxsw_sp_span_analyzed_port *
				    analyzed_port)
{
	/* Only egress-analyzed ports had a mirror buffer enabled. */
	if (!analyzed_port->ingress)
		mlxsw_sp_span_port_buffer_disable(mlxsw_sp_port);

	list_del(&analyzed_port->list);
	kfree(analyzed_port);
}
1158
1159int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
1160 bool ingress)
1161{
1162 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1163 struct mlxsw_sp_span_analyzed_port *analyzed_port;
1164 u8 local_port = mlxsw_sp_port->local_port;
1165 int err = 0;
1166
1167 mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
1168
1169 analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
1170 local_port, ingress);
1171 if (analyzed_port) {
1172 refcount_inc(&analyzed_port->ref_count);
1173 goto out_unlock;
1174 }
1175
1176 analyzed_port = mlxsw_sp_span_analyzed_port_create(mlxsw_sp->span,
1177 mlxsw_sp_port,
1178 ingress);
1179 if (IS_ERR(analyzed_port))
1180 err = PTR_ERR(analyzed_port);
1181
1182out_unlock:
1183 mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
1184 return err;
1185}
1186
1187void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
1188 bool ingress)
1189{
1190 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1191 struct mlxsw_sp_span_analyzed_port *analyzed_port;
1192 u8 local_port = mlxsw_sp_port->local_port;
1193
1194 mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
1195
1196 analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
1197 local_port, ingress);
1198 if (WARN_ON_ONCE(!analyzed_port))
1199 goto out_unlock;
1200
1201 if (!refcount_dec_and_test(&analyzed_port->ref_count))
1202 goto out_unlock;
1203
1204 mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp_port, analyzed_port);
1205
1206out_unlock:
1207 mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
1208}
1209
/* Program (or clear, when @enable is false) a per-port mirroring
 * trigger through the MPAR register. Only the INGRESS and EGRESS
 * triggers are per-port; anything else is a caller bug.
 */
static int
__mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span,
				  struct mlxsw_sp_span_trigger_entry *
				  trigger_entry, bool enable)
{
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	enum mlxsw_reg_mpar_i_e i_e;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_INGRESS:
		i_e = MLXSW_REG_MPAR_TYPE_INGRESS;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EGRESS:
		i_e = MLXSW_REG_MPAR_TYPE_EGRESS;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/* MPAR cannot encode rates above MLXSW_REG_MPAR_RATE_MAX. */
	if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAR_RATE_MAX)
		return -EINVAL;

	mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
			    trigger_entry->parms.span_id,
			    trigger_entry->parms.probability_rate);
	return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}
1238
1239static int
1240mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span_trigger_entry *
1241 trigger_entry)
1242{
1243 return __mlxsw_sp_span_trigger_port_bind(trigger_entry->span,
1244 trigger_entry, true);
1245}
1246
1247static void
1248mlxsw_sp_span_trigger_port_unbind(struct mlxsw_sp_span_trigger_entry *
1249 trigger_entry)
1250{
1251 __mlxsw_sp_span_trigger_port_bind(trigger_entry->span, trigger_entry,
1252 false);
1253}
1254
1255static bool
1256mlxsw_sp_span_trigger_port_matches(struct mlxsw_sp_span_trigger_entry *
1257 trigger_entry,
1258 enum mlxsw_sp_span_trigger trigger,
1259 struct mlxsw_sp_port *mlxsw_sp_port)
1260{
1261 return trigger_entry->trigger == trigger &&
1262 trigger_entry->local_port == mlxsw_sp_port->local_port;
1263}
1264
static int
mlxsw_sp_span_trigger_port_enable(struct mlxsw_sp_span_trigger_entry *
				  trigger_entry,
				  struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
	/* Nothing to do: per-port triggers take effect when bound via
	 * MPAR (see __mlxsw_sp_span_trigger_port_bind()).
	 */
	return 0;
}
1273
static void
mlxsw_sp_span_trigger_port_disable(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry,
				   struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
	/* No-op: per-port triggers are deactivated at unbind time. */
}
1280
/* Ops for triggers bound to a specific local port (programmed via MPAR);
 * shared by all ASIC generations.
 */
static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp_span_trigger_port_ops = {
	.bind = mlxsw_sp_span_trigger_port_bind,
	.unbind = mlxsw_sp_span_trigger_port_unbind,
	.matches = mlxsw_sp_span_trigger_port_matches,
	.enable = mlxsw_sp_span_trigger_port_enable,
	.disable = mlxsw_sp_span_trigger_port_disable,
};
1289
static int
mlxsw_sp1_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	/* Global (device-wide) mirroring triggers cannot be offloaded on
	 * Spectrum-1.
	 */
	return -EOPNOTSUPP;
}
1296
static void
mlxsw_sp1_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry)
{
	/* Nothing to undo: the Spectrum-1 global bind always fails. */
}
1302
static bool
mlxsw_sp1_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      enum mlxsw_sp_span_trigger trigger,
				      struct mlxsw_sp_port *mlxsw_sp_port)
{
	/* Should never run: bind() fails on Spectrum-1, so no global
	 * trigger entry can exist to be matched against.
	 */
	WARN_ON_ONCE(1);
	return false;
}
1312
static int
mlxsw_sp1_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     u8 tc)
{
	/* Unsupported on Spectrum-1, consistent with the failing bind(). */
	return -EOPNOTSUPP;
}
1321
static void
mlxsw_sp1_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      u8 tc)
{
	/* No-op: global triggers are never enabled on Spectrum-1. */
}
1329
/* Spectrum-1 stubs for global triggers: binding is rejected, the rest
 * are defensive no-ops.
 */
static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp1_span_trigger_global_ops = {
	.bind = mlxsw_sp1_span_trigger_global_bind,
	.unbind = mlxsw_sp1_span_trigger_global_unbind,
	.matches = mlxsw_sp1_span_trigger_global_matches,
	.enable = mlxsw_sp1_span_trigger_global_enable,
	.disable = mlxsw_sp1_span_trigger_global_disable,
};
1338
/* Per-trigger-type ops table for Spectrum-1, indexed by
 * enum mlxsw_sp_span_trigger_type.
 */
static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp1_span_trigger_ops_arr[] = {
	[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
	[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
		&mlxsw_sp1_span_trigger_global_ops,
};
1345
/* Bind a global (device-wide) mirroring trigger through the MPAGR
 * register, mapping the driver trigger onto the hardware trigger.
 */
static int
mlxsw_sp2_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
	enum mlxsw_reg_mpagr_trigger trigger;
	char mpagr_pl[MLXSW_REG_MPAGR_LEN];

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
		trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED;
		break;
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		trigger = MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/* MPAGR cannot encode rates above MLXSW_REG_MPAGR_RATE_MAX. */
	if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAGR_RATE_MAX)
		return -EINVAL;

	mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id,
			     trigger_entry->parms.probability_rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl);
}
1376
static void
mlxsw_sp2_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry)
{
	/* Intentionally empty: nothing is written to the device here.
	 * NOTE(review): presumably MPAGR offers no way to tear down a
	 * global trigger once bound — confirm against the register spec.
	 */
}
1385
static bool
mlxsw_sp2_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      enum mlxsw_sp_span_trigger trigger,
				      struct mlxsw_sp_port *mlxsw_sp_port)
{
	/* Global triggers are device-wide, so the port is not compared. */
	return trigger_entry->trigger == trigger;
}
1394
/* Toggle mirroring of traffic class @tc on @mlxsw_sp_port for a global
 * trigger, by flipping the per-TC enable bit in the MOMTE register.
 */
static int
__mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				       trigger_entry,
				       struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 tc, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
	char momte_pl[MLXSW_REG_MOMTE_LEN];
	enum mlxsw_reg_momte_type type;
	int err;

	switch (trigger_entry->trigger) {
	case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
		type = MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS;
		break;
	case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
		type = MLXSW_REG_MOMTE_TYPE_WRED;
		break;
	case MLXSW_SP_SPAN_TRIGGER_ECN:
		type = MLXSW_REG_MOMTE_TYPE_ECN;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	/* Read-modify-write: query the register first so that only the
	 * requested traffic class changes state and the other TCs keep
	 * their current configuration.
	 */
	mlxsw_reg_momte_pack(momte_pl, mlxsw_sp_port->local_port, type);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
	if (err)
		return err;

	mlxsw_reg_momte_tclass_en_set(momte_pl, tc, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
}
1432
/* Enable mirroring of @tc on @mlxsw_sp_port for this global trigger. */
static int
mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     u8 tc)
{
	return __mlxsw_sp2_span_trigger_global_enable(trigger_entry,
						      mlxsw_sp_port, tc, true);
}
1442
/* Disable mirroring of @tc on @mlxsw_sp_port for this global trigger. */
static void
mlxsw_sp2_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      u8 tc)
{
	__mlxsw_sp2_span_trigger_global_enable(trigger_entry, mlxsw_sp_port, tc,
					       false);
}
1452
/* Global-trigger ops for Spectrum-2 and later: bound through MPAGR,
 * enabled per traffic class through MOMTE.
 */
static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp2_span_trigger_global_ops = {
	.bind = mlxsw_sp2_span_trigger_global_bind,
	.unbind = mlxsw_sp2_span_trigger_global_unbind,
	.matches = mlxsw_sp2_span_trigger_global_matches,
	.enable = mlxsw_sp2_span_trigger_global_enable,
	.disable = mlxsw_sp2_span_trigger_global_disable,
};
1461
/* Per-trigger-type ops table for Spectrum-2+, indexed by
 * enum mlxsw_sp_span_trigger_type.
 */
static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp2_span_trigger_ops_arr[] = {
	[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
	[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
		&mlxsw_sp2_span_trigger_global_ops,
};
1468
1469static void
1470mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry)
1471{
1472 struct mlxsw_sp_span *span = trigger_entry->span;
1473 enum mlxsw_sp_span_trigger_type type;
1474
1475 switch (trigger_entry->trigger) {
1476 case MLXSW_SP_SPAN_TRIGGER_INGRESS:
1477 case MLXSW_SP_SPAN_TRIGGER_EGRESS:
1478 type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT;
1479 break;
1480 case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
1481 case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
1482 case MLXSW_SP_SPAN_TRIGGER_ECN:
1483 type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL;
1484 break;
1485 default:
1486 WARN_ON_ONCE(1);
1487 return;
1488 }
1489
1490 trigger_entry->ops = span->span_trigger_ops_arr[type];
1491}
1492
1493static struct mlxsw_sp_span_trigger_entry *
1494mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
1495 enum mlxsw_sp_span_trigger trigger,
1496 struct mlxsw_sp_port *mlxsw_sp_port,
1497 const struct mlxsw_sp_span_trigger_parms
1498 *parms)
1499{
1500 struct mlxsw_sp_span_trigger_entry *trigger_entry;
1501 int err;
1502
1503 trigger_entry = kzalloc(sizeof(*trigger_entry), GFP_KERNEL);
1504 if (!trigger_entry)
1505 return ERR_PTR(-ENOMEM);
1506
1507 refcount_set(&trigger_entry->ref_count, 1);
1508 trigger_entry->local_port = mlxsw_sp_port ? mlxsw_sp_port->local_port :
1509 0;
1510 trigger_entry->trigger = trigger;
1511 memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
1512 trigger_entry->span = span;
1513 mlxsw_sp_span_trigger_ops_set(trigger_entry);
1514 list_add_tail(&trigger_entry->list, &span->trigger_entries_list);
1515
1516 err = trigger_entry->ops->bind(trigger_entry);
1517 if (err)
1518 goto err_trigger_entry_bind;
1519
1520 return trigger_entry;
1521
1522err_trigger_entry_bind:
1523 list_del(&trigger_entry->list);
1524 kfree(trigger_entry);
1525 return ERR_PTR(err);
1526}
1527
/* Tear a trigger entry down in reverse order of creation: unbind from
 * hardware, unlink, free.
 */
static void
mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
				    struct mlxsw_sp_span_trigger_entry *
				    trigger_entry)
{
	trigger_entry->ops->unbind(trigger_entry);
	list_del(&trigger_entry->list);
	kfree(trigger_entry);
}
1537
1538static struct mlxsw_sp_span_trigger_entry *
1539mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
1540 enum mlxsw_sp_span_trigger trigger,
1541 struct mlxsw_sp_port *mlxsw_sp_port)
1542{
1543 struct mlxsw_sp_span_trigger_entry *trigger_entry;
1544
1545 list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
1546 if (trigger_entry->ops->matches(trigger_entry, trigger,
1547 mlxsw_sp_port))
1548 return trigger_entry;
1549 }
1550
1551 return NULL;
1552}
1553
1554int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
1555 enum mlxsw_sp_span_trigger trigger,
1556 struct mlxsw_sp_port *mlxsw_sp_port,
1557 const struct mlxsw_sp_span_trigger_parms *parms)
1558{
1559 struct mlxsw_sp_span_trigger_entry *trigger_entry;
1560 int err = 0;
1561
1562 ASSERT_RTNL();
1563
1564 if (!mlxsw_sp_span_entry_find_by_id(mlxsw_sp, parms->span_id))
1565 return -EINVAL;
1566
1567 trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
1568 trigger,
1569 mlxsw_sp_port);
1570 if (trigger_entry) {
1571 if (trigger_entry->parms.span_id != parms->span_id ||
1572 trigger_entry->parms.probability_rate !=
1573 parms->probability_rate)
1574 return -EINVAL;
1575 refcount_inc(&trigger_entry->ref_count);
1576 goto out;
1577 }
1578
1579 trigger_entry = mlxsw_sp_span_trigger_entry_create(mlxsw_sp->span,
1580 trigger,
1581 mlxsw_sp_port,
1582 parms);
1583 if (IS_ERR(trigger_entry))
1584 err = PTR_ERR(trigger_entry);
1585
1586out:
1587 return err;
1588}
1589
1590void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
1591 enum mlxsw_sp_span_trigger trigger,
1592 struct mlxsw_sp_port *mlxsw_sp_port,
1593 const struct mlxsw_sp_span_trigger_parms *parms)
1594{
1595 struct mlxsw_sp_span_trigger_entry *trigger_entry;
1596
1597 ASSERT_RTNL();
1598
1599 if (WARN_ON_ONCE(!mlxsw_sp_span_entry_find_by_id(mlxsw_sp,
1600 parms->span_id)))
1601 return;
1602
1603 trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
1604 trigger,
1605 mlxsw_sp_port);
1606 if (WARN_ON_ONCE(!trigger_entry))
1607 return;
1608
1609 if (!refcount_dec_and_test(&trigger_entry->ref_count))
1610 return;
1611
1612 mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
1613}
1614
1615int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
1616 enum mlxsw_sp_span_trigger trigger, u8 tc)
1617{
1618 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1619 struct mlxsw_sp_span_trigger_entry *trigger_entry;
1620
1621 ASSERT_RTNL();
1622
1623 trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
1624 trigger,
1625 mlxsw_sp_port);
1626 if (WARN_ON_ONCE(!trigger_entry))
1627 return -EINVAL;
1628
1629 return trigger_entry->ops->enable(trigger_entry, mlxsw_sp_port, tc);
1630}
1631
1632void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
1633 enum mlxsw_sp_span_trigger trigger, u8 tc)
1634{
1635 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1636 struct mlxsw_sp_span_trigger_entry *trigger_entry;
1637
1638 ASSERT_RTNL();
1639
1640 trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
1641 trigger,
1642 mlxsw_sp_port);
1643 if (WARN_ON_ONCE(!trigger_entry))
1644 return;
1645
1646 return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc);
1647}
1648
/* Install the Spectrum-1 trigger and entry ops tables on the span. */
static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr);

	/* Sanity check: the CPU-port ops are expected at index 0 of the
	 * entry ops array. NOTE(review): presumably other code indexes
	 * the array with that assumption — confirm which.
	 */
	if (WARN_ON(mlxsw_sp1_span_entry_ops_arr[0] !=
		    &mlxsw_sp1_span_entry_ops_cpu))
		return -EINVAL;

	mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp1_span_entry_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr_size = arr_size;

	return 0;
}
1666
static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
					      u16 policer_id_base)
{
	/* Setting a mirroring policer base is not supported on Spectrum-1. */
	return -EOPNOTSUPP;
}
1672
/* Spectrum-1 SPAN ops (no policer support). */
const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
	.init = mlxsw_sp1_span_init,
	.policer_id_base_set = mlxsw_sp1_span_policer_id_base_set,
};
1677
/* Install the Spectrum-2+ trigger and entry ops tables on the span. */
static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t arr_size = ARRAY_SIZE(mlxsw_sp2_span_entry_ops_arr);

	/* Sanity check: the CPU-port ops are expected at index 0 of the
	 * entry ops array, mirroring the Spectrum-1 check above.
	 */
	if (WARN_ON(mlxsw_sp2_span_entry_ops_arr[0] !=
		    &mlxsw_sp2_span_entry_ops_cpu))
		return -EINVAL;

	mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp2_span_entry_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr_size = arr_size;

	return 0;
}
1695
/* Per-ASIC factors for egress mirror buffer sizing.
 * NOTE(review): not referenced in this chunk — presumably consumed by the
 * port-buffer sizing code elsewhere in the file; confirm there.
 */
#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50

/* Program the base policer index used for mirroring via the MOGCR
 * register. Read-modify-write so the register's other fields are kept.
 */
static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
					      u16 policer_id_base)
{
	char mogcr_pl[MLXSW_REG_MOGCR_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
	if (err)
		return err;

	mlxsw_reg_mogcr_mirroring_pid_base_set(mogcr_pl, policer_id_base);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
}
1712
/* Spectrum-2 SPAN ops. */
const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
	.init = mlxsw_sp2_span_init,
	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};
1717
/* Spectrum-3 SPAN ops: identical to Spectrum-2 at this level. */
const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
	.init = mlxsw_sp2_span_init,
	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};
1722