/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 *
 * SPDX-License-Identifier: GPL-2.0
 */
#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>
#include <net/flow_offload.h>
#include <net/switchdev.h>
#include <uapi/linux/if_bridge.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"

struct cfp_rule {
	int port;
	struct ethtool_rx_flow_spec fs;
	struct list_head next;
};

struct cfp_udf_slice_layout {
	u8 slices[UDFS_PER_SLICE];
	u32 mask_value;
	u32 base_offset;
};

struct cfp_udf_layout {
	struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
};

static const u8 zero_slice[UDFS_PER_SLICE] = { };

/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
	.udfs = {
		[1] = {
			.slices = {
				/* End of L2, byte offset 12, src IP[0:15] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[16:31] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, dst IP[0:15] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, dst IP[16:31] */
				CFG_UDF_EOL2 | 9,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
				0, 0, 0
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
		},
	},
};

/* UDF slices layout for a TCPv6/UDPv6 specification */
static const struct cfp_udf_layout udf_tcpip6_layout = {
	.udfs = {
		[0] = {
			.slices = {
				/* End of L2, byte offset 8, src IP[0:15] */
				CFG_UDF_EOL2 | 4,
				/* End of L2, byte offset 10, src IP[16:31] */
				CFG_UDF_EOL2 | 5,
				/* End of L2, byte offset 12, src IP[32:47] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[48:63] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, src IP[64:79] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, src IP[80:95] */
				CFG_UDF_EOL2 | 9,
				/* End of L2, byte offset 20, src IP[96:111] */
				CFG_UDF_EOL2 | 10,
				/* End of L2, byte offset 22, src IP[112:127] */
				CFG_UDF_EOL2 | 11,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_B_0_8_PORT_0,
		},
		[3] = {
			.slices = {
				/* End of L2, byte offset 24, dst IP[0:15] */
				CFG_UDF_EOL2 | 12,
				/* End of L2, byte offset 26, dst IP[16:31] */
				CFG_UDF_EOL2 | 13,
				/* End of L2, byte offset 28, dst IP[32:47] */
				CFG_UDF_EOL2 | 14,
				/* End of L2, byte offset 30, dst IP[48:63] */
				CFG_UDF_EOL2 | 15,
				/* End of L2, byte offset 32, dst IP[64:79] */
				CFG_UDF_EOL2 | 16,
				/* End of L2, byte offset 34, dst IP[80:95] */
				CFG_UDF_EOL2 | 17,
				/* End of L2, byte offset 36, dst IP[96:111] */
				CFG_UDF_EOL2 | 18,
				/* End of L2, byte offset 38, dst IP[112:127] */
				CFG_UDF_EOL2 | 19,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_D_0_11_PORT_0,
		},
	},
};
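
/* A note on UDF addressing (derived from the tables above): the low bits
 * of each slice entry are a 16-bit word index relative to the anchor
 * selected by CFG_UDF_EOL2/EOL3. For instance, for the IPv6 destination
 * address, which starts at byte offset 24 of the L3 header:
 *
 *	CFG_UDF_EOL2 | 12	->	word 12, i.e. byte 24 past the L2 header
 *
 * so entries 12..19 cover bytes 24-39 (the full 128-bit address), and
 * CFG_UDF_EOL3 | 1 covers bytes 2-3 past the L3 header, the TCP/UDP
 * destination port.
 */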

static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
	unsigned int i, count = 0;

	for (i = 0; i < UDFS_PER_SLICE; i++) {
		if (layout[i] != 0)
			count++;
	}

	return count;
}

static inline u32 udf_upper_bits(int num_udf)
{
	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
}

static inline u32 udf_lower_bits(int num_udf)
{
	return (u8)GENMASK(num_udf - 1, 0);
}
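
/* Worked example (illustrative): with UDFS_PER_SLICE == 9 and a slice
 * using all nine UDFs (num_udf == 9):
 *
 *	GENMASK(8, 0)		== 0x1ff
 *	udf_upper_bits(9)	== 0x1ff >> 8 == 0x1	(UDF_Valid[8])
 *	udf_lower_bits(9)	== (u8)0x1ff  == 0xff	(UDF_Valid[7:0])
 *
 * The IPv4 layout uses only six UDFs, giving udf_upper_bits(6) == 0 and
 * udf_lower_bits(6) == 0x3f.
 */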

static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
					     unsigned int start)
{
	const struct cfp_udf_slice_layout *slice_layout;
	unsigned int slice_idx;

	for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
		slice_layout = &l->udfs[slice_idx];
		/* Find the first non empty slice */
		if (memcmp(slice_layout->slices, zero_slice,
			   sizeof(zero_slice)))
			break;
	}

	return slice_idx;
}

static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
				const struct cfp_udf_layout *layout,
				unsigned int slice_num)
{
	u32 offset = layout->udfs[slice_num].base_offset;
	unsigned int i;

	for (i = 0; i < UDFS_PER_SLICE; i++)
		core_writel(priv, layout->udfs[slice_num].slices[i],
			    offset + i * 4);
}

static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
	reg |= OP_STR_DONE | op;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & OP_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
					     unsigned int addr)
{
	u32 reg;

	WARN_ON(addr >= priv->num_cfp_rules);

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= addr << XCESS_ADDR_SHIFT;
	core_writel(priv, reg, CORE_CFP_ACC);
}

static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
	/* Entry #0 is reserved */
	return priv->num_cfp_rules - 1;
}

static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
				   unsigned int rule_index,
				   int src_port,
				   unsigned int port_num,
				   unsigned int queue_num,
				   bool fwd_map_change)
{
	int ret;
	u32 reg;

	/* Replace ARL derived destination with DST_MAP derived, define
	 * which port and queue this should be forwarded to.
	 */
	if (fwd_map_change)
		reg = CHANGE_FWRD_MAP_IB_REP_ARL |
		      BIT(port_num + DST_MAP_IB_SHIFT) |
		      CHANGE_TC | queue_num << NEW_TC_SHIFT;
	else
		reg = 0;

	/* Enable looping back to the original port */
	if (src_port == port_num)
		reg |= LOOP_BK_EN;

	core_writel(priv, reg, CORE_ACT_POL_DATA0);

	/* Set classification ID that needs to be put in Broadcom tag */
	core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);

	core_writel(priv, 0, CORE_ACT_POL_DATA2);

	/* Configure policer RAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
	if (ret) {
		pr_err("Policer entry at %d failed\n", rule_index);
		return ret;
	}

	/* Disable the policer */
	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

	/* Now the rate meter */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
	if (ret) {
		pr_err("Meter entry at %d failed\n", rule_index);
		return ret;
	}

	return 0;
}

static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
				   struct flow_dissector_key_ipv4_addrs *addrs,
				   struct flow_dissector_key_ports *ports,
				   const __be16 vlan_tci,
				   unsigned int slice_num, u8 num_udf,
				   bool mask)
{
	u32 reg, offset;

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	reg = udf_lower_bits(num_udf) << 24 | be16_to_cpu(vlan_tci) >> 8;
	if (mask)
		core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
	else
		core_writel(priv, reg, CORE_CFP_DATA_PORT(5));

	/* C-Tag		[31:24]
	 * UDF_n_A8		[23:8]
	 * UDF_n_A7		[7:0]
	 */
	reg = (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, reg, offset);

	/* UDF_n_A7		[31:24]
	 * UDF_n_A6		[23:8]
	 * UDF_n_A5		[7:0]
	 */
	reg = be16_to_cpu(ports->dst) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, reg, offset);

	/* UDF_n_A5		[31:24]
	 * UDF_n_A4		[23:8]
	 * UDF_n_A3		[7:0]
	 */
	reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
	      (u32)be16_to_cpu(ports->src) << 8 |
	      (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, reg, offset);

	/* UDF_n_A3		[31:24]
	 * UDF_n_A2		[23:8]
	 * UDF_n_A1		[7:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
	      (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, reg, offset);

	/* UDF_n_A1		[31:24]
	 * UDF_n_A0		[23:8]
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}
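
/* Worked example (illustrative): for a destination address of 1.2.3.4,
 * be32_to_cpu(addrs->dst) == 0x01020304, which the writes above scatter
 * across the entry as:
 *
 *	*_PORT(2) bits [7:0]	== 0x03		(third octet)
 *	*_PORT(1) bits [31:24]	== 0x04		(fourth octet)
 *	*_PORT(1) bits [23:8]	== 0x0102	(first two octets)
 *
 * matching the 16-bit UDF words programmed from udf_tcpip4_layout.
 */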

static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	__be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
	struct ethtool_rx_flow_spec_input input = {};
	const struct cfp_udf_layout *layout;
	unsigned int slice_num, rule_index;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv4_addrs ipv4;
	struct flow_match_ports ports;
	struct flow_match_ip ip;
	u8 ip_proto, ip_frag;
	u8 num_udf;
	u32 reg;
	int ret;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V4_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

	/* Extract VLAN TCI */
	if (fs->flow_type & FLOW_EXT) {
		vlan_tci = fs->h_ext.vlan_tci;
		vlan_m_tci = fs->m_ext.vlan_tci;
	}

	/* Locate the first rule available */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index = find_first_zero_bit(priv->cfp.used,
						 priv->num_cfp_rules);
	else
		rule_index = fs->location;

	if (rule_index > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
	flow_rule_match_ports(flow->rule, &ports);
	flow_rule_match_ip(flow->rule, &ip);

	layout = &udf_tcpip4_layout;
	/* We only use one UDF slice for now */
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	core_writel(priv, ip.key->tos << IPTOS_SHIFT |
		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
		    udf_upper_bits(num_udf),
		    CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv4 packets */
	core_writel(priv, layout->udfs[slice_num].mask_value |
		    udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));

	/* Program the match and the mask */
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, vlan_tci,
			       slice_num, num_udf, false);
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, vlan_m_tci,
			       SLICE_NUM_MASK, num_udf, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the rule as being used and return it */
	set_bit(rule_index, priv->cfp.used);
	set_bit(rule_index, priv->cfp.unique);
	fs->location = rule_index;

	return 0;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
	return ret;
}
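
/* Example (hypothetical interface name): a rule such as
 *
 *	ethtool -N lan1 flow-type tcp4 dst-ip 192.168.1.1 dst-port 80 \
 *		action 16
 *
 * reaches this function through ETHTOOL_SRXCLSRLINS ->
 * bcm_sf2_cfp_rule_set() -> bcm_sf2_cfp_rule_insert(), which decodes
 * "action 16" into a destination port and queue before calling here.
 */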

static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
				   const __be32 *ip6_addr, const __be16 port,
				   const __be16 vlan_tci,
				   unsigned int slice_num, u32 udf_bits,
				   bool mask)
{
	u32 reg, tmp, val, offset;

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	reg = udf_bits << 24 | be16_to_cpu(vlan_tci) >> 8;
	if (mask)
		core_writel(priv, reg, CORE_CFP_MASK_PORT(5));
	else
		core_writel(priv, reg, CORE_CFP_DATA_PORT(5));

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	reg = be32_to_cpu(ip6_addr[3]);
	val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
	val |= (u32)(be16_to_cpu(vlan_tci) & 0xff) << 24;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, val, offset);

	/* UDF_n_B7 (lower)	[31:24]	(addr[7:0])
	 * UDF_n_B6		[23:8]	(addr[31:16])
	 * UDF_n_B5 (upper)	[7:0]	(addr[47:40])
	 */
	tmp = be32_to_cpu(ip6_addr[2]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, val, offset);

	/* UDF_n_B5 (lower)	[31:24]	(addr[39:32])
	 * UDF_n_B4		[23:8]	(addr[63:48])
	 * UDF_n_B3 (upper)	[7:0]	(addr[79:72])
	 */
	reg = be32_to_cpu(ip6_addr[1]);
	val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, val, offset);

	/* UDF_n_B3 (lower)	[31:24]	(addr[71:64])
	 * UDF_n_B2		[23:8]	(addr[95:80])
	 * UDF_n_B1 (upper)	[7:0]	(addr[111:104])
	 */
	tmp = be32_to_cpu(ip6_addr[0]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, val, offset);

	/* UDF_n_B1 (lower)	[31:24]	(addr[103:96])
	 * UDF_n_B0		[23:8]	(addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}
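
/* The function above walks the address from ip6_addr[3] (the low 32
 * bits) up to ip6_addr[0], carrying one byte between consecutive
 * register writes because each 32-bit register boundary falls in the
 * middle of a 16-bit UDF word; the reg/tmp juggling keeps the previous
 * word's leftover byte available for the next write.
 */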

static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
					      int port, u32 location)
{
	struct cfp_rule *rule;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		if (rule->port == port && rule->fs.location == location)
			return rule;
	}

	/* Not found: do not return the list iterator, it does not point
	 * at a valid rule here.
	 */
	return NULL;
}

static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct cfp_rule *rule = NULL;
	size_t fs_size = 0;
	int ret = 1;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = 1;
		if (rule->port != port)
			continue;

		if (rule->fs.flow_type != fs->flow_type ||
		    rule->fs.ring_cookie != fs->ring_cookie ||
		    rule->fs.h_ext.data[0] != fs->h_ext.data[0])
			continue;

		switch (fs->flow_type & ~FLOW_EXT) {
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
			fs_size = sizeof(struct ethtool_tcpip6_spec);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
			fs_size = sizeof(struct ethtool_tcpip4_spec);
			break;
		default:
			continue;
		}

		ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
		ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
		/* Compare VLAN TCI values as well */
		if (rule->fs.flow_type & FLOW_EXT) {
			ret |= rule->fs.h_ext.vlan_tci != fs->h_ext.vlan_tci;
			ret |= rule->fs.m_ext.vlan_tci != fs->m_ext.vlan_tci;
		}
		if (ret == 0)
			break;
	}

	return ret;
}

static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	__be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
	struct ethtool_rx_flow_spec_input input = {};
	unsigned int slice_num, rule_index[2];
	const struct cfp_udf_layout *layout;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv6_addrs ipv6;
	struct flow_match_ports ports;
	u8 ip_proto, ip_frag;
	int ret = 0;
	u8 num_udf;
	u32 reg;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V6_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V6_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

	/* Extract VLAN TCI */
	if (fs->flow_type & FLOW_EXT) {
		vlan_tci = fs->h_ext.vlan_tci;
		vlan_m_tci = fs->m_ext.vlan_tci;
	}

	layout = &udf_tcpip6_layout;
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Negotiate two indexes, one for the second half which we are chained
	 * from, which is what we will return to user-space, and a second one
	 * which is used to store its first half. That first half does not
	 * allow any choice of placement, so it just needs to find the next
	 * available bit. We return the second half as fs->location because
	 * that helps with the rule lookup later on: since the second half is
	 * chained from its first half, we can easily identify IPv6 CFP rules
	 * by looking whether they carry a CHAIN_ID.
	 *
	 * We also want the second half to have a lower rule_index than its
	 * first half because the HW search is by incrementing addresses.
	 */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index[1] = find_first_zero_bit(priv->cfp.used,
						    priv->num_cfp_rules);
	else
		rule_index[1] = fs->location;
	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	/* Flag it as used (cleared on error path) such that we can immediately
	 * obtain a second one to chain from.
	 */
	set_bit(rule_index[1], priv->cfp.used);

	rule_index[0] = find_first_zero_bit(priv->cfp.used,
					    priv->num_cfp_rules);
	if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
		ret = -ENOSPC;
		goto out_err;
	}

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow)) {
		ret = PTR_ERR(flow);
		goto out_err;
	}
	flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
	flow_rule_match_ports(flow->rule, &ports);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
	      ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv6 packets including
	 * UDF_Valid[8]
	 */
	reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* Slice the IPv6 source address and port */
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
			       ports.key->src, vlan_tci, slice_num,
			       udf_lower_bits(num_udf), false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
			       ports.mask->src, vlan_m_tci, SLICE_NUM_MASK,
			       udf_lower_bits(num_udf), true);

	/* Insert into TCAM now because we need to insert a second rule */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port, port_num,
				      queue_num, false);
	if (ret)
		goto out_err_flow_rule;

	/* Now deal with the second slice to chain this rule */
	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Chained rule, source port match is coming from the rule we are
	 * chained from.
	 */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
	core_writel(priv, 0, CORE_CFP_MASK_PORT(7));

	/* CHAIN ID		[31:24]
	 * UDF_Valid[8]		[23:16]
	 * UDF_Valid[7:0]	[15:8]
	 * Reserved		[7:0]
	 */
	reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Only match on the CHAIN ID and UDF valid bits */
	reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* Slice the IPv6 destination address and port */
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
			       ports.key->dst, 0, slice_num,
			       0, false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
			       ports.mask->dst, 0, SLICE_NUM_MASK,
			       0, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now, set chain ID to
	 * the one we are chained to
	 */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the second half rule as being used now, return it as the
	 * location and flag it as unique while dumping rules
	 */
	set_bit(rule_index[0], priv->cfp.used);
	set_bit(rule_index[1], priv->cfp.unique);
	fs->location = rule_index[1];

	return ret;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
out_err:
	clear_bit(rule_index[1], priv->cfp.used);
	return ret;
}
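
/* Chaining example (illustrative): suppose rule_index[1] == 1 and
 * rule_index[0] == 2. The source half lands at TCAM address 2 and
 * matches the ingress port map; the chained destination half at address
 * 1 zeroes its port map and instead matches CHAIN_ID == 2 in
 * DATA_PORT(6). User space only ever sees location 1, and
 * bcm_sf2_cfp_rule_del_one() reads CHAIN_ID back to locate and delete
 * the companion entry at address 2.
 */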

static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
				   struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	__u64 ring_cookie = fs->ring_cookie;
	struct switchdev_obj_port_vlan vlan;
	unsigned int queue_num, port_num;
	u16 vid;
	int ret;

	/* This rule is a Wake-on-LAN filter and we must specifically
	 * target the CPU port in order for it to be working.
	 */
	if (ring_cookie == RX_CLS_FLOW_WAKE)
		ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;

	/* We do not support discarding packets, check that the
	 * destination port is enabled and that we are within the
	 * number of ports supported by the switch
	 */
	port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;

	if (ring_cookie == RX_CLS_FLOW_DISC ||
	    !(dsa_is_user_port(ds, port_num) ||
	      dsa_is_cpu_port(ds, port_num)) ||
	    port_num >= priv->hw_params.num_ports)
		return -EINVAL;

	/* If the rule matches a particular VLAN, program that VLAN into the
	 * VLAN table for the destination port as well, otherwise matched
	 * packets would not be properly received or transmitted.
	 */
	if (fs->flow_type & FLOW_EXT) {
		/* We cannot support matching multiple VLAN IDs yet */
		if ((be16_to_cpu(fs->m_ext.vlan_tci) & VLAN_VID_MASK) !=
		    VLAN_VID_MASK)
			return -EINVAL;

		vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK;
		vlan.vid = vid;
		if (be32_to_cpu(fs->h_ext.data[1]) & 1)
			vlan.flags = BRIDGE_VLAN_INFO_UNTAGGED;
		else
			vlan.flags = 0;

		ret = ds->ops->port_vlan_add(ds, port_num, &vlan, NULL);
		if (ret)
			return ret;
	}

	/* Decode the egress queue from the ring cookie; the destination
	 * map has no bit for port 7, so higher-numbered ports (the CPU
	 * port) shift down by one.
	 */
	queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES;
	if (port_num >= 7)
		port_num -= 1;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
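
/* ring_cookie decoding example (illustrative): with eight egress queues
 * per port, "action 19" from ethtool yields ring_cookie == 19, hence
 * port_num == 19 / 8 == 2 and queue_num == 19 % 8 == 3, i.e. forward to
 * port 2, egress queue 3. RX_CLS_FLOW_WAKE instead retargets queue 0 of
 * the CPU port via ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES.
 */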

static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule = NULL;
	int ret = -EINVAL;

	/* Check for unsupported extensions */
	if (fs->flow_type & FLOW_MAC_EXT)
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY &&
	    fs->location > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	if ((fs->flow_type & FLOW_EXT) &&
	    !(ds->ops->port_vlan_add || ds->ops->port_vlan_del))
		return -EOPNOTSUPP;

	if (fs->location != RX_CLS_LOC_ANY &&
	    test_bit(fs->location, priv->cfp.used))
		return -EBUSY;

	ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
	if (ret == 0)
		return -EEXIST;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->port = port;
	memcpy(&rule->fs, fs, sizeof(*fs));
	list_add_tail(&rule->next, &priv->cfp.rules_list);

	return ret;
}

static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
				    u32 loc, u32 *next_loc)
{
	int ret;
	u32 reg;

	/* Indicate which rule we want to read */
	bcm_sf2_cfp_rule_addr_set(priv, loc);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Check if this is possibly an IPv6 rule by looking at its CHAIN_ID:
	 * the second half of such a rule carries the index of its first half.
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
	if (next_loc)
		*next_loc = (reg >> 24) & CHAIN_ID_MASK;

	/* Clear its valid bits */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
	reg &= ~SLICE_VALID;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

	/* Write back this entry into the TCAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret)
		return ret;

	clear_bit(loc, priv->cfp.used);
	clear_bit(loc, priv->cfp.unique);

	return 0;
}

static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
				   u32 loc)
{
	u32 next_loc = 0;
	int ret;

	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
	if (ret)
		return ret;

	/* If this was an IPv6 rule, delete its companion rule too */
	if (next_loc)
		ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);

	return ret;
}

static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
{
	struct cfp_rule *rule;
	int ret;

	if (loc > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	/* Refuse deleting unused rules, and those that are not unique since
	 * that could leave IPv6 rules with one of the chained rules in the
	 * table.
	 */
	if (!test_bit(loc, priv->cfp.unique) || loc == 0)
		return -EINVAL;

	rule = bcm_sf2_cfp_rule_find(priv, port, loc);
	if (!rule)
		return -EINVAL;

	ret = bcm_sf2_cfp_rule_remove(priv, port, loc);

	list_del(&rule->next);
	kfree(rule);

	return ret;
}

static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	unsigned int i;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xff;

	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
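
/* XOR with all-ones is a bitwise NOT: a byte mask of 0x00 reads back as
 * 0xff and vice versa, converting the masks from the form the rule was
 * stored with at insertion time into the inverted form reported back to
 * user space by ETHTOOL_GRXCLSRULE.
 */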

static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
	struct cfp_rule *rule;

	rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
	if (!rule)
		return -EINVAL;

	memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));

	bcm_sf2_invert_masks(&nfc->fs);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);

	return 0;
}

/* Walk the set of unique rules, skipping the reserved entry #0 */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
				    int port, struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	unsigned int index = 1, rules_cnt = 0;

	for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
		rule_locs[rules_cnt] = index;
		rules_cnt++;
	}

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);
	nfc->rule_cnt = rules_cnt;

	return 0;
}

int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		/* Subtract the default, unusable rule */
		nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
					      priv->num_cfp_rules) - 1;
		/* We support specifying rule locations */
		nfc->data |= RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device */
	if (p->ethtool_ops->get_rxnfc) {
		ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
		if (ret == -EOPNOTSUPP)
			ret = 0;
	}

	return ret;
}

int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc)
{
	struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
		break;

	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device.
	 * This can fail, so roll back the operation if we need to.
	 */
	if (p->ethtool_ops->set_rxnfc) {
		ret = p->ethtool_ops->set_rxnfc(p, nfc);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_lock(&priv->cfp.lock);
			bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
			mutex_unlock(&priv->cfp.lock);
		} else {
			ret = 0;
		}
	}

	return ret;
}

int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg |= TCAM_RESET;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & TCAM_RESET))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

void bcm_sf2_cfp_exit(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule, *n;

	if (list_empty(&priv->cfp.rules_list))
		return;

	list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
		bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
}

int bcm_sf2_cfp_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule;
	int ret = 0;
	u32 reg;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg &= ~CFP_EN_MAP_MASK;
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	ret = bcm_sf2_cfp_rst(priv);
	if (ret)
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
					      rule->fs.location);
		if (ret) {
			dev_err(ds->dev, "failed to remove rule\n");
			return ret;
		}

		ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
		if (ret) {
			dev_err(ds->dev, "failed to restore rule\n");
			return ret;
		}
	}

	return ret;
}

static const struct bcm_sf2_cfp_stat {
	unsigned int offset;
	unsigned int ram_loc;
	const char *name;
} bcm_sf2_cfp_stats[] = {
	{
		.offset = CORE_STAT_GREEN_CNTR,
		.ram_loc = GREEN_STAT_RAM,
		.name = "Green"
	},
	{
		.offset = CORE_STAT_YELLOW_CNTR,
		.ram_loc = YELLOW_STAT_RAM,
		.name = "Yellow"
	},
	{
		.offset = CORE_STAT_RED_CNTR,
		.ram_loc = RED_STAT_RAM,
		.name = "Red"
	},
};

void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
			     u32 stringset, uint8_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	char buf[ETH_GSTRING_LEN];
	unsigned int i, j, iter;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 1; i < priv->num_cfp_rules; i++) {
		for (j = 0; j < s; j++) {
			snprintf(buf, sizeof(buf),
				 "CFP%03d_%sCntr",
				 i, bcm_sf2_cfp_stats[j].name);
			iter = (i - 1) * s + j;
			strlcpy(data + iter * ETH_GSTRING_LEN,
				buf, ETH_GSTRING_LEN);
		}
	}
}
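
/* With the format above, rule 1's counters are reported as
 * "CFP001_GreenCntr", "CFP001_YellowCntr" and "CFP001_RedCntr" at string
 * indices 0..2; rule 2 follows at indices 3..5, and so on.
 */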

void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
				   uint64_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	const struct bcm_sf2_cfp_stat *stat;
	unsigned int i, j, iter;
	struct cfp_rule *rule;
	int ret;

	mutex_lock(&priv->cfp.lock);
	for (i = 1; i < priv->num_cfp_rules; i++) {
		rule = bcm_sf2_cfp_rule_find(priv, port, i);
		if (!rule)
			continue;

		for (j = 0; j < s; j++) {
			stat = &bcm_sf2_cfp_stats[j];

			bcm_sf2_cfp_rule_addr_set(priv, i);
			ret = bcm_sf2_cfp_op(priv, stat->ram_loc | OP_SEL_READ);
			if (ret)
				continue;

			iter = (i - 1) * s + j;
			data[iter] = core_readl(priv, stat->offset);
		}
	}
	mutex_unlock(&priv->cfp.lock);
}

int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	if (sset != ETH_SS_STATS)
		return 0;

	/* 3 counters per CFP rule */
	return (priv->num_cfp_rules - 1) * ARRAY_SIZE(bcm_sf2_cfp_stats);
}
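
/* Example (hypothetical rule count): a switch exposing 256 CFP rules
 * reports (256 - 1) * 3 == 765 counters, matching one-to-one the strings
 * emitted by bcm_sf2_cfp_get_strings().
 */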