1
2
3
4
5
6
7
8
9#include <linux/module.h>
10#include <linux/phy.h>
11#include <linux/netdevice.h>
12#include <linux/bitfield.h>
13#include <linux/regmap.h>
14#include <net/dsa.h>
15#include <linux/of_net.h>
16#include <linux/of_mdio.h>
17#include <linux/of_platform.h>
18#include <linux/if_bridge.h>
19#include <linux/mdio.h>
20#include <linux/phylink.h>
21#include <linux/gpio/consumer.h>
22#include <linux/etherdevice.h>
23#include <linux/dsa/tag_qca.h>
24
25#include "qca8k.h"
26
/* Declare one MIB counter: hardware size in 32-bit words, register offset
 * inside the per-port MIB block, and the name exported to ethtool.
 */
#define MIB_DESC(_s, _o, _n)	\
	{			\
		.size = (_s),	\
		.offset = (_o),	\
		.name = (_n),	\
	}

/* MIB counter layout for the AR8327 family. Offsets are relative to the
 * per-port MIB base; entries with size 2 span two consecutive registers
 * (64-bit byte counters).
 */
static const struct qca8k_mib_desc ar8327_mib[] = {
	MIB_DESC(1, 0x00, "RxBroad"),
	MIB_DESC(1, 0x04, "RxPause"),
	MIB_DESC(1, 0x08, "RxMulti"),
	MIB_DESC(1, 0x0c, "RxFcsErr"),
	MIB_DESC(1, 0x10, "RxAlignErr"),
	MIB_DESC(1, 0x14, "RxRunt"),
	MIB_DESC(1, 0x18, "RxFragment"),
	MIB_DESC(1, 0x1c, "Rx64Byte"),
	MIB_DESC(1, 0x20, "Rx128Byte"),
	MIB_DESC(1, 0x24, "Rx256Byte"),
	MIB_DESC(1, 0x28, "Rx512Byte"),
	MIB_DESC(1, 0x2c, "Rx1024Byte"),
	MIB_DESC(1, 0x30, "Rx1518Byte"),
	MIB_DESC(1, 0x34, "RxMaxByte"),
	MIB_DESC(1, 0x38, "RxTooLong"),
	MIB_DESC(2, 0x3c, "RxGoodByte"),
	MIB_DESC(2, 0x44, "RxBadByte"),
	MIB_DESC(1, 0x4c, "RxOverFlow"),
	MIB_DESC(1, 0x50, "Filtered"),
	MIB_DESC(1, 0x54, "TxBroad"),
	MIB_DESC(1, 0x58, "TxPause"),
	MIB_DESC(1, 0x5c, "TxMulti"),
	MIB_DESC(1, 0x60, "TxUnderRun"),
	MIB_DESC(1, 0x64, "Tx64Byte"),
	MIB_DESC(1, 0x68, "Tx128Byte"),
	MIB_DESC(1, 0x6c, "Tx256Byte"),
	MIB_DESC(1, 0x70, "Tx512Byte"),
	MIB_DESC(1, 0x74, "Tx1024Byte"),
	MIB_DESC(1, 0x78, "Tx1518Byte"),
	MIB_DESC(1, 0x7c, "TxMaxByte"),
	MIB_DESC(1, 0x80, "TxOverSize"),
	MIB_DESC(2, 0x84, "TxByte"),
	MIB_DESC(1, 0x8c, "TxCollision"),
	MIB_DESC(1, 0x90, "TxAbortCol"),
	MIB_DESC(1, 0x94, "TxMultiCol"),
	MIB_DESC(1, 0x98, "TxSingleCol"),
	MIB_DESC(1, 0x9c, "TxExcDefer"),
	MIB_DESC(1, 0xa0, "TxDefer"),
	MIB_DESC(1, 0xa4, "TxLateCol"),
	MIB_DESC(1, 0xa8, "RXUnicast"),
	MIB_DESC(1, 0xac, "TXUnicast"),
};
77
78static void
79qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
80{
81 regaddr >>= 1;
82 *r1 = regaddr & 0x1e;
83
84 regaddr >>= 5;
85 *r2 = regaddr & 0x7;
86
87 regaddr >>= 3;
88 *page = regaddr & 0x3ff;
89}
90
91static int
92qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
93{
94 u16 *cached_lo = &priv->mdio_cache.lo;
95 struct mii_bus *bus = priv->bus;
96 int ret;
97
98 if (lo == *cached_lo)
99 return 0;
100
101 ret = bus->write(bus, phy_id, regnum, lo);
102 if (ret < 0)
103 dev_err_ratelimited(&bus->dev,
104 "failed to write qca8k 32bit lo register\n");
105
106 *cached_lo = lo;
107 return 0;
108}
109
110static int
111qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
112{
113 u16 *cached_hi = &priv->mdio_cache.hi;
114 struct mii_bus *bus = priv->bus;
115 int ret;
116
117 if (hi == *cached_hi)
118 return 0;
119
120 ret = bus->write(bus, phy_id, regnum, hi);
121 if (ret < 0)
122 dev_err_ratelimited(&bus->dev,
123 "failed to write qca8k 32bit hi register\n");
124
125 *cached_hi = hi;
126 return 0;
127}
128
129static int
130qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
131{
132 int ret;
133
134 ret = bus->read(bus, phy_id, regnum);
135 if (ret >= 0) {
136 *val = ret;
137 ret = bus->read(bus, phy_id, regnum + 1);
138 *val |= ret << 16;
139 }
140
141 if (ret < 0) {
142 dev_err_ratelimited(&bus->dev,
143 "failed to read qca8k 32bit register\n");
144 *val = 0;
145 return ret;
146 }
147
148 return 0;
149}
150
151static void
152qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
153{
154 u16 lo, hi;
155 int ret;
156
157 lo = val & 0xffff;
158 hi = (u16)(val >> 16);
159
160 ret = qca8k_set_lo(priv, phy_id, regnum, lo);
161 if (ret >= 0)
162 ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
163}
164
165static int
166qca8k_set_page(struct qca8k_priv *priv, u16 page)
167{
168 u16 *cached_page = &priv->mdio_cache.page;
169 struct mii_bus *bus = priv->bus;
170 int ret;
171
172 if (page == *cached_page)
173 return 0;
174
175 ret = bus->write(bus, 0x18, 0, page);
176 if (ret < 0) {
177 dev_err_ratelimited(&bus->dev,
178 "failed to set qca8k page\n");
179 return ret;
180 }
181
182 *cached_page = page;
183 usleep_range(1000, 2000);
184 return 0;
185}
186
/* Read one 32-bit switch register through the regmap. */
static int
qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
{
	return regmap_read(priv->regmap, reg, val);
}
192
/* Write one 32-bit switch register through the regmap. */
static int
qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
{
	return regmap_write(priv->regmap, reg, val);
}
198
/* Read-modify-write one switch register: clear @mask, set @write_val. */
static int
qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
{
	return regmap_update_bits(priv->regmap, reg, mask, write_val);
}
204
/* Handle a management packet echoed back by the switch (called from the
 * tagger RX path): match the sequence number against the outstanding
 * request, copy back read data and wake up the waiter.
 */
static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data;
	struct qca8k_priv *priv = ds->priv;
	struct qca_mgmt_ethhdr *mgmt_ethhdr;
	u8 len, cmd;

	mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
	mgmt_eth_data = &priv->mgmt_eth_data;

	cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command);
	len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command);

	/* Make sure the seq matches the packet we are waiting for; only
	 * then is the completion treated as an ack.
	 */
	if (mgmt_ethhdr->seq == mgmt_eth_data->seq)
		mgmt_eth_data->ack = true;

	if (cmd == MDIO_READ) {
		/* First 4 bytes of read data travel in the mgmt header */
		mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data;

		/* Larger reads carry the remaining bytes in the packet
		 * payload; the read function extracts what it requested.
		 */
		if (len > QCA_HDR_MGMT_DATA1_LEN)
			memcpy(mgmt_eth_data->data + 1, skb->data,
			       QCA_HDR_MGMT_DATA2_LEN);
	}

	complete(&mgmt_eth_data->rw_done);
}
235
/* Build a management skb carrying one register read/write request.
 * @cmd: MDIO_READ or MDIO_WRITE; @reg: switch register address;
 * @val: data to send for writes (first word in the header, rest in the
 * payload); @priority: QCA header priority; @len: request length in bytes.
 * Returns the skb ready for xmit (seq number still unset), or NULL on
 * allocation failure.
 */
static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
					       int priority, unsigned int len)
{
	struct qca_mgmt_ethhdr *mgmt_ethhdr;
	unsigned int real_len;
	struct sk_buff *skb;
	u32 *data2;
	u16 hdr;

	skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
	if (!skb)
		return NULL;

	/* The mgmt header length field saturates at 15, but a 15-byte
	 * request still makes the switch return the full 16 bytes of data,
	 * so encode a 16-byte request as 15 (hardware quirk — presumably;
	 * inherited from the original implementation).
	 */
	if (len == 16)
		real_len = 15;
	else
		real_len = len;

	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->len);

	mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);

	hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
	hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
	hdr |= QCA_HDR_XMIT_FROM_CPU;
	hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
	hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);

	mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
	mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
	mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
	mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
					   QCA_HDR_MGMT_CHECK_CODE_VAL);

	/* First data word travels inside the mgmt header itself */
	if (cmd == MDIO_WRITE)
		mgmt_ethhdr->mdio_data = *val;

	mgmt_ethhdr->hdr = htons(hdr);

	/* Remaining data (if any) goes in the zero-padded payload */
	data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
	if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN)
		memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN);

	return skb;
}
289
290static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
291{
292 struct qca_mgmt_ethhdr *mgmt_ethhdr;
293
294 mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
295 mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
296}
297
/* Read @len bytes starting at switch register @reg over the Ethernet
 * management channel. Returns 0 on success, -ENOMEM on allocation
 * failure, -EINVAL when no management master is up or the reply was not
 * acked, -ETIMEDOUT when no reply arrived in time.
 */
static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
	struct sk_buff *skb;
	bool ack;
	int ret;

	skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&mgmt_eth_data->mutex);

	/* Check if the mgmt_master interface is operational */
	if (!priv->mgmt_master) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}

	skb->dev = priv->mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the mdio pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));

	/* Copy the reply while still holding the mutex; the error checks
	 * below decide whether the caller may trust it.
	 */
	*val = mgmt_eth_data->data[0];
	if (len > QCA_HDR_MGMT_DATA1_LEN)
		memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);

	ack = mgmt_eth_data->ack;

	mutex_unlock(&mgmt_eth_data->mutex);

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	return 0;
}
349
/* Write @len bytes starting at switch register @reg over the Ethernet
 * management channel. Error semantics match qca8k_read_eth(): -ENOMEM,
 * -EINVAL (no master / no ack) or -ETIMEDOUT.
 */
static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
	struct sk_buff *skb;
	bool ack;
	int ret;

	skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
				      QCA8K_ETHERNET_MDIO_PRIORITY, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&mgmt_eth_data->mutex);

	/* Check if the mgmt_master interface is operational */
	if (!priv->mgmt_master) {
		kfree_skb(skb);
		mutex_unlock(&mgmt_eth_data->mutex);
		return -EINVAL;
	}

	skb->dev = priv->mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the mdio pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));

	ack = mgmt_eth_data->ack;

	mutex_unlock(&mgmt_eth_data->mutex);

	if (ret <= 0)
		return -ETIMEDOUT;

	if (!ack)
		return -EINVAL;

	return 0;
}
397
398static int
399qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
400{
401 u32 val = 0;
402 int ret;
403
404 ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
405 if (ret)
406 return ret;
407
408 val &= ~mask;
409 val |= write_val;
410
411 return qca8k_write_eth(priv, reg, &val, sizeof(val));
412}
413
414static int
415qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
416{
417 int i, count = len / sizeof(u32), ret;
418
419 if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len))
420 return 0;
421
422 for (i = 0; i < count; i++) {
423 ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
424 if (ret < 0)
425 return ret;
426 }
427
428 return 0;
429}
430
431static int
432qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
433{
434 int i, count = len / sizeof(u32), ret;
435 u32 tmp;
436
437 if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len))
438 return 0;
439
440 for (i = 0; i < count; i++) {
441 tmp = val[i];
442
443 ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
444 if (ret < 0)
445 return ret;
446 }
447
448 return 0;
449}
450
/* regmap .reg_read callback: try the Ethernet management channel first,
 * then fall back to the paged MDIO access scheme under the bus lock.
 */
static int
qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	int ret;

	if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
		return 0;

	qca8k_split_addr(reg, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret < 0)
		goto exit;

	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);

exit:
	mutex_unlock(&bus->mdio_lock);
	return ret;
}
476
/* regmap .reg_write callback: try the Ethernet management channel first,
 * then fall back to paged MDIO. Note qca8k_mii_write32() is void, so the
 * returned value on the fallback path reflects only the page selection.
 */
static int
qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	int ret;

	if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
		return 0;

	qca8k_split_addr(reg, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret < 0)
		goto exit;

	qca8k_mii_write32(priv, 0x10 | r2, r1, val);

exit:
	mutex_unlock(&bus->mdio_lock);
	return ret;
}
502
/* regmap .reg_update_bits callback: try the Ethernet management channel
 * first, then do a read-modify-write over paged MDIO under the bus lock.
 */
static int
qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
		return 0;

	qca8k_split_addr(reg, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret < 0)
		goto exit;

	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
	if (ret < 0)
		goto exit;

	val &= ~mask;
	val |= write_val;
	qca8k_mii_write32(priv, 0x10 | r2, r1, val);

exit:
	mutex_unlock(&bus->mdio_lock);

	return ret;
}
536
/* Register windows the switch allows reads from; anything outside these
 * ranges is rejected by the regmap core.
 */
static const struct regmap_range qca8k_readable_ranges[] = {
	regmap_reg_range(0x0000, 0x00e4),
	regmap_reg_range(0x0100, 0x0168),
	regmap_reg_range(0x0200, 0x0270),
	regmap_reg_range(0x0400, 0x0454),
	regmap_reg_range(0x0600, 0x0718),
	regmap_reg_range(0x0800, 0x0b70),
	regmap_reg_range(0x0c00, 0x0c80),
	regmap_reg_range(0x0e00, 0x0e98),
	regmap_reg_range(0x1000, 0x10ac),
	regmap_reg_range(0x1100, 0x11ac),
	regmap_reg_range(0x1200, 0x12ac),
	regmap_reg_range(0x1300, 0x13ac),
	regmap_reg_range(0x1400, 0x14ac),
	regmap_reg_range(0x1500, 0x15ac),
	regmap_reg_range(0x1600, 0x16ac),

};

static const struct regmap_access_table qca8k_readable_table = {
	.yes_ranges = qca8k_readable_ranges,
	.n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
};

/* Custom-accessor regmap: all I/O goes through the qca8k_regmap_* callbacks
 * above. Locking is disabled because the callbacks serialize on the MDIO
 * bus lock / management mutex themselves.
 */
static struct regmap_config qca8k_regmap_config = {
	.reg_bits = 16,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x16ac,
	.reg_read = qca8k_regmap_read,
	.reg_write = qca8k_regmap_write,
	.reg_update_bits = qca8k_regmap_update_bits,
	.rd_table = &qca8k_readable_table,
	.disable_locking = true,
	.cache_type = REGCACHE_NONE,
};
573
/* Poll @reg until all bits in @mask clear, or QCA8K_BUSY_WAIT_TIMEOUT ms
 * elapse. Returns 0 on success or the regmap poll error.
 */
static int
qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
{
	u32 val;

	return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
				       QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
}
582
/* Decode the three ATU data registers (e.g. after a SEARCH/NEXT command)
 * into @fdb: vid, aging/status, port mask and MAC address.
 */
static int
qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
{
	u32 reg[3];
	int ret;

	/* load the ARL table into an array */
	ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
	if (ret)
		return ret;

	/* vid lives in the third data word */
	fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
	/* aging/status: 0 means no valid entry */
	fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
	/* destination port mask */
	fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
	/* MAC address, split across the first two data words */
	fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
	fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
	fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
	fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
	fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
	fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);

	return 0;
}
610
/* Encode an FDB entry into the three ATU data registers and write them,
 * ready for a following ATU command (LOAD/SEARCH/PURGE/...). Inverse of
 * qca8k_fdb_read().
 */
static void
qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
		u8 aging)
{
	u32 reg[3] = { 0 };

	/* vid lives in the third data word */
	reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
	/* aging/status: 0 marks the entry invalid */
	reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
	/* destination port mask */
	reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
	/* MAC address, split across the first two data words */
	reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
	reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);

	/* load the array into the ARL table */
	qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
}
634
635static int
636qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
637{
638 u32 reg;
639 int ret;
640
641
642 reg = QCA8K_ATU_FUNC_BUSY;
643 reg |= cmd;
644 if (port >= 0) {
645 reg |= QCA8K_ATU_FUNC_PORT_EN;
646 reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
647 }
648
649
650 ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
651 if (ret)
652 return ret;
653
654
655 ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
656 if (ret)
657 return ret;
658
659
660 if (cmd == QCA8K_FDB_LOAD) {
661 ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, ®);
662 if (ret < 0)
663 return ret;
664 if (reg & QCA8K_ATU_FUNC_FULL)
665 return -1;
666 }
667
668 return 0;
669}
670
671static int
672qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
673{
674 int ret;
675
676 qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
677 ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
678 if (ret < 0)
679 return ret;
680
681 return qca8k_fdb_read(priv, fdb);
682}
683
684static int
685qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
686 u16 vid, u8 aging)
687{
688 int ret;
689
690 mutex_lock(&priv->reg_mutex);
691 qca8k_fdb_write(priv, vid, port_mask, mac, aging);
692 ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
693 mutex_unlock(&priv->reg_mutex);
694
695 return ret;
696}
697
698static int
699qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
700{
701 int ret;
702
703 mutex_lock(&priv->reg_mutex);
704 qca8k_fdb_write(priv, vid, port_mask, mac, 0);
705 ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
706 mutex_unlock(&priv->reg_mutex);
707
708 return ret;
709}
710
711static void
712qca8k_fdb_flush(struct qca8k_priv *priv)
713{
714 mutex_lock(&priv->reg_mutex);
715 qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
716 mutex_unlock(&priv->reg_mutex);
717}
718
719static int
720qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
721 const u8 *mac, u16 vid)
722{
723 struct qca8k_fdb fdb = { 0 };
724 int ret;
725
726 mutex_lock(&priv->reg_mutex);
727
728 qca8k_fdb_write(priv, vid, 0, mac, 0);
729 ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
730 if (ret < 0)
731 goto exit;
732
733 ret = qca8k_fdb_read(priv, &fdb);
734 if (ret < 0)
735 goto exit;
736
737
738 if (!fdb.aging) {
739 ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
740 if (ret)
741 goto exit;
742 }
743
744
745 fdb.port_mask |= port_mask;
746
747 qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
748 ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
749
750exit:
751 mutex_unlock(&priv->reg_mutex);
752 return ret;
753}
754
755static int
756qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
757 const u8 *mac, u16 vid)
758{
759 struct qca8k_fdb fdb = { 0 };
760 int ret;
761
762 mutex_lock(&priv->reg_mutex);
763
764 qca8k_fdb_write(priv, vid, 0, mac, 0);
765 ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
766 if (ret < 0)
767 goto exit;
768
769
770 if (!fdb.aging) {
771 ret = -EINVAL;
772 goto exit;
773 }
774
775 ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
776 if (ret)
777 goto exit;
778
779
780 if (fdb.port_mask == port_mask)
781 goto exit;
782
783
784 fdb.port_mask &= ~port_mask;
785
786 qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
787 ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
788
789exit:
790 mutex_unlock(&priv->reg_mutex);
791 return ret;
792}
793
794static int
795qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
796{
797 u32 reg;
798 int ret;
799
800
801 reg = QCA8K_VTU_FUNC1_BUSY;
802 reg |= cmd;
803 reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
804
805
806 ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
807 if (ret)
808 return ret;
809
810
811 ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
812 if (ret)
813 return ret;
814
815
816 if (cmd == QCA8K_VLAN_LOAD) {
817 ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, ®);
818 if (ret < 0)
819 return ret;
820 if (reg & QCA8K_VTU_FUNC1_FULL)
821 return -ENOMEM;
822 }
823
824 return 0;
825}
826
827static int
828qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
829{
830 u32 reg;
831 int ret;
832
833
834
835
836
837 if (vid == 0)
838 return 0;
839
840 mutex_lock(&priv->reg_mutex);
841 ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
842 if (ret < 0)
843 goto out;
844
845 ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®);
846 if (ret < 0)
847 goto out;
848 reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
849 reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
850 if (untagged)
851 reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
852 else
853 reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
854
855 ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
856 if (ret)
857 goto out;
858 ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
859
860out:
861 mutex_unlock(&priv->reg_mutex);
862
863 return ret;
864}
865
866static int
867qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
868{
869 u32 reg, mask;
870 int ret, i;
871 bool del;
872
873 mutex_lock(&priv->reg_mutex);
874 ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
875 if (ret < 0)
876 goto out;
877
878 ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, ®);
879 if (ret < 0)
880 goto out;
881 reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
882 reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
883
884
885 del = true;
886 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
887 mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
888
889 if ((reg & mask) != mask) {
890 del = false;
891 break;
892 }
893 }
894
895 if (del) {
896 ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
897 } else {
898 ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
899 if (ret)
900 goto out;
901 ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
902 }
903
904out:
905 mutex_unlock(&priv->reg_mutex);
906
907 return ret;
908}
909
/* Initialize the MIB block: flush all hardware counters, keep CPU port
 * counters across the flush, then enable the MIB module.
 */
static int
qca8k_mib_init(struct qca8k_priv *priv)
{
	int ret;

	mutex_lock(&priv->reg_mutex);
	/* Request a counter flush and mark the MIB unit busy */
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
				 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
				 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
				 QCA8K_MIB_BUSY);
	if (ret)
		goto exit;

	/* Wait for the flush to finish */
	ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
	if (ret)
		goto exit;

	ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
	if (ret)
		goto exit;

	ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);

exit:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}
937
/* Enable or disable the TX/RX MACs of @port. Ports 1-5 additionally get
 * automatic link handling toggled (ports 0 and 6 are the CPU-facing MACs
 * with fixed link configuration — presumably; confirm against the SoC
 * integration).
 */
static void
qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
{
	u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;

	/* Only ports 1-5 use automatic link status */
	if (port > 0 && port < 6)
		mask |= QCA8K_PORT_STATUS_LINK_AUTO;

	if (enable)
		regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
	else
		regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
}
952
953static int
954qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
955 struct sk_buff *read_skb, u32 *val)
956{
957 struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
958 bool ack;
959 int ret;
960
961 reinit_completion(&mgmt_eth_data->rw_done);
962
963
964 mgmt_eth_data->seq++;
965 qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
966 mgmt_eth_data->ack = false;
967
968 dev_queue_xmit(skb);
969
970 ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
971 QCA8K_ETHERNET_TIMEOUT);
972
973 ack = mgmt_eth_data->ack;
974
975 if (ret <= 0)
976 return -ETIMEDOUT;
977
978 if (!ack)
979 return -EINVAL;
980
981 *val = mgmt_eth_data->data[0];
982
983 return 0;
984}
985
/* Perform one PHY register access through the switch's MDIO master,
 * driven entirely over the Ethernet management channel.
 * @read: true for a read, false for a write of @data.
 * Returns the read value (masked to 16 bits), 0 for a successful write,
 * or a negative errno.
 */
static int
qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
		      int regnum, u16 data)
{
	struct sk_buff *write_skb, *clear_skb, *read_skb;
	struct qca8k_mgmt_eth_data *mgmt_eth_data;
	u32 write_val, clear_val = 0, val;
	struct net_device *mgmt_master;
	int ret, ret1;
	bool ack;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	mgmt_eth_data = &priv->mgmt_eth_data;

	write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
		    QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
		    QCA8K_MDIO_MASTER_REG_ADDR(regnum);

	if (read) {
		write_val |= QCA8K_MDIO_MASTER_READ;
	} else {
		write_val |= QCA8K_MDIO_MASTER_WRITE;
		write_val |= QCA8K_MDIO_MASTER_DATA(data);
	}

	/* Prealloc all the needed skb before the lock */
	write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
					    QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
	if (!write_skb)
		return -ENOMEM;

	clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
					    QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
	if (!clear_skb) {
		ret = -ENOMEM;
		goto err_clear_skb;
	}

	read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
					   QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
	if (!read_skb) {
		ret = -ENOMEM;
		goto err_read_skb;
	}

	/* The whole sequence runs under the management mutex:
	 * 1. Send the MDIO master command packet
	 * 2. Busy-wait for the MDIO master to finish
	 * 3. Fetch the data when reading
	 * 4. Reset the MDIO master control register (even on error)
	 */
	mutex_lock(&mgmt_eth_data->mutex);

	/* Check if the mgmt_master interface is operational */
	mgmt_master = priv->mgmt_master;
	if (!mgmt_master) {
		mutex_unlock(&mgmt_eth_data->mutex);
		ret = -EINVAL;
		goto err_mgmt_master;
	}

	read_skb->dev = mgmt_master;
	clear_skb->dev = mgmt_master;
	write_skb->dev = mgmt_master;

	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the write pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(write_skb);

	ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					  QCA8K_ETHERNET_TIMEOUT);

	ack = mgmt_eth_data->ack;

	if (ret <= 0) {
		ret = -ETIMEDOUT;
		kfree_skb(read_skb);
		goto exit;
	}

	if (!ack) {
		ret = -EINVAL;
		kfree_skb(read_skb);
		goto exit;
	}

	/* Poll the MDIO master busy bit via repeated read packets;
	 * qca8k_phy_eth_busy_wait() consumes copies of read_skb.
	 */
	ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
				!(val & QCA8K_MDIO_MASTER_BUSY), 0,
				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
				mgmt_eth_data, read_skb, &val);

	if (ret < 0 && ret1 < 0) {
		ret = ret1;
		goto exit;
	}

	if (read) {
		reinit_completion(&mgmt_eth_data->rw_done);

		/* Increment seq_num and set it in the read pkt */
		mgmt_eth_data->seq++;
		qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
		mgmt_eth_data->ack = false;

		dev_queue_xmit(read_skb);

		ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
						  QCA8K_ETHERNET_TIMEOUT);

		ack = mgmt_eth_data->ack;

		if (ret <= 0) {
			ret = -ETIMEDOUT;
			goto exit;
		}

		if (!ack) {
			ret = -EINVAL;
			goto exit;
		}

		ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
	} else {
		/* read_skb was only needed for the busy poll on writes */
		kfree_skb(read_skb);
	}
exit:
	reinit_completion(&mgmt_eth_data->rw_done);

	/* Increment seq_num and set it in the clear pkt */
	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(clear_skb);

	wait_for_completion_timeout(&mgmt_eth_data->rw_done,
				    QCA8K_ETHERNET_TIMEOUT);

	mutex_unlock(&mgmt_eth_data->mutex);

	return ret;

	/* Error handling before the lock was taken */
err_mgmt_master:
	kfree_skb(read_skb);
err_read_skb:
	kfree_skb(clear_skb);
err_clear_skb:
	kfree_skb(write_skb);

	return ret;
}
1145
/* Map a switch port number to its internal PHY MDIO address.
 * Port 1 maps to PHY 0, port 2 to PHY 1, and so on; ports 0 and 6 have
 * no internal PHY, so callers must not pass them here.
 */
static u32
qca8k_port_to_phy(int port)
{
	return port - 1;
}
1160
/* Poll the 32-bit register at @reg over raw paged MDIO until all bits in
 * @mask clear or QCA8K_BUSY_WAIT_TIMEOUT ms elapse. Caller is expected
 * to have selected the right page already.
 */
static int
qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
{
	u16 r1, r2, page;
	u32 val;
	int ret, ret1;

	qca8k_split_addr(reg, &r1, &r2, &page);

	ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
				QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
				bus, 0x10 | r2, r1, &val);

	/* Check if the readback itself failed for a different reason
	 * before reporting the poll timeout.
	 */
	if (ret < 0 && ret1 < 0)
		return ret1;

	return ret;
}
1182
/* Write @data to PHY register @regnum of @phy through the switch's MDIO
 * master, using raw paged MDIO access (legacy fallback path).
 */
static int
qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
{
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
	      QCA8K_MDIO_MASTER_DATA(data);

	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret)
		goto exit;

	qca8k_mii_write32(priv, 0x10 | r2, r1, val);

	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
				   QCA8K_MDIO_MASTER_BUSY);

exit:
	/* even if the busy_wait timed out, try to clear MASTER_EN */
	qca8k_mii_write32(priv, 0x10 | r2, r1, 0);

	mutex_unlock(&bus->mdio_lock);

	return ret;
}
1220
/* Read PHY register @regnum of @phy through the switch's MDIO master,
 * using raw paged MDIO access (legacy fallback path). Returns the 16-bit
 * value or a negative errno.
 */
static int
qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
{
	struct mii_bus *bus = priv->bus;
	u16 r1, r2, page;
	u32 val;
	int ret;

	if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
		return -EINVAL;

	val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
	      QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
	      QCA8K_MDIO_MASTER_REG_ADDR(regnum);

	qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);

	ret = qca8k_set_page(priv, page);
	if (ret)
		goto exit;

	qca8k_mii_write32(priv, 0x10 | r2, r1, val);

	ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
				   QCA8K_MDIO_MASTER_BUSY);
	if (ret)
		goto exit;

	ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);

exit:
	/* even if the busy_wait timed out, try to clear MASTER_EN */
	qca8k_mii_write32(priv, 0x10 | r2, r1, 0);

	mutex_unlock(&bus->mdio_lock);

	if (ret >= 0)
		ret = val & QCA8K_MDIO_MASTER_DATA_MASK;

	return ret;
}
1264
1265static int
1266qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
1267{
1268 struct qca8k_priv *priv = slave_bus->priv;
1269 int ret;
1270
1271
1272 ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
1273 if (!ret)
1274 return 0;
1275
1276 return qca8k_mdio_write(priv, phy, regnum, data);
1277}
1278
1279static int
1280qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
1281{
1282 struct qca8k_priv *priv = slave_bus->priv;
1283 int ret;
1284
1285
1286 ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
1287 if (ret >= 0)
1288 return ret;
1289
1290 return qca8k_mdio_read(priv, phy, regnum);
1291}
1292
1293static int
1294qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
1295{
1296 struct qca8k_priv *priv = ds->priv;
1297 int ret;
1298
1299
1300
1301
1302
1303 if (priv->legacy_phy_port_mapping)
1304 port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
1305
1306
1307 ret = qca8k_phy_eth_command(priv, false, port, regnum, 0);
1308 if (!ret)
1309 return ret;
1310
1311 return qca8k_mdio_write(priv, port, regnum, data);
1312}
1313
/* DSA .phy_read op (legacy PHY mapping path). A failed MDIO read is
 * reported as 0xffff, the all-ones value an empty MDIO bus returns.
 */
static int
qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
{
	struct qca8k_priv *priv = ds->priv;
	int ret;

	/* Check if the legacy mapping should be used: with it the DSA port
	 * number must be translated to the internal PHY address.
	 */
	if (priv->legacy_phy_port_mapping)
		port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;

	/* Use Ethernet management when available, fallback to raw MDIO */
	ret = qca8k_phy_eth_command(priv, true, port, regnum, 0);
	if (ret >= 0)
		return ret;

	ret = qca8k_mdio_read(priv, port, regnum);

	if (ret < 0)
		return 0xffff;

	return ret;
}
1339
/* Allocate and register the slave MDIO bus described by the @mdio device
 * tree node, backed by the switch's internal MDIO master accessors.
 * Resources are device-managed, so no explicit teardown is needed.
 */
static int
qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
{
	struct dsa_switch *ds = priv->ds;
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(ds->dev);

	if (!bus)
		return -ENOMEM;

	bus->priv = (void *)priv;
	bus->name = "qca8k slave mii";
	bus->read = qca8k_internal_mdio_read;
	bus->write = qca8k_internal_mdio_write;
	snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
		 ds->index);

	bus->parent = ds->dev;
	/* Only probe addresses DSA knows as PHYs */
	bus->phy_mask = ~ds->phys_mii_mask;

	ds->slave_mii_bus = bus;

	return devm_of_mdiobus_register(priv->dev, bus, mdio);
}
1365
/* Inspect the DT ports node to decide whether the user-port PHYs are on
 * the internal MDIO master or on an external MDIO bus, then configure
 * the switch and register the internal bus as needed.
 */
static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
	u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
	struct device_node *ports, *port, *mdio;
	phy_interface_t mode;
	int err;

	ports = of_get_child_by_name(priv->dev->of_node, "ports");
	if (!ports)
		ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");

	if (!ports)
		return -EINVAL;

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			/* Drop the references held by the iterator on error */
			of_node_put(port);
			of_node_put(ports);
			return err;
		}

		if (!dsa_is_user_port(priv->ds, reg))
			continue;

		of_get_phy_mode(port, &mode);

		/* A phy-handle with a non-internal mode means the PHY sits
		 * on an external MDIO bus; otherwise it's an internal PHY.
		 */
		if (of_property_read_bool(port, "phy-handle") &&
		    mode != PHY_INTERFACE_MODE_INTERNAL)
			external_mdio_mask |= BIT(reg);
		else
			internal_mdio_mask |= BIT(reg);
	}

	of_node_put(ports);
	if (!external_mdio_mask && !internal_mdio_mask) {
		dev_err(priv->dev, "no PHYs are defined.\n");
		return -EINVAL;
	}

	/* Mixing internal and external PHYs is rejected: the error path
	 * below enforces a single bus configuration. NOTE(review): the
	 * MDIO master enable and the external MDC passthrough appear to be
	 * mutually exclusive in hardware — confirm against the datasheet.
	 */
	if (!!external_mdio_mask && !!internal_mdio_mask) {
		dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
		return -EINVAL;
	}

	if (external_mdio_mask) {
		/* Hand the PHYs over to the external bus by disabling the
		 * internal MDIO master.
		 */
		return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
					 QCA8K_MDIO_MASTER_EN);
	}

	/* Check if the devicetree declares an mdio child node */
	mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
	if (of_device_is_available(mdio)) {
		err = qca8k_mdio_register(priv, mdio);
		if (err)
			of_node_put(mdio);

		return err;
	}

	/* No mdio node: fall back to the legacy port->PHY mapping via the
	 * ds->ops phy_read/phy_write hooks.
	 */
	priv->legacy_phy_port_mapping = true;
	priv->ops.phy_read = qca8k_phy_read;
	priv->ops.phy_write = qca8k_phy_write;

	return 0;
}
1451
1452static int
1453qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
1454{
1455 u32 mask = 0;
1456 int ret = 0;
1457
1458
1459
1460
1461
1462 if (of_machine_is_compatible("qcom,ipq8064"))
1463 mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
1464
1465
1466 if (of_machine_is_compatible("qcom,ipq8065"))
1467 mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
1468
1469 if (mask) {
1470 ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
1471 QCA8K_MAC_PWR_RGMII0_1_8V |
1472 QCA8K_MAC_PWR_RGMII1_1_8V,
1473 mask);
1474 }
1475
1476 return ret;
1477}
1478
/* Return the index of the CPU port (only 0 or 6 can be wired as CPU
 * port on this switch family), or -EINVAL if neither is.
 */
static int qca8k_find_cpu_port(struct dsa_switch *ds)
{
	struct qca8k_priv *priv = ds->priv;

	/* Check the candidates in preference order: port 0 first */
	if (dsa_is_cpu_port(ds, 0))
		return 0;

	dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");

	if (dsa_is_cpu_port(ds, 6))
		return 6;

	return -EINVAL;
}
1494
/* Program the power-on-strapping (PWS) register from devicetree hints */
static int
qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
{
	struct device_node *node = priv->dev->of_node;
	const struct qca8k_match_data *data;
	u32 val = 0;
	int ret;

	/* QCA8327 exists in two packages; the reduced (148-pin) package
	 * needs an extra PWS bit, flagged in the OF match data.
	 */
	if (priv->switch_id == QCA8K_ID_QCA8327) {
		data = of_device_get_match_data(priv->dev);

		/* Set the package-148 bit only for the reduced package */
		if (data->reduced_package)
			val |= QCA8327_PWS_PACKAGE148_EN;

		ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
				val);
		if (ret)
			return ret;
	}

	if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
		val |= QCA8K_PWS_POWER_ON_SEL;

	if (of_property_read_bool(node, "qca,led-open-drain")) {
		/* Open-drain LEDs only take effect when the strapping
		 * override (POWER_ON_SEL) is also requested.
		 */
		if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
			dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
			return -EINVAL;
		}

		val |= QCA8K_PWS_LED_OPEN_EN_CSR;
	}

	return qca8k_rmw(priv, QCA8K_REG_PWS,
			QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
			val);
}
1536
/* Parse the CPU-capable port (0 and 6) DT nodes and cache the RGMII
 * delay and SGMII clock/PLL settings in priv->ports_config for later
 * use by mac_config/pcs_config. Always returns 0; unusable nodes are
 * simply skipped.
 */
static int
qca8k_parse_port_config(struct qca8k_priv *priv)
{
	int port, cpu_port_index = -1, ret;
	struct device_node *port_dn;
	phy_interface_t mode;
	struct dsa_port *dp;
	u32 delay;

	/* Only ports 0 and 6 carry pad/delay configuration */
	for (port = 0; port < QCA8K_NUM_PORTS; port++) {
		/* Skip every port that can't be a CPU port */
		if (port != 0 && port != 6)
			continue;

		dp = dsa_to_port(priv->ds, port);
		port_dn = dp->dn;
		/* cpu_port_index counts CPU-capable ports: 0 then 6 */
		cpu_port_index++;

		if (!of_device_is_available(port_dn))
			continue;

		ret = of_get_phy_mode(port_dn, &mode);
		if (ret)
			continue;

		switch (mode) {
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_SGMII:
			delay = 0;

			if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
				/* DT value is in ps, the register wants ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_TXID)
				delay = 1;

			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}

			priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;

			delay = 0;

			if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
				/* DT value is in ps, the register wants ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_RXID)
				delay = 2;

			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}

			priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;

			/* Everything below applies to SGMII only */
			if (mode == PHY_INTERFACE_MODE_RGMII ||
			    mode == PHY_INTERFACE_MODE_RGMII_ID ||
			    mode == PHY_INTERFACE_MODE_RGMII_TXID ||
			    mode == PHY_INTERFACE_MODE_RGMII_RXID)
				break;

			if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
				priv->ports_config.sgmii_tx_clk_falling_edge = true;

			if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
				priv->ports_config.sgmii_rx_clk_falling_edge = true;

			if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
				priv->ports_config.sgmii_enable_pll = true;

				/* The PLL must stay off on qca8327 */
				if (priv->switch_id == QCA8K_ID_QCA8327) {
					dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
					priv->ports_config.sgmii_enable_pll = false;
				}

				if (priv->switch_revision < 2)
					dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
			}

			break;
		default:
			continue;
		}
	}

	return 0;
}
1634
/* Apply the cached RGMII TX/RX internal delays for a CPU port to the
 * given pad control register.
 */
static void
qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
				      u32 reg)
{
	u32 delay, val = 0;
	int ret;

	/* A cached delay of 0 (see qca8k_parse_port_config) means "not
	 * requested": the corresponding delay-enable bit stays cleared.
	 * Non-zero values originate either from DT properties or from the
	 * rgmii-id/rgmii-txid/rgmii-rxid defaults.
	 */
	if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
		delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];

		val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
			QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
	}

	if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
		delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];

		val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
			QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
	}

	/* Clear both delay fields and enables, then set the new values */
	ret = qca8k_rmw(priv, reg,
			QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
			QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
			QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
			QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
			val);
	if (ret)
		dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
			cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
}
1675
1676static struct phylink_pcs *
1677qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
1678 phy_interface_t interface)
1679{
1680 struct qca8k_priv *priv = ds->priv;
1681 struct phylink_pcs *pcs = NULL;
1682
1683 switch (interface) {
1684 case PHY_INTERFACE_MODE_SGMII:
1685 case PHY_INTERFACE_MODE_1000BASEX:
1686 switch (port) {
1687 case 0:
1688 pcs = &priv->pcs_port_0.pcs;
1689 break;
1690
1691 case 6:
1692 pcs = &priv->pcs_port_6.pcs;
1693 break;
1694 }
1695 break;
1696
1697 default:
1698 break;
1699 }
1700
1701 return pcs;
1702}
1703
/* phylink mac_config: program the pad control register of a CPU port
 * for the requested xMII mode. User ports (1-5) are internal PHYs and
 * need no pad configuration.
 */
static void
qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
			 const struct phylink_link_state *state)
{
	struct qca8k_priv *priv = ds->priv;
	int cpu_port_index;
	u32 reg;

	switch (port) {
	case 0: /* 1st CPU port */
		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
		    state->interface != PHY_INTERFACE_MODE_SGMII)
			return;

		reg = QCA8K_REG_PORT0_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT0;
		break;
	case 1:
	case 2:
	case 3:
	case 4:
	case 5:
		/* Internal PHY ports: nothing to configure */
		return;
	case 6: /* 2nd CPU port, additionally supports 1000base-X */
		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
		    state->interface != PHY_INTERFACE_MODE_SGMII &&
		    state->interface != PHY_INTERFACE_MODE_1000BASEX)
			return;

		reg = QCA8K_REG_PORT6_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT6;
		break;
	default:
		dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
		return;
	}

	/* In-band negotiation is only accepted on port 6 */
	if (port != 6 && phylink_autoneg_inband(mode)) {
		dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
			__func__);
		return;
	}

	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);

		/* Configure the RGMII internal delays */
		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);

		/* QCA8337 also gets the port 5 pad RX delay enabled for any
		 * RGMII CPU port. NOTE(review): applied unconditionally for
		 * both CPU ports here — confirm against the datasheet.
		 */
		if (priv->switch_id == QCA8K_ID_QCA8337)
			qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
				    QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		/* Enable SGMII on the port */
		qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
		break;
	default:
		dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
			phy_modes(state->interface), port);
		return;
	}
}
1783
1784static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
1785 struct phylink_config *config)
1786{
1787 switch (port) {
1788 case 0:
1789 phy_interface_set_rgmii(config->supported_interfaces);
1790 __set_bit(PHY_INTERFACE_MODE_SGMII,
1791 config->supported_interfaces);
1792 break;
1793
1794 case 1:
1795 case 2:
1796 case 3:
1797 case 4:
1798 case 5:
1799
1800 __set_bit(PHY_INTERFACE_MODE_GMII,
1801 config->supported_interfaces);
1802 __set_bit(PHY_INTERFACE_MODE_INTERNAL,
1803 config->supported_interfaces);
1804 break;
1805
1806 case 6:
1807 phy_interface_set_rgmii(config->supported_interfaces);
1808 __set_bit(PHY_INTERFACE_MODE_SGMII,
1809 config->supported_interfaces);
1810 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
1811 config->supported_interfaces);
1812 break;
1813 }
1814
1815 config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1816 MAC_10 | MAC_100 | MAC_1000FD;
1817
1818 config->legacy_pre_march2020 = false;
1819}
1820
1821static void
1822qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
1823 phy_interface_t interface)
1824{
1825 struct qca8k_priv *priv = ds->priv;
1826
1827 qca8k_port_set_status(priv, port, 0);
1828}
1829
1830static void
1831qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
1832 phy_interface_t interface, struct phy_device *phydev,
1833 int speed, int duplex, bool tx_pause, bool rx_pause)
1834{
1835 struct qca8k_priv *priv = ds->priv;
1836 u32 reg;
1837
1838 if (phylink_autoneg_inband(mode)) {
1839 reg = QCA8K_PORT_STATUS_LINK_AUTO;
1840 } else {
1841 switch (speed) {
1842 case SPEED_10:
1843 reg = QCA8K_PORT_STATUS_SPEED_10;
1844 break;
1845 case SPEED_100:
1846 reg = QCA8K_PORT_STATUS_SPEED_100;
1847 break;
1848 case SPEED_1000:
1849 reg = QCA8K_PORT_STATUS_SPEED_1000;
1850 break;
1851 default:
1852 reg = QCA8K_PORT_STATUS_LINK_AUTO;
1853 break;
1854 }
1855
1856 if (duplex == DUPLEX_FULL)
1857 reg |= QCA8K_PORT_STATUS_DUPLEX;
1858
1859 if (rx_pause || dsa_is_cpu_port(ds, port))
1860 reg |= QCA8K_PORT_STATUS_RXFLOW;
1861
1862 if (tx_pause || dsa_is_cpu_port(ds, port))
1863 reg |= QCA8K_PORT_STATUS_TXFLOW;
1864 }
1865
1866 reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
1867
1868 qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
1869}
1870
/* Resolve the driver-private wrapper from its embedded phylink_pcs */
static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct qca8k_pcs, pcs);
}
1875
/* phylink PCS get_state: decode link, autoneg, duplex, speed and pause
 * from the port's PORT_STATUS register.
 */
static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
				struct phylink_link_state *state)
{
	struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
	int port = pcs_to_qca8k_pcs(pcs)->port;
	u32 reg;
	int ret;

	ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
	if (ret < 0) {
		/* Register read failed: report link down */
		state->link = false;
		return;
	}

	state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
	state->an_complete = state->link;
	state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
	state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
							   DUPLEX_HALF;

	switch (reg & QCA8K_PORT_STATUS_SPEED) {
	case QCA8K_PORT_STATUS_SPEED_10:
		state->speed = SPEED_10;
		break;
	case QCA8K_PORT_STATUS_SPEED_100:
		state->speed = SPEED_100;
		break;
	case QCA8K_PORT_STATUS_SPEED_1000:
		state->speed = SPEED_1000;
		break;
	default:
		state->speed = SPEED_UNKNOWN;
		break;
	}

	if (reg & QCA8K_PORT_STATUS_RXFLOW)
		state->pause |= MLO_PAUSE_RX;
	if (reg & QCA8K_PORT_STATUS_TXFLOW)
		state->pause |= MLO_PAUSE_TX;
}
1916
/* phylink PCS config: set up the SGMII/1000base-X PCS for CPU port 0
 * or 6 — SerDes autoneg, SGMII operating mode, optional internal
 * delays and clock-edge tuning.
 */
static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
			    phy_interface_t interface,
			    const unsigned long *advertising,
			    bool permit_pause_to_mac)
{
	struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
	int cpu_port_index, ret, port;
	u32 reg, val;

	port = pcs_to_qca8k_pcs(pcs)->port;
	switch (port) {
	case 0:
		reg = QCA8K_REG_PORT0_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT0;
		break;

	case 6:
		reg = QCA8K_REG_PORT6_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT6;
		break;

	default:
		WARN_ON(1);
		return -EINVAL;
	}

	/* Enable/disable SerDes auto-negotiation according to mode */
	ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
	if (ret)
		return ret;
	if (phylink_autoneg_inband(mode))
		val &= ~QCA8K_PWS_SERDES_AEN_DIS;
	else
		val |= QCA8K_PWS_SERDES_AEN_DIS;
	qca8k_write(priv, QCA8K_REG_PWS, val);

	/* Configure the SGMII parameters */
	ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
	if (ret)
		return ret;

	val |= QCA8K_SGMII_EN_SD;

	if (priv->ports_config.sgmii_enable_pll)
		val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
		       QCA8K_SGMII_EN_TX;

	if (dsa_is_cpu_port(priv->ds, port)) {
		/* CPU port: act as PHY towards the external MAC */
		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
		val |= QCA8K_SGMII_MODE_CTRL_PHY;
	} else if (interface == PHY_INTERFACE_MODE_SGMII) {
		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
		val |= QCA8K_SGMII_MODE_CTRL_MAC;
	} else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
		val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
		val |= QCA8K_SGMII_MODE_CTRL_BASEX;
	}

	qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);

	/* SGMII also gets the cached internal delays applied to the pad
	 * control register, same as RGMII.
	 */
	if (interface == PHY_INTERFACE_MODE_SGMII)
		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);

	/* On qca8327/qca8337 the SGMII clock-edge bits live in the PORT0
	 * pad control register regardless of which port is configured.
	 */
	if (priv->switch_id == QCA8K_ID_QCA8327 ||
	    priv->switch_id == QCA8K_ID_QCA8337)
		reg = QCA8K_REG_PORT0_PAD_CTRL;

	val = 0;

	/* SGMII clock phase configuration from DT */
	if (priv->ports_config.sgmii_rx_clk_falling_edge)
		val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;

	if (priv->ports_config.sgmii_tx_clk_falling_edge)
		val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;

	if (val)
		ret = qca8k_rmw(priv, reg,
				QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
				QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
				val);

	return 0;
}
2007
static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
{
	/* Intentionally empty: no manual autoneg restart is performed */
}
2011
/* phylink PCS callbacks shared by the SGMII-capable ports 0 and 6 */
static const struct phylink_pcs_ops qca8k_pcs_ops = {
	.pcs_get_state = qca8k_pcs_get_state,
	.pcs_config = qca8k_pcs_config,
	.pcs_an_restart = qca8k_pcs_an_restart,
};
2017
/* Initialize a per-port PCS wrapper and bind it to its switch port */
static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
			    int port)
{
	qpcs->pcs.ops = &qca8k_pcs_ops;

	/* Ask phylink to poll for PCS state changes. NOTE(review):
	 * presumably because no link-change interrupt is wired up here —
	 * confirm.
	 */
	qpcs->pcs.poll = true;
	qpcs->priv = priv;
	qpcs->port = port;
}
2028
/* ethtool get_strings: copy the MIB counter names for the stats set */
static void
qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
{
	const struct qca8k_match_data *match_data;
	struct qca8k_priv *priv = ds->priv;
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	match_data = of_device_get_match_data(priv->dev);

	/* Each name occupies a fixed ETH_GSTRING_LEN slot */
	for (i = 0; i < match_data->mib_count; i++)
		strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
			ETH_GSTRING_LEN);
}
2045
/* Handle a MIB autocast frame from the switch: if it belongs to the
 * port currently being polled, copy its counters into the waiting
 * ethtool buffer; in all cases account the frame towards completion.
 */
static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
	const struct qca8k_match_data *match_data;
	struct qca8k_mib_eth_data *mib_eth_data;
	struct qca8k_priv *priv = ds->priv;
	const struct qca8k_mib_desc *mib;
	struct mib_ethhdr *mib_ethhdr;
	int i, mib_len, offset = 0;
	u64 *data;
	u8 port;

	mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
	mib_eth_data = &priv->mib_eth_data;

	/* The switch autocasts a frame for every port; parse only the one
	 * for the port that was actually requested.
	 */
	port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
	if (port != mib_eth_data->req_port)
		goto exit;

	match_data = device_get_match_data(priv->dev);
	data = mib_eth_data->data;

	for (i = 0; i < match_data->mib_count; i++) {
		mib = &ar8327_mib[i];

		/* The first 3 counters are carried in the header itself */
		if (i < 3) {
			data[i] = mib_ethhdr->data[i];
			continue;
		}

		mib_len = sizeof(uint32_t);

		/* Counters with size == 2 are 64 bit wide */
		if (mib->size == 2)
			mib_len = sizeof(uint64_t);

		/* Copy the counter value out of the frame payload */
		memcpy(data + i, skb->data + offset, mib_len);

		/* Advance to the next counter in the payload */
		offset += mib_len;
	}

exit:
	/* One port accounted; wake the reader once every port arrived */
	if (refcount_dec_and_test(&mib_eth_data->port_parsed))
		complete(&mib_eth_data->rw_done);
}
2097
/* Gather MIB counters for one port via the Ethernet MIB-autocast path.
 * Returns the positive remaining-jiffies result of the completion wait
 * on success, 0 on timeout, or a negative errno if triggering the
 * autocast failed.
 */
static int
qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct qca8k_mib_eth_data *mib_eth_data;
	struct qca8k_priv *priv = ds->priv;
	int ret;

	mib_eth_data = &priv->mib_eth_data;

	/* Serialize autocast requests: one port at a time */
	mutex_lock(&mib_eth_data->mutex);

	reinit_completion(&mib_eth_data->rw_done);

	mib_eth_data->req_port = dp->index;
	mib_eth_data->data = data;
	/* The handler decrements this once per port's autocast frame */
	refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);

	mutex_lock(&priv->reg_mutex);

	/* Send the MIB autocast request to the switch */
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
				 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
				 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
				 QCA8K_MIB_BUSY);

	mutex_unlock(&priv->reg_mutex);

	if (ret)
		goto exit;

	ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);

exit:
	mutex_unlock(&mib_eth_data->mutex);

	return ret;
}
2136
/* ethtool get_ethtool_stats: prefer the Ethernet-frame MIB path when a
 * management master is available; otherwise read each counter register
 * directly. Failed register reads leave that slot untouched.
 */
static void
qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
			uint64_t *data)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	const struct qca8k_match_data *match_data;
	const struct qca8k_mib_desc *mib;
	u32 reg, i, val;
	u32 hi = 0;
	int ret;

	if (priv->mgmt_master &&
	    qca8k_get_ethtool_stats_eth(ds, port, data) > 0)
		return;

	match_data = of_device_get_match_data(priv->dev);

	for (i = 0; i < match_data->mib_count; i++) {
		mib = &ar8327_mib[i];
		reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;

		ret = qca8k_read(priv, reg, &val);
		if (ret < 0)
			continue;

		/* 64-bit counters need the high word from the next reg */
		if (mib->size == 2) {
			ret = qca8k_read(priv, reg + 4, &hi);
			if (ret < 0)
				continue;
		}

		data[i] = val;
		if (mib->size == 2)
			data[i] |= (u64)hi << 32;
	}
}
2173
2174static int
2175qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
2176{
2177 const struct qca8k_match_data *match_data;
2178 struct qca8k_priv *priv = ds->priv;
2179
2180 if (sset != ETH_SS_STATS)
2181 return 0;
2182
2183 match_data = of_device_get_match_data(priv->dev);
2184
2185 return match_data->mib_count;
2186}
2187
/* ethtool set_eee: enable/disable Low Power Idle for the port in the
 * global EEE control register.
 */
static int
qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
	u32 reg;
	int ret;

	/* Read-modify-write of the shared register under the lock */
	mutex_lock(&priv->reg_mutex);
	ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
	if (ret < 0)
		goto exit;

	if (eee->eee_enabled)
		reg |= lpi_en;
	else
		reg &= ~lpi_en;
	ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);

exit:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}
2211
static int
qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	/* Nothing to do on the switch side */
	return 0;
}
2218
2219static void
2220qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
2221{
2222 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2223 u32 stp_state;
2224
2225 switch (state) {
2226 case BR_STATE_DISABLED:
2227 stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
2228 break;
2229 case BR_STATE_BLOCKING:
2230 stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
2231 break;
2232 case BR_STATE_LISTENING:
2233 stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
2234 break;
2235 case BR_STATE_LEARNING:
2236 stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
2237 break;
2238 case BR_STATE_FORWARDING:
2239 default:
2240 stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
2241 break;
2242 }
2243
2244 qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2245 QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
2246}
2247
/* DSA bridge join: open forwarding between this port and every other
 * port offloading the same bridge, always including the CPU port.
 */
static int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
				  struct dsa_bridge bridge,
				  bool *tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	int port_mask, cpu_port;
	int i, ret;

	cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	port_mask = BIT(cpu_port);

	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		if (dsa_is_cpu_port(ds, i))
			continue;
		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
			continue;

		/* Add this port to the portvlan mask of the other ports
		 * in the bridge
		 */
		ret = regmap_set_bits(priv->regmap,
				      QCA8K_PORT_LOOKUP_CTRL(i),
				      BIT(port));
		if (ret)
			return ret;
		if (i != port)
			port_mask |= BIT(i);
	}

	/* Add all other bridge ports to this port's portvlan mask */
	ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
			QCA8K_PORT_LOOKUP_MEMBER, port_mask);

	return ret;
}
2283
/* DSA bridge leave: close forwarding between this port and the other
 * bridge members, leaving only the CPU port reachable.
 */
static void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
				    struct dsa_bridge bridge)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	int cpu_port, i;

	cpu_port = dsa_to_port(ds, port)->cpu_dp->index;

	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		if (dsa_is_cpu_port(ds, i))
			continue;
		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
			continue;

		/* Remove this port from the portvlan mask of the other
		 * ports in the bridge
		 */
		regmap_clear_bits(priv->regmap,
				  QCA8K_PORT_LOOKUP_CTRL(i),
				  BIT(port));
	}

	/* Set the CPU port as the only member of this port's portvlan
	 * mask
	 */
	qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
		  QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
}
2311
/* DSA fast_age: flush the FDB entries learned on this port */
static void
qca8k_port_fast_age(struct dsa_switch *ds, int port)
{
	struct qca8k_priv *priv = ds->priv;

	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
	mutex_unlock(&priv->reg_mutex);
}
2321
2322static int
2323qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
2324{
2325 struct qca8k_priv *priv = ds->priv;
2326 unsigned int secs = msecs / 1000;
2327 u32 val;
2328
2329
2330 val = secs / 7;
2331
2332
2333
2334
2335 if (!val)
2336 val = 1;
2337
2338 return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK,
2339 QCA8K_ATU_AGE_TIME(val));
2340}
2341
/* DSA port_enable: bring up the port MAC and record its state */
static int
qca8k_port_enable(struct dsa_switch *ds, int port,
		  struct phy_device *phy)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;

	qca8k_port_set_status(priv, port, 1);
	priv->port_sts[port].enabled = 1;

	/* User ports may negotiate asymmetric pause with their PHY */
	if (dsa_is_user_port(ds, port))
		phy_support_asym_pause(phy);

	return 0;
}
2356
/* DSA port_disable: shut down the port MAC and record its state */
static void
qca8k_port_disable(struct dsa_switch *ds, int port)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;

	qca8k_port_set_status(priv, port, 0);
	priv->port_sts[port].enabled = 0;
}
2365
2366static int
2367qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
2368{
2369 struct qca8k_priv *priv = ds->priv;
2370 int i, mtu = 0;
2371
2372 priv->port_mtu[port] = new_mtu;
2373
2374 for (i = 0; i < QCA8K_NUM_PORTS; i++)
2375 if (priv->port_mtu[i] > mtu)
2376 mtu = priv->port_mtu[i];
2377
2378
2379 return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
2380}
2381
/* DSA port_max_mtu: all ports share the same hardware frame limit */
static int
qca8k_port_max_mtu(struct dsa_switch *ds, int port)
{
	return QCA8K_MAX_MTU;
}
2387
/* Install a static FDB entry for addr on the ports in port_mask,
 * substituting the default port VID when none is given.
 */
static int
qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
		      u16 port_mask, u16 vid)
{
	/* Set the vid to the port vlan id if no vid is set */
	if (!vid)
		vid = QCA8K_PORT_VID_DEF;

	return qca8k_fdb_add(priv, addr, port_mask, vid,
			     QCA8K_ATU_STATUS_STATIC);
}
2399
2400static int
2401qca8k_port_fdb_add(struct dsa_switch *ds, int port,
2402 const unsigned char *addr, u16 vid,
2403 struct dsa_db db)
2404{
2405 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2406 u16 port_mask = BIT(port);
2407
2408 return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
2409}
2410
2411static int
2412qca8k_port_fdb_del(struct dsa_switch *ds, int port,
2413 const unsigned char *addr, u16 vid,
2414 struct dsa_db db)
2415{
2416 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
2417 u16 port_mask = BIT(port);
2418
2419 if (!vid)
2420 vid = QCA8K_PORT_VID_DEF;
2421
2422 return qca8k_fdb_del(priv, addr, port_mask, vid);
2423}
2424
/* DSA fdb_dump: walk the hardware FDB and report each entry seen on
 * this port through the callback. Iteration stops at the first empty
 * (aging == 0) slot, after QCA8K_NUM_FDB_RECORDS entries, or when the
 * callback returns non-zero. Always returns 0.
 */
static int
qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
		    dsa_fdb_dump_cb_t *cb, void *data)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	struct qca8k_fdb _fdb = { 0 };
	int cnt = QCA8K_NUM_FDB_RECORDS;
	bool is_static;
	int ret = 0;

	mutex_lock(&priv->reg_mutex);
	while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
		if (!_fdb.aging)
			break;
		is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
		ret = cb(_fdb.mac, _fdb.vid, is_static, data);
		if (ret)
			break;
	}
	mutex_unlock(&priv->reg_mutex);

	return 0;
}
2448
2449static int
2450qca8k_port_mdb_add(struct dsa_switch *ds, int port,
2451 const struct switchdev_obj_port_mdb *mdb,
2452 struct dsa_db db)
2453{
2454 struct qca8k_priv *priv = ds->priv;
2455 const u8 *addr = mdb->addr;
2456 u16 vid = mdb->vid;
2457
2458 return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
2459}
2460
2461static int
2462qca8k_port_mdb_del(struct dsa_switch *ds, int port,
2463 const struct switchdev_obj_port_mdb *mdb,
2464 struct dsa_db db)
2465{
2466 struct qca8k_priv *priv = ds->priv;
2467 const u8 *addr = mdb->addr;
2468 u16 vid = mdb->vid;
2469
2470 return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
2471}
2472
/* DSA mirror_add: mirror this port's ingress or egress traffic to
 * mirror->to_local_port. The switch has a single global monitor port,
 * so every mirror rule must target the same port.
 */
static int
qca8k_port_mirror_add(struct dsa_switch *ds, int port,
		      struct dsa_mall_mirror_tc_entry *mirror,
		      bool ingress, struct netlink_ext_ack *extack)
{
	struct qca8k_priv *priv = ds->priv;
	int monitor_port, ret;
	u32 reg, val;

	/* Reject if a rule of this direction already exists on the port */
	if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
		return -EEXIST;

	ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
	if (ret)
		return ret;

	/* The monitor port is global; 0xF means "unset". If it is already
	 * set to a different port, this request cannot be honoured.
	 */
	monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
	if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
		return -EEXIST;

	/* Set the monitor port */
	val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
			 mirror->to_local_port);
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
				 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
	if (ret)
		return ret;

	if (ingress) {
		reg = QCA8K_PORT_LOOKUP_CTRL(port);
		val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
	} else {
		reg = QCA8K_REG_PORT_HOL_CTRL1(port);
		val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
	}

	ret = regmap_update_bits(priv->regmap, reg, val, val);
	if (ret)
		return ret;

	/* Track the port per direction so mirror_del can release the
	 * monitor port when the last rule goes away.
	 */
	if (ingress)
		priv->mirror_rx |= BIT(port);
	else
		priv->mirror_tx |= BIT(port);

	return 0;
}
2528
2529static void
2530qca8k_port_mirror_del(struct dsa_switch *ds, int port,
2531 struct dsa_mall_mirror_tc_entry *mirror)
2532{
2533 struct qca8k_priv *priv = ds->priv;
2534 u32 reg, val;
2535 int ret;
2536
2537 if (mirror->ingress) {
2538 reg = QCA8K_PORT_LOOKUP_CTRL(port);
2539 val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
2540 } else {
2541 reg = QCA8K_REG_PORT_HOL_CTRL1(port);
2542 val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
2543 }
2544
2545 ret = regmap_clear_bits(priv->regmap, reg, val);
2546 if (ret)
2547 goto err;
2548
2549 if (mirror->ingress)
2550 priv->mirror_rx &= ~BIT(port);
2551 else
2552 priv->mirror_tx &= ~BIT(port);
2553
2554
2555 if (!priv->mirror_rx && !priv->mirror_tx) {
2556 val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
2557 ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
2558 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
2559 if (ret)
2560 goto err;
2561 }
2562err:
2563 dev_err(priv->dev, "Failed to del mirror port from %d", port);
2564}
2565
2566static int
2567qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
2568 struct netlink_ext_ack *extack)
2569{
2570 struct qca8k_priv *priv = ds->priv;
2571 int ret;
2572
2573 if (vlan_filtering) {
2574 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2575 QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
2576 QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
2577 } else {
2578 ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
2579 QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
2580 QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
2581 }
2582
2583 return ret;
2584}
2585
/* DSA vlan_add: add the port to a VLAN, optionally untagged, and make
 * the VLAN the port's PVID (default CVID/SVID) when requested.
 */
static int
qca8k_port_vlan_add(struct dsa_switch *ds, int port,
		    const struct switchdev_obj_port_vlan *vlan,
		    struct netlink_ext_ack *extack)
{
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct qca8k_priv *priv = ds->priv;
	int ret;

	ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
	if (ret) {
		dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
		return ret;
	}

	if (pvid) {
		/* Set the default egress VID for the port... */
		ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
				QCA8K_EGREES_VLAN_PORT_MASK(port),
				QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
		if (ret)
			return ret;

		/* ...and its ingress CVID/SVID */
		ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
				  QCA8K_PORT_VLAN_CVID(vlan->vid) |
				  QCA8K_PORT_VLAN_SVID(vlan->vid));
	}

	return ret;
}
2616
2617static int
2618qca8k_port_vlan_del(struct dsa_switch *ds, int port,
2619 const struct switchdev_obj_port_vlan *vlan)
2620{
2621 struct qca8k_priv *priv = ds->priv;
2622 int ret;
2623
2624 ret = qca8k_vlan_del(priv, port, vlan->vid);
2625 if (ret)
2626 dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
2627
2628 return ret;
2629}
2630
2631static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
2632{
2633 struct qca8k_priv *priv = ds->priv;
2634
2635
2636
2637
2638
2639
2640
2641 if (port > 0 && port < 6)
2642 return priv->switch_revision;
2643
2644 return 0;
2645}
2646
2647static enum dsa_tag_protocol
2648qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
2649 enum dsa_tag_protocol mp)
2650{
2651 return DSA_TAG_PROTO_QCA;
2652}
2653
2654static bool
2655qca8k_lag_can_offload(struct dsa_switch *ds, struct dsa_lag lag,
2656 struct netdev_lag_upper_info *info)
2657{
2658 struct dsa_port *dp;
2659 int members = 0;
2660
2661 if (!lag.id)
2662 return false;
2663
2664 dsa_lag_foreach_port(dp, ds->dst, &lag)
2665
2666 members++;
2667
2668 if (members > QCA8K_NUM_PORTS_FOR_LAG)
2669 return false;
2670
2671 if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2672 return false;
2673
2674 if (info->hash_type != NETDEV_LAG_HASH_L2 &&
2675 info->hash_type != NETDEV_LAG_HASH_L23)
2676 return false;
2677
2678 return true;
2679}
2680
2681static int
2682qca8k_lag_setup_hash(struct dsa_switch *ds, struct dsa_lag lag,
2683 struct netdev_lag_upper_info *info)
2684{
2685 struct net_device *lag_dev = lag.dev;
2686 struct qca8k_priv *priv = ds->priv;
2687 bool unique_lag = true;
2688 unsigned int i;
2689 u32 hash = 0;
2690
2691 switch (info->hash_type) {
2692 case NETDEV_LAG_HASH_L23:
2693 hash |= QCA8K_TRUNK_HASH_SIP_EN;
2694 hash |= QCA8K_TRUNK_HASH_DIP_EN;
2695 fallthrough;
2696 case NETDEV_LAG_HASH_L2:
2697 hash |= QCA8K_TRUNK_HASH_SA_EN;
2698 hash |= QCA8K_TRUNK_HASH_DA_EN;
2699 break;
2700 default:
2701 return -EOPNOTSUPP;
2702 }
2703
2704
2705 dsa_lags_foreach_id(i, ds->dst)
2706 if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
2707 unique_lag = false;
2708 break;
2709 }
2710
2711
2712
2713
2714
2715
2716
2717
2718 if (unique_lag) {
2719 priv->lag_hash_mode = hash;
2720 } else if (priv->lag_hash_mode != hash) {
2721 netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
2722 return -EOPNOTSUPP;
2723 }
2724
2725 return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
2726 QCA8K_TRUNK_HASH_MASK, hash);
2727}
2728
static int
qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
			  struct dsa_lag lag, bool delete)
{
	struct qca8k_priv *priv = ds->priv;
	int ret, id, i;
	u32 val;

	/* DSA LAG IDs are one-based; the hardware trunk index is zero-based */
	id = lag.id - 1;

	/* Read the current trunk member bitmap */
	ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
	if (ret)
		return ret;

	/* Isolate this trunk's member field and add/remove the port bit */
	val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
	val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
	if (delete)
		val &= ~BIT(port);
	else
		val |= BIT(port);

	/* Write back the member bitmap and the trunk enable together;
	 * with an empty bitmap (!val) the trunk is disabled.
	 * NOTE(review): ret is overwritten by the loop below without being
	 * checked here - confirm whether a failure should abort early.
	 */
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
				 QCA8K_REG_GOL_TRUNK_MEMBER(id) |
				 QCA8K_REG_GOL_TRUNK_EN(id),
				 !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
				 val << QCA8K_REG_GOL_TRUNK_SHIFT(id));

	/* Scan the trunk member-id slots for the entry to update */
	for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
		ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
		if (ret)
			return ret;

		/* Extract slot i's enable+port field */
		val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
		val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;

		if (delete) {
			/* Deleting: only a slot that matches the EN pattern is
			 * considered in use - presumably enable set with port 0
			 * bits folded in; confirm against the register layout.
			 */
			if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
				continue;

			/* ...and it must hold exactly this port */
			val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
			if (val != port)
				continue;
		} else {
			/* Adding: skip slots that are already enabled, keep
			 * looking for the first free one.
			 */
			if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
				continue;
		}

		/* Found the matching (delete) or free (add) slot */
		break;
	}

	/* Program slot i: enable flag follows !delete, plus the port number.
	 * If the loop found no slot, i == QCA8K_NUM_PORTS_FOR_LAG here.
	 */
	return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
				  QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
				  QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
				  !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
				  port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
}
2798
2799static int
2800qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
2801 struct netdev_lag_upper_info *info)
2802{
2803 int ret;
2804
2805 if (!qca8k_lag_can_offload(ds, lag, info))
2806 return -EOPNOTSUPP;
2807
2808 ret = qca8k_lag_setup_hash(ds, lag, info);
2809 if (ret)
2810 return ret;
2811
2812 return qca8k_lag_refresh_portmap(ds, port, lag, false);
2813}
2814
2815static int
2816qca8k_port_lag_leave(struct dsa_switch *ds, int port,
2817 struct dsa_lag lag)
2818{
2819 return qca8k_lag_refresh_portmap(ds, port, lag, true);
2820}
2821
2822static void
2823qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
2824 bool operational)
2825{
2826 struct dsa_port *dp = master->dsa_ptr;
2827 struct qca8k_priv *priv = ds->priv;
2828
2829
2830 if (dp->index != 0)
2831 return;
2832
2833 mutex_lock(&priv->mgmt_eth_data.mutex);
2834 mutex_lock(&priv->mib_eth_data.mutex);
2835
2836 priv->mgmt_master = operational ? (struct net_device *)master : NULL;
2837
2838 mutex_unlock(&priv->mib_eth_data.mutex);
2839 mutex_unlock(&priv->mgmt_eth_data.mutex);
2840}
2841
2842static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
2843 enum dsa_tag_protocol proto)
2844{
2845 struct qca_tagger_data *tagger_data;
2846
2847 switch (proto) {
2848 case DSA_TAG_PROTO_QCA:
2849 tagger_data = ds->tagger_data;
2850
2851 tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
2852 tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
2853
2854 break;
2855 default:
2856 return -EOPNOTSUPP;
2857 }
2858
2859 return 0;
2860}
2861
static int
qca8k_setup(struct dsa_switch *ds)
{
	struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
	int cpu_port, ret, i;
	u32 mask;

	/* Exactly one of port 0/6 must be wired as the CPU port */
	cpu_port = qca8k_find_cpu_port(ds);
	if (cpu_port < 0) {
		dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
		return cpu_port;
	}

	/* Parse per-port DT configuration before touching the hardware */
	ret = qca8k_parse_port_config(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_mdio_bus(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_of_pws_reg(priv);
	if (ret)
		return ret;

	ret = qca8k_setup_mac_pwr_sel(priv);
	if (ret)
		return ret;

	/* Register the PCS instances for the two MAC-facing ports */
	qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
	qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);

	/* Make sure MAC06 exchange is disabled */
	ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
				QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
	if (ret) {
		dev_err(priv->dev, "failed disabling MAC06 exchange");
		return ret;
	}

	/* Enable CPU port */
	ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
			      QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
	if (ret) {
		dev_err(priv->dev, "failed enabling CPU port");
		return ret;
	}

	/* MIB counter init is non-fatal: warn and carry on */
	ret = qca8k_mib_init(priv);
	if (ret)
		dev_warn(priv->dev, "mib init failed");

	/* Initial setup of all ports */
	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		/* Disable forwarding by default on all ports */
		ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
				QCA8K_PORT_LOOKUP_MEMBER, 0);
		if (ret)
			return ret;

		/* Enable QCA header mode (tx and rx) on the CPU port */
		if (dsa_is_cpu_port(ds, i)) {
			ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
					  FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
					  FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
			if (ret) {
				dev_err(priv->dev, "failed enabling QCA header mode");
				return ret;
			}
		}

		/* Disable the MAC by default on all user ports; they are
		 * brought up individually via port_enable.
		 */
		if (dsa_is_user_port(ds, i))
			qca8k_port_set_status(priv, i, 0);
	}

	/* Forward all unknown frames (igmp, broadcast, multicast, unicast)
	 * to the CPU port for Linux processing.
	 */
	ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
			  FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
	if (ret)
		return ret;

	/* Set up the connection between CPU port and user ports:
	 * user ports are only members of the CPU port and vice versa.
	 */
	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		/* CPU port becomes member of every user port */
		if (dsa_is_cpu_port(ds, i)) {
			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
					QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
			if (ret)
				return ret;
		}

		/* Each user port is only a member of the CPU port */
		if (dsa_is_user_port(ds, i)) {
			ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
					QCA8K_PORT_LOOKUP_MEMBER,
					BIT(cpu_port));
			if (ret)
				return ret;

			/* Enable address learning on user ports */
			ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
					      QCA8K_PORT_LOOKUP_LEARN);
			if (ret)
				return ret;

			/* For port-based VLANs to work, set the default
			 * egress VID for the port...
			 */
			ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
					QCA8K_EGREES_VLAN_PORT_MASK(i),
					QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
			if (ret)
				return ret;

			/* ...and the default customer/service VIDs */
			ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
					  QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
					  QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
			if (ret)
				return ret;
		}

		/* QCA8337-specific head-of-line blocking buffer/priority
		 * tuning. NOTE(review): the constants below look like
		 * vendor-recommended values - confirm against QCA
		 * documentation before changing them. Write errors are
		 * deliberately ignored here (best-effort tuning).
		 */
		if (priv->switch_id == QCA8K_ID_QCA8337) {
			switch (i) {
			/* The two CPU-capable ports (0, 6) and port 5 get a
			 * different egress priority/buffer layout.
			 */
			case 0:
			case 5:
			case 6:
				mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
					QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
				break;
			default:
				mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
					QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
					QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
			}
			qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);

			/* Ingress buffer plus per-priority/per-port egress
			 * buffer checks and WRED.
			 */
			mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
			QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
			QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
			QCA8K_PORT_HOL_CTRL1_WRED_EN;
			qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
				  QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
				  QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
				  QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
				  QCA8K_PORT_HOL_CTRL1_WRED_EN,
				  mask);
		}

		/* Track a per-port MTU (hardware only has a single global
		 * frame-size register; the MTU change path uses the max
		 * across ports). ETH_DATA_LEN (1500) is used here because
		 * the MTU change function adds the header overhead itself.
		 */
		priv->port_mtu[i] = ETH_DATA_LEN;
	}

	/* QCA8327 needs special global flow-control thresholds */
	if (priv->switch_id == QCA8K_ID_QCA8327) {
		mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
		       QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
		qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
			  QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
			  QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
			  mask);
	}

	/* Set the global max frame size to the standard 1518 bytes */
	ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
	if (ret)
		dev_warn(priv->dev, "failed setting MTU settings");

	/* Flush the FDB table */
	qca8k_fdb_flush(priv);

	/* Advertise the supported ageing time range to the DSA core */
	ds->ageing_time_min = 7000;
	ds->ageing_time_max = 458745000;

	/* Advertise LAG offload capacity */
	ds->num_lag_ids = QCA8K_NUM_LAGS;

	return 0;
}
3075
/* DSA operations implemented by this driver; installed (via a per-device
 * copy) in qca8k_sw_probe().
 */
static const struct dsa_switch_ops qca8k_switch_ops = {
	.get_tag_protocol	= qca8k_get_tag_protocol,
	.setup			= qca8k_setup,
	.get_strings		= qca8k_get_strings,
	.get_ethtool_stats	= qca8k_get_ethtool_stats,
	.get_sset_count		= qca8k_get_sset_count,
	.set_ageing_time	= qca8k_set_ageing_time,
	.get_mac_eee		= qca8k_get_mac_eee,
	.set_mac_eee		= qca8k_set_mac_eee,
	.port_enable		= qca8k_port_enable,
	.port_disable		= qca8k_port_disable,
	.port_change_mtu	= qca8k_port_change_mtu,
	.port_max_mtu		= qca8k_port_max_mtu,
	.port_stp_state_set	= qca8k_port_stp_state_set,
	.port_bridge_join	= qca8k_port_bridge_join,
	.port_bridge_leave	= qca8k_port_bridge_leave,
	.port_fast_age		= qca8k_port_fast_age,
	.port_fdb_add		= qca8k_port_fdb_add,
	.port_fdb_del		= qca8k_port_fdb_del,
	.port_fdb_dump		= qca8k_port_fdb_dump,
	.port_mdb_add		= qca8k_port_mdb_add,
	.port_mdb_del		= qca8k_port_mdb_del,
	.port_mirror_add	= qca8k_port_mirror_add,
	.port_mirror_del	= qca8k_port_mirror_del,
	.port_vlan_filtering	= qca8k_port_vlan_filtering,
	.port_vlan_add		= qca8k_port_vlan_add,
	.port_vlan_del		= qca8k_port_vlan_del,
	.phylink_get_caps	= qca8k_phylink_get_caps,
	.phylink_mac_select_pcs	= qca8k_phylink_mac_select_pcs,
	.phylink_mac_config	= qca8k_phylink_mac_config,
	.phylink_mac_link_down	= qca8k_phylink_mac_link_down,
	.phylink_mac_link_up	= qca8k_phylink_mac_link_up,
	.get_phy_flags		= qca8k_get_phy_flags,
	.port_lag_join		= qca8k_port_lag_join,
	.port_lag_leave		= qca8k_port_lag_leave,
	.master_state_change	= qca8k_master_change,
	.connect_tag_protocol	= qca8k_connect_tag_protocol,
};
3114
3115static int qca8k_read_switch_id(struct qca8k_priv *priv)
3116{
3117 const struct qca8k_match_data *data;
3118 u32 val;
3119 u8 id;
3120 int ret;
3121
3122
3123 data = of_device_get_match_data(priv->dev);
3124 if (!data)
3125 return -ENODEV;
3126
3127 ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
3128 if (ret < 0)
3129 return -ENODEV;
3130
3131 id = QCA8K_MASK_CTRL_DEVICE_ID(val);
3132 if (id != data->id) {
3133 dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
3134 return -ENODEV;
3135 }
3136
3137 priv->switch_id = id;
3138
3139
3140 priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
3141
3142 return 0;
3143}
3144
static int
qca8k_sw_probe(struct mdio_device *mdiodev)
{
	struct qca8k_priv *priv;
	int ret;

	/* Allocate the private data struct first so that we can probe the
	 * switch id register before committing to anything else.
	 */
	priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->bus = mdiodev->bus;
	priv->dev = &mdiodev->dev;

	/* Optional hardware reset line; absence is not an error */
	priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
						   GPIOD_ASIS);
	if (IS_ERR(priv->reset_gpio))
		return PTR_ERR(priv->reset_gpio);

	if (priv->reset_gpio) {
		/* Pulse reset: assert, hold 20 ms, release. NOTE(review):
		 * presumably 20 ms satisfies the chip's minimum reset
		 * width - confirm against the datasheet.
		 */
		gpiod_set_value_cansleep(priv->reset_gpio, 1);
		msleep(20);
		gpiod_set_value_cansleep(priv->reset_gpio, 0);
	}

	/* Start by setting up the register mapping */
	priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
					&qca8k_regmap_config);
	if (IS_ERR(priv->regmap)) {
		dev_err(priv->dev, "regmap initialization failed");
		return PTR_ERR(priv->regmap);
	}

	/* 0xffff is an out-of-range sentinel that forces the first MDIO
	 * access to (re)program the page and hi/lo word registers.
	 * NOTE(review): inferred from the cache init - confirm in the
	 * MDIO access helpers.
	 */
	priv->mdio_cache.page = 0xffff;
	priv->mdio_cache.lo = 0xffff;
	priv->mdio_cache.hi = 0xffff;

	/* Verify we are talking to the chip the compatible promised */
	ret = qca8k_read_switch_id(priv);
	if (ret)
		return ret;

	priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
	if (!priv->ds)
		return -ENOMEM;

	/* Mutex/completion pairs used by the Ethernet-based mgmt and MIB
	 * paths (see qca8k_master_change and connect_tag_protocol handlers).
	 */
	mutex_init(&priv->mgmt_eth_data.mutex);
	init_completion(&priv->mgmt_eth_data.rw_done);

	mutex_init(&priv->mib_eth_data.mutex);
	init_completion(&priv->mib_eth_data.rw_done);

	priv->ds->dev = &mdiodev->dev;
	priv->ds->num_ports = QCA8K_NUM_PORTS;
	priv->ds->priv = priv;
	/* Install a per-device copy of the ops table */
	priv->ops = qca8k_switch_ops;
	priv->ds->ops = &priv->ops;
	mutex_init(&priv->reg_mutex);
	dev_set_drvdata(&mdiodev->dev, priv);

	return dsa_register_switch(priv->ds);
}
3212
3213static void
3214qca8k_sw_remove(struct mdio_device *mdiodev)
3215{
3216 struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
3217 int i;
3218
3219 if (!priv)
3220 return;
3221
3222 for (i = 0; i < QCA8K_NUM_PORTS; i++)
3223 qca8k_port_set_status(priv, i, 0);
3224
3225 dsa_unregister_switch(priv->ds);
3226
3227 dev_set_drvdata(&mdiodev->dev, NULL);
3228}
3229
3230static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
3231{
3232 struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
3233
3234 if (!priv)
3235 return;
3236
3237 dsa_switch_shutdown(priv->ds);
3238
3239 dev_set_drvdata(&mdiodev->dev, NULL);
3240}
3241
3242#ifdef CONFIG_PM_SLEEP
3243static void
3244qca8k_set_pm(struct qca8k_priv *priv, int enable)
3245{
3246 int i;
3247
3248 for (i = 0; i < QCA8K_NUM_PORTS; i++) {
3249 if (!priv->port_sts[i].enabled)
3250 continue;
3251
3252 qca8k_port_set_status(priv, i, enable);
3253 }
3254}
3255
3256static int qca8k_suspend(struct device *dev)
3257{
3258 struct qca8k_priv *priv = dev_get_drvdata(dev);
3259
3260 qca8k_set_pm(priv, 0);
3261
3262 return dsa_switch_suspend(priv->ds);
3263}
3264
3265static int qca8k_resume(struct device *dev)
3266{
3267 struct qca8k_priv *priv = dev_get_drvdata(dev);
3268
3269 qca8k_set_pm(priv, 1);
3270
3271 return dsa_switch_resume(priv->ds);
3272}
3273#endif
3274
/* Suspend/resume hooks; qca8k_suspend/qca8k_resume are only built under
 * CONFIG_PM_SLEEP, matching the #ifdef around their definitions.
 */
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
			 qca8k_suspend, qca8k_resume);
3277
/* Per-compatible identification data, checked in qca8k_read_switch_id() */
static const struct qca8k_match_data qca8327 = {
	.id = QCA8K_ID_QCA8327,
	.reduced_package = true,
	.mib_count = QCA8K_QCA832X_MIB_COUNT,
};

/* QCA8328 reports the same switch id as QCA8327 but is not the
 * reduced-package variant.
 */
static const struct qca8k_match_data qca8328 = {
	.id = QCA8K_ID_QCA8327,
	.mib_count = QCA8K_QCA832X_MIB_COUNT,
};

/* Shared by the QCA8334/QCA8337 compatibles */
static const struct qca8k_match_data qca833x = {
	.id = QCA8K_ID_QCA8337,
	.mib_count = QCA8K_QCA833X_MIB_COUNT,
};
3293
/* Device-tree compatibles handled by this driver */
static const struct of_device_id qca8k_of_match[] = {
	{ .compatible = "qca,qca8327", .data = &qca8327 },
	{ .compatible = "qca,qca8328", .data = &qca8328 },
	{ .compatible = "qca,qca8334", .data = &qca833x },
	{ .compatible = "qca,qca8337", .data = &qca833x },
	{ /* sentinel */ },
};
3301
/* The switch is a device on its parent MDIO bus, hence an mdio_driver */
static struct mdio_driver qca8kmdio_driver = {
	.probe  = qca8k_sw_probe,
	.remove = qca8k_sw_remove,
	.shutdown = qca8k_sw_shutdown,
	.mdiodrv.driver = {
		.name = "qca8k",
		.of_match_table = qca8k_of_match,
		.pm = &qca8k_pm_ops,
	},
};
3312
mdio_module_driver(qca8kmdio_driver);

MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
MODULE_LICENSE("GPL v2");
/* NOTE(review): this is an MDIO driver, so the "platform:" alias looks
 * historical - confirm whether anything still autoloads by it before
 * removing.
 */
MODULE_ALIAS("platform:qca8k");
3319