// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"

enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];

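/* The prototypes are added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */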
static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
			     const struct phylink_link_state *state);
static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface, struct phy_device *phy);

#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");

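/* Utility/helper methods */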
void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}

static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
	return cpu % priv->nthreads;
}

/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register.
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{
	return readl(priv->swth_base[thread] + offset);
}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[thread] + offset);
}

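/* Descriptor accessors: the RX/TX descriptor layout differs between
 * PPv2.1 (32-bit addresses) and PPv2.2 (40-bit addresses packed into
 * 64-bit fields), so all descriptor fields go through these helpers.
 */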
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

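/* Number of RXQs used by the port, depending on the queue distribution mode */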
static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
	unsigned int nrxqs;

	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
		return 1;

	/* According to the PPv2.2 datasheet and our experiments on
	 * PPv2.1, RX queues have an allocation granularity of 4 (when
	 * more than a single RX queue is used).
	 */
	nrxqs = (num_possible_cpus() + 3) & ~0x3;
	if (nrxqs > MVPP2_PORT_MAX_RXQ)
		nrxqs = MVPP2_PORT_MAX_RXQ;

	return nrxqs;
}

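/* Get number of physical egress port */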
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

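/* Get number of physical TXQ */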
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

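/* Buffer Manager configuration routines */

/* Create pool */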
static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

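/* Set pool buffer size */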
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}

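/* Free all buffers from the pool */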
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

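/* Check number of buffers in BM pool */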
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
		MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
		MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}

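/* Cleanup pool */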
static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_bm_pool *bm_pool;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < poolnum; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, poolnum = MVPP2_BM_POOLS_NUM;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	dev_info(dev, "using %d %s buffers\n", poolnum,
		 priv->percpu_pools ? "per-cpu" : "shared");

	for (i = 0; i < poolnum; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, poolnum,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}

static void mvpp2_setup_bm_pool(void)
{
	/* Short pool */
	mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

	/* Long pool */
	mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

	/* Jumbo pool */
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}

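/* Attach long pool to rxq */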
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

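/* Attach short pool to rxq */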
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}

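/* Release buffer to BM */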
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	unsigned long flags = 0;

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->bm_lock[thread], flags);

	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX descriptor. Instead
	 * of storing the virtual address, we store the physical address.
	 */
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

	put_cpu();
}

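/* Allocate buffers for the pool */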
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	if (port->priv->percpu_pools &&
	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		netdev_err(port->dev,
			   "attempted to use jumbo frames with per-cpu pools");
		return 0;
	}

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}

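/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */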
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't being used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0) {
			if (port->priv->percpu_pools) {
				if (pool < port->nrxqs)
					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
				else
					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
			} else {
				pkts_num = mvpp2_pools[pool].buf_num;
			}
		} else {
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);
		}

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

static struct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
			 unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool > port->nrxqs * 2) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't being used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[type].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

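/* Initialize pools for swf, shared buffers variant */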
static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
	int rxq;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}

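/* Initialize pools for swf, percpu buffers variant */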
static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
{
	struct mvpp2_bm_pool *p;
	int i;

	for (i = 0; i < port->nrxqs; i++) {
		p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
					     mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
		if (!p)
			return -ENOMEM;

		port->priv->bm_pools[i].port_map |= BIT(port->id);
		mvpp2_rxq_short_pool_set(port, i, port->priv->bm_pools[i].id);
	}

	for (i = 0; i < port->nrxqs; i++) {
		p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
					     mvpp2_pools[MVPP2_BM_LONG].pkt_size);
		if (!p)
			return -ENOMEM;

		port->priv->bm_pools[i + port->nrxqs].port_map |= BIT(port->id);
		mvpp2_rxq_long_pool_set(port, i,
					port->priv->bm_pools[i + port->nrxqs].id);
	}

	port->pool_long = NULL;
	port->pool_short = NULL;

	return 0;
}

static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	if (port->priv->percpu_pools)
		return mvpp2_swf_bm_pool_init_percpu(port);
	else
		return mvpp2_swf_bm_pool_init_shared(port);
}

static void mvpp2_set_hw_csum(struct mvpp2_port *port,
			      enum mvpp2_bm_pool_log_num new_long_pool)
{
	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Update L4 checksum when jumbo enable/disable on port.
	 * Only port 0 supports hardware checksum offload due to
	 * the Tx FIFO size limitation.
	 * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
	 * has 7 bits, so the maximum L3 offset is 128.
	 */
	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
		port->dev->features &= ~csums;
		port->dev->hw_features &= ~csums;
	} else {
		port->dev->features |= csums;
		port->dev->hw_features |= csums;
	}
}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	if (port->priv->percpu_pools)
		goto out_set;

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		/* Update L4 checksum when jumbo enable/disable on port */
		mvpp2_set_hw_csum(port, new_long_pool);
	}

out_set:
	dev->mtu = mtu;
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}

static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}

/* Mask the current thread's Rx/Tx interrupts
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	u32 val;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}

static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version != MVPP22)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	}
}

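/* Port configuration routines */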
static bool mvpp2_is_xlg(phy_interface_t interface)
{
	return interface == PHY_INTERFACE_MODE_10GBASER ||
	       interface == PHY_INTERFACE_MODE_XAUI;
}

static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT1_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT0_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}

static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}

static int mvpp22_gop_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (port->gop_id == 0)
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_10GBASER:
		if (port->gop_id != 0)
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}

static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (port->gop_id == 0) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (mvpp2_is_xlg(port->phy_interface))
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}

static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->phylink ||
	    phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);
	}

	mvpp22_gop_unmask_irq(port);
}

/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
 * differ.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port)
{
	int ret;

	if (!port->comphy)
		return 0;

	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET,
			       port->phy_interface);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

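/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */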
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

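/* Configure loopback port */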
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (state->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

struct mvpp2_ethtool_counter {
	unsigned int offset;
	const char string[ETH_GSTRING_LEN];
	bool reg_is_64b;
};

static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{
	u64 val;

	val = readl(port->stats_base + counter->offset);
	if (counter->reg_is_64b)
		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

	return val;
}

/* Some counters are accessed indirectly by first writing an index to
 * MVPP2_CTRS_IDX. The index can represent various resources depending on the
 * register we access, it can be a hit counter for some classification tables,
 * a counter specific to a rxq, a txq or a buffer pool.
 */
static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
	return mvpp2_read(priv, reg);
}

/* Due to the fact that software statistics and hardware statistics are, by
 * design, incremented at different moments in the chain of packet processing,
 * it is very likely that incoming packets could have been dropped after being
 * counted by hardware but before reaching software statistics (most probably
 * multicast packets), and in the opposite way, during transmission, FCS bytes
 * are added in between as well as TSO skb will be split and header bytes added.
 * Hence, statistics gathered from userspace with ifconfig (software) and
 * ethtool (hardware) cannot be compared.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
	{ MVPP2_MIB_FC_SENT, "fc_sent" },
	{ MVPP2_MIB_FC_RCVD, "fc_received" },
	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
	{ MVPP2_MIB_COLLISION, "collision" },
	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
	{ MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
	{ MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
	{ MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
	{ MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
	{ MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_enqueue_to_ddr" },
	{ MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
	{ MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
	{ MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
	{ MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
	{ MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
	{ MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
	{ MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
	{ MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
	{ MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
	{ MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
};

#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)	(ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
						 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
						 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
						 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)))

static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
				      u8 *data)
{
	struct mvpp2_port *port = netdev_priv(netdev);
	int i, q;

	if (sset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
		strscpy(data, mvpp2_ethtool_mib_regs[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
		strscpy(data, mvpp2_ethtool_port_regs[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (q = 0; q < port->ntxqs; q++) {
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_txq_regs[i].string, q);
			data += ETH_GSTRING_LEN;
		}
	}

	for (q = 0; q < port->nrxqs; q++) {
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_rxq_regs[i].string,
				 q);
			data += ETH_GSTRING_LEN;
		}
	}
}

static void mvpp2_read_stats(struct mvpp2_port *port)
{
	u64 *pstats;
	int i, q;

	pstats = port->ethtool_stats;

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
		*pstats++ += mvpp2_read(port->priv,
					mvpp2_ethtool_port_regs[i].offset +
					4 * port->id);

	for (q = 0; q < port->ntxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      MVPP22_CTRS_TX_CTR(port->id, q),
						      mvpp2_ethtool_txq_regs[i].offset);

	/* Rxqs are numbered from 0 from the user standpoint, but not from the
	 * driver's standpoint, we need to add the port->first_rxq offset.
	 */
	for (q = 0; q < port->nrxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      port->first_rxq + q,
						      mvpp2_ethtool_rxq_regs[i].offset);
}

static void mvpp2_gather_hw_statistics(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
					       stats_work);

	mutex_lock(&port->gather_stats_lock);

	mvpp2_read_stats(port);

	/* No need to read again the counters right after this function if it
	 * was called asynchronously by the user (ie. use of ethtool).
	 */
	cancel_delayed_work(&port->stats_work);
	queue_delayed_work(port->priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	mutex_unlock(&port->gather_stats_lock);
}

static void mvpp2_ethtool_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Update statistics from the port, then take the lock to avoid
	 * concurrent accesses on the ethtool_stats structure during its copy.
	 */
	mvpp2_gather_hw_statistics(&port->stats_work.work);

	mutex_lock(&port->gather_stats_lock);
	memcpy(data, port->ethtool_stats,
	       sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
	mutex_unlock(&port->gather_stats_lock);
}

static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (sset == ETH_SS_STATS)
		return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);

	return -EOPNOTSUPP;
}

static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
	      MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
		      ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}
}

static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
	val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
}

static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_10GBASER:
		val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
		val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
		       MAC_CLK_RESET_SD_TX;
		val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
		writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
		break;
	case PHY_INTERFACE_MODE_XAUI:
	case PHY_INTERFACE_MODE_RXAUI:
		val = readl(xpcs + MVPP22_XPCS_CFG0);
		writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
		break;
	default:
		break;
	}
}

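/* Change maximum receive size of the port */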
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

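/* Change maximum receive size of the port */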
static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}

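/* Set defaults to the MVPP2 port */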
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Set TXQ scheduling to Round-Robin */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
			MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}

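/* Enable/disable receiving packets */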
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable transmit via physical egress queue
 * - HW starts take descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		   MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment
	 * the number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

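/* Set rx queue offset */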
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Tx descriptors helper methods */

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
		/* Update number of occupied aggregated Tx descriptors */
		unsigned int thread =
			mvpp2_cpu_to_thread(port->priv, smp_processor_id());
		u32 val = mvpp2_read_relaxed(port->priv,
					     MVPP2_AGGR_TXQ_STATUS_REG(thread));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;

		if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
			return -ENOMEM;
	}
	return 0;
}

/* Reserved Tx descriptors allocation request
 *
 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
 * only by mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
					 struct mvpp2_tx_queue *txq, int num)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2 *priv = port->priv;
	u32 val;

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}

/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, desc_count;
	unsigned int thread;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */
	desc_count = 0;
	/* Compute total of used descriptors */
	for (thread = 0; thread < port->priv->nthreads; thread++) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	    (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);

	/* OK, the descriptor could have been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;
	return 0;
}

/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

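/* Set Tx descriptors fields relevant for CSUM calculation */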
static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == htons(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-thread access
 *
 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
 * (migration disabled) and from the TX completion tasklet (migration
 * disabled) so using smp_processor_id() is OK.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_thread_read_relaxed(port->priv,
					mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
					MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

/* Called through on_each_cpu(), so runs on all CPUs, with migration
 * disabled, therefore using smp_processor_id() is OK.
 */
static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	for (queue = 0; queue < port->ntxqs; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_thread_read(port->priv,
				  mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
				  MVPP2_TXQ_SENT_REG(id));
	}
}

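/* Set max sizes for Tx queues */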
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger that MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < port->ntxqs; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}

/* Set the number of packets that will be received before Rx interrupt
 * will be generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());

	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;

	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
			   rxq->pkts_coal);

	put_cpu();
}

2160
2161static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
2162 struct mvpp2_tx_queue *txq)
2163{
2164 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2165 u32 val;
2166
2167 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
2168 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
2169
2170 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
2171 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2172 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
2173
2174 put_cpu();
2175}
2176
2177static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
2178{
2179 u64 tmp = (u64)clk_hz * usec;
2180
2181 do_div(tmp, USEC_PER_SEC);
2182
2183 return tmp > U32_MAX ? U32_MAX : tmp;
2184}
2185
2186static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
2187{
2188 u64 tmp = (u64)cycles * USEC_PER_SEC;
2189
2190 do_div(tmp, clk_hz);
2191
2192 return tmp > U32_MAX ? U32_MAX : tmp;
2193}
2194
2195
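/* Set the delay, in usecs, before an Rx interrupt fires. The value
 * is converted to tclk cycles and clamped to the register maximum;
 * e.g. with a hypothetical 250 MHz tclk, 100 usecs would program
 * 25000 cycles.
 */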
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);

	if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
		rxq->time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);

		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
}

static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);

	if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
		port->tx_time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);

		val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
}

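/* Free the skbs and DMA mappings of "num" completed Tx buffers.
 * TSO headers live in a per-queue coherent area and are skipped.
 */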
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_txq_pcpu_buf *tx_buf =
			txq_pcpu->buffs + txq_pcpu->txq_get_index;

		if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
			dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
					 tx_buf->size, DMA_TO_DEVICE);
		if (tx_buf->skb)
			dev_kfree_skb_any(tx_buf->skb);

		mvpp2_txq_inc_get(txq_pcpu);
	}
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}

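/* Handle the end of transmission on a Tx queue: release completed
 * buffers and wake the netdev queue once enough room is available.
 */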
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->count <= txq_pcpu->wake_threshold)
			netif_tx_wake_queue(nq);
}

static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
				  unsigned int thread)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}

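/* Allocate and initialize the descriptors of a per-thread
 * aggregated Tx queue and point the hardware at them.
 */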
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq,
			       unsigned int thread, struct mvpp2 *priv)
{
	u32 txq_dma;

	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
					     MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
					     &aggr_txq->descs_dma, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;

	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(thread));

	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
		    MVPP2_AGGR_TXQ_SIZE);

	return 0;
}

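/* Create and initialize the specified Rx queue */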
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	unsigned int thread;
	u32 rxq_dma;

	rxq->size = port->rx_ring_size;

	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_dma, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
	put_cpu();

	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	mvpp2_rx_pkts_coal_set(port, rxq);
	mvpp2_rx_time_coal_set(port, rxq);

	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}

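/* Return all buffers still held by an Rx queue to their BM pools */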
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
		int pool;

		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;

		mvpp2_bm_pool_put(port, pool,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	unsigned int thread;

	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_dma);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma = 0;

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
	put_cpu();
}

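/* Create and initialize a Tx queue, including per-thread buffer
 * tracking and the coherent area used for TSO headers.
 */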
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	unsigned int thread;
	int desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
					txq->size * MVPP2_DESC_ALIGNED_SIZE,
					&txq->descs_dma, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
			   txq->descs_dma);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);

	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
	put_cpu();

	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for (thread = 0; thread < port->priv->nthreads; thread++) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
		txq_pcpu->size = txq->size;
		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
						sizeof(*txq_pcpu->buffs),
						GFP_KERNEL);
		if (!txq_pcpu->buffs)
			return -ENOMEM;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
		txq_pcpu->tso_headers = NULL;

		txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
		txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;

		txq_pcpu->tso_headers =
			dma_alloc_coherent(port->dev->dev.parent,
					   txq_pcpu->size * TSO_HEADER_SIZE,
					   &txq_pcpu->tso_headers_dma,
					   GFP_KERNEL);
		if (!txq_pcpu->tso_headers)
			return -ENOMEM;
	}

	return 0;
}

static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int thread;

	for (thread = 0; thread < port->priv->nthreads; thread++) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
		kfree(txq_pcpu->buffs);

		if (txq_pcpu->tso_headers)
			dma_free_coherent(port->dev->dev.parent,
					  txq_pcpu->size * TSO_HEADER_SIZE,
					  txq_pcpu->tso_headers,
					  txq_pcpu->tso_headers_dma);

		txq_pcpu->tso_headers = NULL;
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_dma);

	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma = 0;

	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);

	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
	put_cpu();
}

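/* Drain a Tx queue: enable the drain bit, wait (up to a timeout)
 * for the pending-descriptor count to reach zero, then free the
 * per-thread buffers.
 */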
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending;
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	u32 val;

	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);

	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_thread_read(port->priv, thread,
					    MVPP2_TXQ_PENDING_REG);
		pending &= MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
	put_cpu();

	for (thread = 0; thread < port->priv->nthreads; thread++) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);

		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}

static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}

static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < port->nrxqs; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err, cpu;

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;

		cpu = queue % num_present_cpus();
		netif_set_xps_queue(port->dev, cpumask_of(cpu), queue);
	}

	if (port->has_tx_irqs) {
		mvpp2_tx_time_coal_set(port);
		for (queue = 0; queue < port->ntxqs; queue++) {
			txq = port->txqs[queue];
			mvpp2_tx_pkts_coal_set(port, txq);
		}
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}

static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_queue_vector *qv = dev_id;

	mvpp2_qvec_interrupt_disable(qv);

	napi_schedule(&qv->napi);

	return IRQ_HANDLED;
}

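/* Per-port interrupt for link status changes (PPv2.2 only) */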
static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
	struct net_device *dev = port->dev;
	bool event = false, link = false;
	u32 val;

	mvpp22_gop_mask_irq(port);

	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_INT_STAT);
		if (val & MVPP22_XLG_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP22_XLG_STATUS);
			if (val & MVPP22_XLG_STATUS_LINK_UP)
				link = true;
		}
	} else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
		   phy_interface_mode_is_8023z(port->phy_interface) ||
		   port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_STAT);
		if (val & MVPP22_GMAC_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP2_GMAC_STATUS0);
			if (val & MVPP2_GMAC_STATUS0_LINK_UP)
				link = true;
		}
	}

	if (port->phylink) {
		phylink_mac_change(port->phylink, link);
		goto handled;
	}

	if (!netif_running(dev) || !event)
		goto handled;

	if (link) {
		mvpp2_interrupts_enable(port);

		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
		netif_carrier_on(dev);
		netif_tx_wake_all_queues(dev);
	} else {
		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
		mvpp2_ingress_disable(port);
		mvpp2_egress_disable(port);

		mvpp2_interrupts_disable(port);
	}

handled:
	mvpp22_gop_unmask_irq(port);
	return IRQ_HANDLED;
}

static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct net_device *dev;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	unsigned int tx_todo, cause;

	port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
	dev = port_pcpu->dev;

	if (!netif_running(dev))
		return HRTIMER_NORESTART;

	port_pcpu->timer_scheduled = false;
	port = netdev_priv(dev);

	cause = (1 << port->ntxqs) - 1;
	tx_todo = mvpp2_tx_done(port, cause,
				mvpp2_cpu_to_thread(port->priv, smp_processor_id()));

	if (tx_todo && !port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		hrtimer_forward_now(&port_pcpu->tx_done_timer,
				    MVPP2_TXDONE_HRTIMER_PERIOD_NS);

		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}

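/* Main RX/TX processing routines */

/* Display more detailed information about an Rx error */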
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
	char *err_str = NULL;

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		err_str = "crc";
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		err_str = "overrun";
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		err_str = "resource";
		break;
	}
	if (err_str && net_ratelimit())
		netdev_err(port->dev,
			   "bad rx status %08x (%s error), size=%zu\n",
			   status, err_str, sz);
}

static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		    (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}

static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool, int pool)
{
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
			      GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);

	return 0;
}

static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;
		__be16 l3_proto = vlan_get_protocol(skb);

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   l3_proto, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}

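/* Main Rx processing: run from NAPI poll with the given budget */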
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		    int rx_todo, struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received;
	int rx_done = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 rx_status;
		int pool, rx_bytes, err;
		void *data;

		rx_done++;
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
		data = (void *)phys_to_virt(phys_addr);

		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;
		bm_pool = &port->priv->bm_pools[pool];

		if (rx_status & MVPP2_RXD_ERR_SUMMARY)
			goto err_drop_frame;

		dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
					rx_bytes + MVPP2_MH_SIZE,
					DMA_FROM_DEVICE);
		prefetch(data);

		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		err = mvpp2_rx_refill(port, bm_pool, pool);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			goto err_drop_frame;
		}

		dma_unmap_single_attrs(dev->dev.parent, dma_addr,
				       bm_pool->buf_size, DMA_FROM_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(napi, skb);
		continue;

err_drop_frame:
		dev->stats.rx_errors++;
		mvpp2_rx_error(port, rx_desc);

		mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_todo;
}

static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);

	dma_addr_t buf_dma_addr =
		mvpp2_txdesc_dma_addr_get(port, desc);
	size_t buf_sz =
		mvpp2_txdesc_size_get(port, desc);
	if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
		dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
				 buf_sz, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}

static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_dma_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = skb_frag_address(frag);

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
		mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));

		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			mvpp2_txq_desc_put(txq);
			goto cleanup;
		}

		mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
		} else {
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;
cleanup:
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}

static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
				     struct net_device *dev,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int hdr_sz)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);

	addr = txq_pcpu->tso_headers_dma +
	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);

	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
					    MVPP2_TXD_F_DESC |
					    MVPP2_TXD_PADDING_DISABLE);
	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
}

static inline int mvpp2_tso_put_data(struct sk_buff *skb,
				     struct net_device *dev, struct tso_t *tso,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int sz, bool left, bool last)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t buf_dma_addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, sz);

	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
				      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		return -ENOMEM;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	if (!left) {
		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
		if (last) {
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
			return 0;
		}
	} else {
		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
	}

	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
	return 0;
}

static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
			struct mvpp2_tx_queue *txq,
			struct mvpp2_tx_queue *aggr_txq,
			struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct tso_t tso;
	int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i, len, descs = 0;

	if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
					     tso_count_descs(skb)))
		return 0;

	tso_start(skb, &tso);
	len = skb->len - hdr_sz;
	while (len > 0) {
		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
		char *hdr = txq_pcpu->tso_headers +
			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;

		len -= left;
		descs++;

		tso_build_hdr(skb, hdr, &tso, left, len == 0);
		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);

		while (left > 0) {
			int sz = min_t(int, tso.size, left);
			left -= sz;
			descs++;

			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
					       txq_pcpu, sz, left, len == 0))
				goto release;
			tso_build_data(skb, &tso, sz);
		}
	}

	return descs;

release:
	for (i = descs - 1; i >= 0; i--) {
		struct mvpp2_tx_desc *tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}
	return 0;
}

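/* Main Tx entry point (ndo_start_xmit callback) */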
static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_dma_addr;
	unsigned long flags = 0;
	unsigned int thread;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
	aggr_txq = &port->priv->aggr_txqs[thread];

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->tx_lock[thread], flags);

	if (skb_is_gso(skb)) {
		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
		goto out;
	}
	frags = skb_shinfo(skb)->nr_frags + 1;

	if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));

	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
	} else {
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);

		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port, txq, tx_desc);
			frags = 0;
		}
	}

out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		txq_pcpu->reserved_num -= frags;
		txq_pcpu->count += frags;
		aggr_txq->count += frags;

		wmb();
		mvpp2_aggr_txq_pend_desc_add(port, frags);

		if (txq_pcpu->count >= txq_pcpu->stop_threshold)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
	    txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);

		if (!port_pcpu->timer_scheduled) {
			port_pcpu->timer_scheduled = true;
			hrtimer_start(&port_pcpu->tx_done_timer,
				      MVPP2_TXDONE_HRTIMER_PERIOD_NS,
				      HRTIMER_MODE_REL_PINNED_SOFT);
		}
	}

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->tx_lock[thread], flags);

	return NETDEV_TX_OK;
}

static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}

static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);
	struct mvpp2_queue_vector *qv;
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());

	qv = container_of(napi, struct mvpp2_queue_vector, napi);

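	/* Rx/Tx cause register
	 *
	 * Each CPU/thread has its own copy. The low bits flag Rx
	 * queues with pending packets; the higher fields carry the
	 * Tx-done and miscellaneous causes (exact layout is given by
	 * the MVPP2_CAUSE_* masks used below).
	 */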
	cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
						MVPP2_ISR_RX_TX_CAUSE_REG(port->id));

	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_thread_write(port->priv, thread,
				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	if (port->has_tx_irqs) {
		cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
		if (cause_tx) {
			cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
			mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
		}
	}

	cause_rx = cause_rx_tx &
		   MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	cause_rx <<= qv->first_rxq;
	cause_rx |= qv->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, napi, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0)
			cause_rx &= ~(1 << rxq->logic_rxq);
	}

	if (budget > 0) {
		cause_rx = 0;
		napi_complete_done(napi, rx_done);

		mvpp2_qvec_interrupt_enable(qv);
	}
	qv->pending_cause_rx = cause_rx;
	return rx_done;
}

static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
{
	u32 ctrl3;

	mvpp2_mac_reset_assert(port);

	mvpp22_pcs_reset_assert(port);

	mvpp22_comphy_init(port);

	mvpp22_gop_init(port);

	mvpp22_pcs_reset_deassert(port);

	if (port->gop_id == 0) {
		ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
		ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;

		if (mvpp2_is_xlg(port->phy_interface))
			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
		else
			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;

		writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
	}

	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface))
		mvpp2_xlg_max_rx_size_set(port);
	else
		mvpp2_gmac_max_rx_size_set(port);
}

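/* Bring the port up: enable NAPI and interrupts, reconfigure the
 * MAC/GoP for the current interface mode, then start the queues.
 */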
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	int i;

	mvpp2_txp_max_tx_size_set(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_enable(&port->qvecs[i].napi);

	mvpp2_interrupts_enable(port);

	if (port->priv->hw_version == MVPP22)
		mvpp22_mode_reconfigure(port);

	if (port->phylink) {
		phylink_start(port->phylink);
	} else {
		struct phylink_link_state state = {
			.interface = port->phy_interface,
		};
		mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
		mvpp2_mac_link_up(&port->phylink_config, MLO_AN_INBAND,
				  port->phy_interface, NULL);
	}

	netif_tx_start_all_queues(port->dev);
}

static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	int i;

	mvpp2_interrupts_disable(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_disable(&port->qvecs[i].napi);

	if (port->phylink)
		phylink_stop(port->phylink);
	phy_power_off(port->comphy);
}

static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
		new_rx_pending = MVPP2_MAX_RXD_MAX;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
		new_tx_pending = MVPP2_MAX_TXD_MAX;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
		new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}

static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}

static int mvpp2_irqs_init(struct mvpp2_port *port)
{
	int err, i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
			qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
			if (!qv->mask) {
				err = -ENOMEM;
				goto err;
			}

			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
		}

		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
		if (err)
			goto err;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
			unsigned int cpu;

			for_each_present_cpu(cpu) {
				if (mvpp2_cpu_to_thread(port->priv, cpu) ==
				    qv->sw_thread_id)
					cpumask_set_cpu(cpu, qv->mask);
			}

			irq_set_affinity_hint(qv->irq, qv->mask);
		}
	}

	return 0;
err:
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		kfree(qv->mask);
		qv->mask = NULL;
		free_irq(qv->irq, qv);
	}

	return err;
}

static void mvpp2_irqs_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		kfree(qv->mask);
		qv->mask = NULL;
		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
		free_irq(qv->irq, qv);
	}
}

static bool mvpp22_rss_is_supported(void)
{
	return queue_mode == MVPP2_QDIST_MULTI_MODE;
}

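/* ndo_open: install parser entries, set up queues and IRQs, then
 * start the port.
 */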
static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	bool valid = false;
	int err;

	err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = mvpp2_irqs_init(port);
	if (err) {
		netdev_err(port->dev, "cannot init IRQs\n");
		goto err_cleanup_txqs;
	}

	if (port->of_node) {
		err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
		if (err) {
			netdev_err(port->dev, "could not attach PHY (%d)\n",
				   err);
			goto err_free_irq;
		}

		valid = true;
	}

	if (priv->hw_version == MVPP22 && port->link_irq) {
		err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
				  dev->name, port);
		if (err) {
			netdev_err(port->dev, "cannot request link IRQ %d\n",
				   port->link_irq);
			goto err_free_irq;
		}

		mvpp22_gop_setup_irq(port);

		netif_carrier_off(port->dev);

		valid = true;
	} else {
		port->link_irq = 0;
	}

	if (!valid) {
		netdev_err(port->dev,
			   "invalid configuration: no dt or link IRQ");
		err = -ENOENT;	/* don't return 0 from an error path */
		goto err_free_irq;
	}

	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, false);

	mvpp2_start_dev(port);

	queue_delayed_work(priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	return 0;

err_free_irq:
	mvpp2_irqs_deinit(port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}

static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	unsigned int thread;

	mvpp2_stop_dev(port);

	on_each_cpu(mvpp2_interrupts_mask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, true);

	if (port->phylink)
		phylink_disconnect_phy(port->phylink);
	if (port->link_irq)
		free_irq(port->link_irq, port);

	mvpp2_irqs_deinit(port);
	if (!port->has_tx_irqs) {
		for (thread = 0; thread < port->priv->nthreads; thread++) {
			port_pcpu = per_cpu_ptr(port->pcpu, thread);

			hrtimer_cancel(&port_pcpu->tx_done_timer);
			port_pcpu->timer_scheduled = false;
		}
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	cancel_delayed_work_sync(&port->stats_work);

	mvpp2_mac_reset_assert(port);
	mvpp22_pcs_reset_assert(port);

	return 0;
}

static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
					struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha;
	int ret;

	netdev_hw_addr_list_for_each(ha, list) {
		ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
		if (ret)
			return ret;
	}

	return 0;
}

static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
{
	if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		mvpp2_prs_vid_enable_filtering(port);
	else
		mvpp2_prs_vid_disable_filtering(port);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_UNI_CAST, enable);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_MULTI_CAST, enable);
}

static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	mvpp2_prs_mac_del_all(port);

	if (dev->flags & IFF_PROMISC) {
		mvpp2_set_rx_promisc(port, true);
		return;
	}

	mvpp2_set_rx_promisc(port, false);

	if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->uc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_UNI_CAST, true);

	if (dev->flags & IFF_ALLMULTI) {
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
		return;
	}

	if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->mc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
}

static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (err) {
		mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		netdev_err(dev, "failed to change MAC address\n");
	}
	return err;
}

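/* Stop all the ports, reconfigure the BM pools between per-CPU and
 * shared layout, then bring the ports back up.
 */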
static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
{
	int numbufs = MVPP2_BM_POOLS_NUM, i;
	struct mvpp2_port *port = NULL;
	bool status[MVPP2_MAX_PORTS];

	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		status[i] = netif_running(port->dev);
		if (status[i])
			mvpp2_stop(port->dev);
	}

	if (priv->percpu_pools)
		numbufs = port->nrxqs * 2;

	for (i = 0; i < numbufs; i++)
		mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);

	devm_kfree(port->dev->dev.parent, priv->bm_pools);
	priv->percpu_pools = percpu;
	mvpp2_bm_init(port->dev->dev.parent, priv);

	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		mvpp2_swf_bm_pool_init(port);
		if (status[i])
			mvpp2_open(port->dev);
	}

	return 0;
}

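/* ndo_change_mtu: may switch between per-CPU and shared BM pools,
 * depending on whether any port still needs jumbo (long-pool)
 * buffers.
 */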
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	bool running = netif_running(dev);
	struct mvpp2 *priv = port->priv;
	int err;

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
		if (priv->percpu_pools) {
			netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
			mvpp2_bm_switch_buffers(priv, false);
		}
	} else {
		bool jumbo = false;
		int i;

		for (i = 0; i < priv->port_count; i++)
			if (priv->port_list[i] != port &&
			    MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
			    MVPP2_BM_LONG_PKT_SIZE) {
				jumbo = true;
				break;
			}

		if (!jumbo) {
			dev_info(port->dev->dev.parent,
				 "all ports have a low MTU, switching to per-cpu buffers");
			mvpp2_bm_switch_buffers(priv, true);
		}
	}

	if (running)
		mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (err) {
		netdev_err(dev, "failed to change MTU\n");

		mvpp2_bm_update_mtu(dev, dev->mtu);
	} else {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
	}

	if (running) {
		mvpp2_start_dev(port);
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	return err;
}

static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}

static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_mii_ioctl(port->phylink, ifr, cmd);
}

static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret;

	ret = mvpp2_prs_vid_entry_add(port, vid);
	if (ret)
		netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
			   MVPP2_PRS_VLAN_FILT_MAX - 1);
	return ret;
}

static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);

	mvpp2_prs_vid_entry_remove(port, vid);
	return 0;
}

static int mvpp2_set_features(struct net_device *dev,
			      netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct mvpp2_port *port = netdev_priv(dev);

	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
			mvpp2_prs_vid_enable_filtering(port);
		} else {
			mvpp2_prs_vid_remove_all(port);

			mvpp2_prs_vid_disable_filtering(port);
		}
	}

	if (changed & NETIF_F_RXHASH) {
		if (features & NETIF_F_RXHASH)
			mvpp22_port_rss_enable(port);
		else
			mvpp22_port_rss_disable(port);
	}

	return 0;
}

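/* Ethtool methods */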
static int mvpp2_ethtool_nway_reset(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_nway_reset(port->phylink);
}

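/* Set interrupt coalescing for ethtool */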
4045static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
4046 struct ethtool_coalesce *c)
4047{
4048 struct mvpp2_port *port = netdev_priv(dev);
4049 int queue;
4050
4051 for (queue = 0; queue < port->nrxqs; queue++) {
4052 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
4053
4054 rxq->time_coal = c->rx_coalesce_usecs;
4055 rxq->pkts_coal = c->rx_max_coalesced_frames;
4056 mvpp2_rx_pkts_coal_set(port, rxq);
4057 mvpp2_rx_time_coal_set(port, rxq);
4058 }
4059
4060 if (port->has_tx_irqs) {
4061 port->tx_time_coal = c->tx_coalesce_usecs;
4062 mvpp2_tx_time_coal_set(port);
4063 }
4064
4065 for (queue = 0; queue < port->ntxqs; queue++) {
4066 struct mvpp2_tx_queue *txq = port->txqs[queue];
4067
4068 txq->done_pkts_coal = c->tx_max_coalesced_frames;
4069
4070 if (port->has_tx_irqs)
4071 mvpp2_tx_pkts_coal_set(port, txq);
4072 }
4073
4074 return 0;
4075}
4076
4077
4078static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
4079 struct ethtool_coalesce *c)
4080{
4081 struct mvpp2_port *port = netdev_priv(dev);
4082
4083 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
4084 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
4085 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
4086 c->tx_coalesce_usecs = port->tx_time_coal;
4087 return 0;
4088}
4089
4090static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
4091 struct ethtool_drvinfo *drvinfo)
4092{
4093 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
4094 sizeof(drvinfo->driver));
4095 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
4096 sizeof(drvinfo->version));
4097 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
4098 sizeof(drvinfo->bus_info));
4099}
4100
4101static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
4102 struct ethtool_ringparam *ring)
4103{
4104 struct mvpp2_port *port = netdev_priv(dev);
4105
4106 ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
4107 ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
4108 ring->rx_pending = port->rx_ring_size;
4109 ring->tx_pending = port->tx_ring_size;
4110}
4111
4112static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
4113 struct ethtool_ringparam *ring)
4114{
4115 struct mvpp2_port *port = netdev_priv(dev);
4116 u16 prev_rx_ring_size = port->rx_ring_size;
4117 u16 prev_tx_ring_size = port->tx_ring_size;
4118 int err;
4119
4120 err = mvpp2_check_ringparam_valid(dev, ring);
4121 if (err)
4122 return err;
4123
4124 if (!netif_running(dev)) {
4125 port->rx_ring_size = ring->rx_pending;
4126 port->tx_ring_size = ring->tx_pending;
4127 return 0;
4128 }
4129
4130
4131
4132
4133 mvpp2_stop_dev(port);
4134 mvpp2_cleanup_rxqs(port);
4135 mvpp2_cleanup_txqs(port);
4136
4137 port->rx_ring_size = ring->rx_pending;
4138 port->tx_ring_size = ring->tx_pending;
4139
4140 err = mvpp2_setup_rxqs(port);
4141 if (err) {
4142
4143 port->rx_ring_size = prev_rx_ring_size;
4144 ring->rx_pending = prev_rx_ring_size;
4145 err = mvpp2_setup_rxqs(port);
4146 if (err)
4147 goto err_out;
4148 }
4149 err = mvpp2_setup_txqs(port);
4150 if (err) {
4151
4152 port->tx_ring_size = prev_tx_ring_size;
4153 ring->tx_pending = prev_tx_ring_size;
4154 err = mvpp2_setup_txqs(port);
4155 if (err)
4156 goto err_clean_rxqs;
4157 }
4158
4159 mvpp2_start_dev(port);
4160 mvpp2_egress_enable(port);
4161 mvpp2_ingress_enable(port);
4162
4163 return 0;
4164
4165err_clean_rxqs:
4166 mvpp2_cleanup_rxqs(port);
4167err_out:
4168 netdev_err(dev, "failed to change ring parameters");
4169 return err;
4170}
4171
4172static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
4173 struct ethtool_pauseparam *pause)
4174{
4175 struct mvpp2_port *port = netdev_priv(dev);
4176
4177 if (!port->phylink)
4178 return;
4179
4180 phylink_ethtool_get_pauseparam(port->phylink, pause);
4181}
4182
4183static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
4184 struct ethtool_pauseparam *pause)
4185{
4186 struct mvpp2_port *port = netdev_priv(dev);
4187
4188 if (!port->phylink)
4189 return -ENOTSUPP;
4190
4191 return phylink_ethtool_set_pauseparam(port->phylink, pause);
4192}
4193
4194static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
4195 struct ethtool_link_ksettings *cmd)
4196{
4197 struct mvpp2_port *port = netdev_priv(dev);
4198
4199 if (!port->phylink)
4200 return -ENOTSUPP;
4201
4202 return phylink_ethtool_ksettings_get(port->phylink, cmd);
4203}
4204
4205static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
4206 const struct ethtool_link_ksettings *cmd)
4207{
4208 struct mvpp2_port *port = netdev_priv(dev);
4209
4210 if (!port->phylink)
4211 return -ENOTSUPP;
4212
4213 return phylink_ethtool_ksettings_set(port->phylink, cmd);
4214}
4215
4216static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
4217 struct ethtool_rxnfc *info, u32 *rules)
4218{
4219 struct mvpp2_port *port = netdev_priv(dev);
4220 int ret = 0, i, loc = 0;
4221
4222 if (!mvpp22_rss_is_supported())
4223 return -EOPNOTSUPP;
4224
4225 switch (info->cmd) {
4226 case ETHTOOL_GRXFH:
4227 ret = mvpp2_ethtool_rxfh_get(port, info);
4228 break;
4229 case ETHTOOL_GRXRINGS:
4230 info->data = port->nrxqs;
4231 break;
4232 case ETHTOOL_GRXCLSRLCNT:
4233 info->rule_cnt = port->n_rfs_rules;
4234 break;
4235 case ETHTOOL_GRXCLSRULE:
4236 ret = mvpp2_ethtool_cls_rule_get(port, info);
4237 break;
4238 case ETHTOOL_GRXCLSRLALL:
4239 for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
4240 if (port->rfs_rules[i])
4241 rules[loc++] = i;
4242 }
4243 break;
4244 default:
4245 return -ENOTSUPP;
4246 }
4247
4248 return ret;
4249}
4250
4251static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
4252 struct ethtool_rxnfc *info)
4253{
4254 struct mvpp2_port *port = netdev_priv(dev);
4255 int ret = 0;
4256
4257 if (!mvpp22_rss_is_supported())
4258 return -EOPNOTSUPP;
4259
4260 switch (info->cmd) {
4261 case ETHTOOL_SRXFH:
4262 ret = mvpp2_ethtool_rxfh_set(port, info);
4263 break;
4264 case ETHTOOL_SRXCLSRLINS:
4265 ret = mvpp2_ethtool_cls_rule_ins(port, info);
4266 break;
4267 case ETHTOOL_SRXCLSRLDEL:
4268 ret = mvpp2_ethtool_cls_rule_del(port, info);
4269 break;
4270 default:
4271 return -EOPNOTSUPP;
4272 }
4273 return ret;
4274}
4275
4276static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
4277{
4278 return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
4279}
4280
4281static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
4282 u8 *hfunc)
4283{
4284 struct mvpp2_port *port = netdev_priv(dev);
4285 int ret = 0;
4286
4287 if (!mvpp22_rss_is_supported())
4288 return -EOPNOTSUPP;
4289
4290 if (indir)
4291 ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
4292
4293 if (hfunc)
4294 *hfunc = ETH_RSS_HASH_CRC32;
4295
4296 return ret;
4297}
4298
4299static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
4300 const u8 *key, const u8 hfunc)
4301{
4302 struct mvpp2_port *port = netdev_priv(dev);
4303 int ret = 0;
4304
4305 if (!mvpp22_rss_is_supported())
4306 return -EOPNOTSUPP;
4307
4308 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
4309 return -EOPNOTSUPP;
4310
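	/* The CRC32-based hash function takes no key */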
4311 if (key)
4312 return -EOPNOTSUPP;
4313
4314 if (indir)
4315 ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
4316
4317 return ret;
4318}
4319
4320static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
4321 u8 *key, u8 *hfunc, u32 rss_context)
4322{
4323 struct mvpp2_port *port = netdev_priv(dev);
4324 int ret = 0;
4325
4326 if (!mvpp22_rss_is_supported())
4327 return -EOPNOTSUPP;
4328
4329 if (hfunc)
4330 *hfunc = ETH_RSS_HASH_CRC32;
4331
4332 if (indir)
4333 ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
4334
4335 return ret;
4336}
4337
4338static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
4339 const u32 *indir, const u8 *key,
4340 const u8 hfunc, u32 *rss_context,
4341 bool delete)
4342{
4343 struct mvpp2_port *port = netdev_priv(dev);
4344 int ret;
4345
4346 if (!mvpp22_rss_is_supported())
4347 return -EOPNOTSUPP;
4348
4349 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
4350 return -EOPNOTSUPP;
4351
4352 if (key)
4353 return -EOPNOTSUPP;
4354
4355 if (delete)
4356 return mvpp22_port_rss_ctx_delete(port, *rss_context);
4357
4358 if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
4359 ret = mvpp22_port_rss_ctx_create(port, rss_context);
4360 if (ret)
4361 return ret;
4362 }
4363
4364 return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
4365}
4366
/* Device ops */
4368static const struct net_device_ops mvpp2_netdev_ops = {
4369 .ndo_open = mvpp2_open,
4370 .ndo_stop = mvpp2_stop,
4371 .ndo_start_xmit = mvpp2_tx,
4372 .ndo_set_rx_mode = mvpp2_set_rx_mode,
4373 .ndo_set_mac_address = mvpp2_set_mac_address,
4374 .ndo_change_mtu = mvpp2_change_mtu,
4375 .ndo_get_stats64 = mvpp2_get_stats64,
4376 .ndo_do_ioctl = mvpp2_ioctl,
4377 .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
4378 .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
4379 .ndo_set_features = mvpp2_set_features,
4380};
4381
4382static const struct ethtool_ops mvpp2_eth_tool_ops = {
4383 .nway_reset = mvpp2_ethtool_nway_reset,
4384 .get_link = ethtool_op_get_link,
4385 .set_coalesce = mvpp2_ethtool_set_coalesce,
4386 .get_coalesce = mvpp2_ethtool_get_coalesce,
4387 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
4388 .get_ringparam = mvpp2_ethtool_get_ringparam,
4389 .set_ringparam = mvpp2_ethtool_set_ringparam,
4390 .get_strings = mvpp2_ethtool_get_strings,
4391 .get_ethtool_stats = mvpp2_ethtool_get_stats,
4392 .get_sset_count = mvpp2_ethtool_get_sset_count,
4393 .get_pauseparam = mvpp2_ethtool_get_pause_param,
4394 .set_pauseparam = mvpp2_ethtool_set_pause_param,
4395 .get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
4396 .set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
4397 .get_rxnfc = mvpp2_ethtool_get_rxnfc,
4398 .set_rxnfc = mvpp2_ethtool_set_rxnfc,
4399 .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
4400 .get_rxfh = mvpp2_ethtool_get_rxfh,
4401 .set_rxfh = mvpp2_ethtool_set_rxfh,
4402 .get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
4403 .set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
4404};
4405
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
 * had a single IRQ defined per-port.
 */
4409static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
4410 struct device_node *port_node)
4411{
4412 struct mvpp2_queue_vector *v = &port->qvecs[0];
4413
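	/* A single shared vector, runnable on any online CPU, covers all
	 * of the port's Rx queues.
	 */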
4414 v->first_rxq = 0;
4415 v->nrxqs = port->nrxqs;
4416 v->type = MVPP2_QUEUE_VECTOR_SHARED;
4417 v->sw_thread_id = 0;
4418 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
4419 v->port = port;
4420 v->irq = irq_of_parse_and_map(port_node, 0);
4421 if (v->irq <= 0)
4422 return -EINVAL;
4423 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
4424 NAPI_POLL_WEIGHT);
4425
4426 port->nqvecs = 1;
4427
4428 return 0;
4429}
4430
4431static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
4432 struct device_node *port_node)
4433{
4434 struct mvpp2 *priv = port->priv;
4435 struct mvpp2_queue_vector *v;
4436 int i, ret;
4437
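	/* One private vector per software thread; single queue mode adds
	 * an extra, shared vector that handles all the Rx queues.
	 */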
4438 switch (queue_mode) {
4439 case MVPP2_QDIST_SINGLE_MODE:
4440 port->nqvecs = priv->nthreads + 1;
4441 break;
4442 case MVPP2_QDIST_MULTI_MODE:
4443 port->nqvecs = priv->nthreads;
4444 break;
4445 }
4446
4447 for (i = 0; i < port->nqvecs; i++) {
4448 char irqname[16];
4449
4450 v = port->qvecs + i;
4451
4452 v->port = port;
4453 v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
4454 v->sw_thread_id = i;
4455 v->sw_thread_mask = BIT(i);
4456
4457 if (port->flags & MVPP2_F_DT_COMPAT)
4458 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
4459 else
4460 snprintf(irqname, sizeof(irqname), "hif%d", i);
4461
4462 if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
4463 v->first_rxq = i;
4464 v->nrxqs = 1;
4465 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
4466 i == (port->nqvecs - 1)) {
4467 v->first_rxq = 0;
4468 v->nrxqs = port->nrxqs;
4469 v->type = MVPP2_QUEUE_VECTOR_SHARED;
4470
4471 if (port->flags & MVPP2_F_DT_COMPAT)
4472 strncpy(irqname, "rx-shared", sizeof(irqname));
4473 }
4474
4475 if (port_node)
4476 v->irq = of_irq_get_byname(port_node, irqname);
4477 else
4478 v->irq = fwnode_irq_get(port->fwnode, i);
4479 if (v->irq <= 0) {
4480 ret = -EINVAL;
4481 goto err;
4482 }
4483
4484 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
4485 NAPI_POLL_WEIGHT);
4486 }
4487
4488 return 0;
4489
4490err:
4491 for (i = 0; i < port->nqvecs; i++)
4492 irq_dispose_mapping(port->qvecs[i].irq);
4493 return ret;
4494}
4495
4496static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
4497 struct device_node *port_node)
4498{
4499 if (port->has_tx_irqs)
4500 return mvpp2_multi_queue_vectors_init(port, port_node);
4501 else
4502 return mvpp2_simple_queue_vectors_init(port, port_node);
4503}
4504
4505static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
4506{
4507 int i;
4508
4509 for (i = 0; i < port->nqvecs; i++)
4510 irq_dispose_mapping(port->qvecs[i].irq);
4511}
4512
/* Configure Rx queue group interrupt for this port */
4514static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
4515{
4516 struct mvpp2 *priv = port->priv;
4517 u32 val;
4518 int i;
4519
4520 if (priv->hw_version == MVPP21) {
4521 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
4522 port->nrxqs);
4523 return;
4524 }
4525
	/* Handle the more complicated PPv2.2 case */
4527 for (i = 0; i < port->nqvecs; i++) {
4528 struct mvpp2_queue_vector *qv = port->qvecs + i;
4529
4530 if (!qv->nrxqs)
4531 continue;
4532
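		/* Map this vector's Rx queue range to its software thread */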
4533 val = qv->sw_thread_id;
4534 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
4535 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
4536
4537 val = qv->first_rxq;
4538 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
4539 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
4540 }
4541}
4542
/* Initialize port HW */
4544static int mvpp2_port_init(struct mvpp2_port *port)
4545{
4546 struct device *dev = port->dev->dev.parent;
4547 struct mvpp2 *priv = port->priv;
4548 struct mvpp2_txq_pcpu *txq_pcpu;
4549 unsigned int thread;
4550 int queue, err;
4551
	/* Checks for hardware constraints */
4553 if (port->first_rxq + port->nrxqs >
4554 MVPP2_MAX_PORTS * priv->max_port_rxqs)
4555 return -EINVAL;
4556
4557 if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
4558 return -EINVAL;
4559
	/* Disable port */
4561 mvpp2_egress_disable(port);
4562 mvpp2_port_disable(port);
4563
4564 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
4565
4566 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
4567 GFP_KERNEL);
4568 if (!port->txqs)
4569 return -ENOMEM;
4570
	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
4574 for (queue = 0; queue < port->ntxqs; queue++) {
4575 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
4576 struct mvpp2_tx_queue *txq;
4577
4578 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
4579 if (!txq) {
4580 err = -ENOMEM;
4581 goto err_free_percpu;
4582 }
4583
4584 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
4585 if (!txq->pcpu) {
4586 err = -ENOMEM;
4587 goto err_free_percpu;
4588 }
4589
4590 txq->id = queue_phy_id;
4591 txq->log_id = queue;
4592 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
4593 for (thread = 0; thread < priv->nthreads; thread++) {
4594 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4595 txq_pcpu->thread = thread;
4596 }
4597
4598 port->txqs[queue] = txq;
4599 }
4600
4601 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
4602 GFP_KERNEL);
4603 if (!port->rxqs) {
4604 err = -ENOMEM;
4605 goto err_free_percpu;
4606 }
4607
	/* Allocate and initialize Rx queue for this port */
4609 for (queue = 0; queue < port->nrxqs; queue++) {
4610 struct mvpp2_rx_queue *rxq;
4611
		/* Map physical Rx queue to port's logical Rx queue */
4613 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
4614 if (!rxq) {
4615 err = -ENOMEM;
4616 goto err_free_percpu;
4617 }
		/* Map this Rx queue to a physical queue */
4619 rxq->id = port->first_rxq + queue;
4620 rxq->port = port->id;
4621 rxq->logic_rxq = queue;
4622
4623 port->rxqs[queue] = rxq;
4624 }
4625
4626 mvpp2_rx_irqs_setup(port);
4627
	/* Create Rx descriptor rings */
4629 for (queue = 0; queue < port->nrxqs; queue++) {
4630 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
4631
4632 rxq->size = port->rx_ring_size;
4633 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
4634 rxq->time_coal = MVPP2_RX_COAL_USEC;
4635 }
4636
4637 mvpp2_ingress_disable(port);
4638
	/* Port default configuration */
4640 mvpp2_defaults_set(port);
4641
	/* Port's classifier configuration */
4643 mvpp2_cls_oversize_rxq_set(port);
4644 mvpp2_cls_port_config(port);
4645
4646 if (mvpp22_rss_is_supported())
4647 mvpp22_port_rss_init(port);
4648
	/* Provide an initial Rx packet size */
4650 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
4651
	/* Initialize pools for swf */
4653 err = mvpp2_swf_bm_pool_init(port);
4654 if (err)
4655 goto err_free_percpu;
4656
	/* Clear MIB counters statistics */
4658 mvpp2_read_stats(port);
4659 memset(port->ethtool_stats, 0,
4660 MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
4661
4662 return 0;
4663
4664err_free_percpu:
4665 for (queue = 0; queue < port->ntxqs; queue++) {
4666 if (!port->txqs[queue])
4667 continue;
4668 free_percpu(port->txqs[queue]->pcpu);
4669 }
4670 return err;
4671}
4672
4673static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
4674 unsigned long *flags)
4675{
4676 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
4677 "tx-cpu3" };
4678 int i;
4679
4680 for (i = 0; i < 5; i++)
4681 if (of_property_match_string(port_node, "interrupt-names",
4682 irqs[i]) < 0)
4683 return false;
4684
4685 *flags |= MVPP2_F_DT_COMPAT;
4686 return true;
4687}
4688
/* Checks if the port DT description has the required Tx interrupts:
 * - PPv2.1: there are no such interrupts.
 * - PPv2.2:
 *   - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
 *   - The new ones have: "hifX" with X in [0..8]
 *
 * All those variants are supported to keep the backward compatibility.
 */
4697static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
4698 struct device_node *port_node,
4699 unsigned long *flags)
4700{
4701 char name[5];
4702 int i;
4703
	/* ACPI */
4705 if (!port_node)
4706 return true;
4707
4708 if (priv->hw_version == MVPP21)
4709 return false;
4710
4711 if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
4712 return true;
4713
4714 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
4715 snprintf(name, 5, "hif%d", i);
4716 if (of_property_match_string(port_node, "interrupt-names",
4717 name) < 0)
4718 return false;
4719 }
4720
4721 return true;
4722}
4723
4724static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
4725 struct fwnode_handle *fwnode,
4726 char **mac_from)
4727{
4728 struct mvpp2_port *port = netdev_priv(dev);
4729 char hw_mac_addr[ETH_ALEN] = {0};
4730 char fw_mac_addr[ETH_ALEN];
4731
4732 if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
4733 *mac_from = "firmware node";
4734 ether_addr_copy(dev->dev_addr, fw_mac_addr);
4735 return;
4736 }
4737
4738 if (priv->hw_version == MVPP21) {
4739 mvpp21_get_mac_address(port, hw_mac_addr);
4740 if (is_valid_ether_addr(hw_mac_addr)) {
4741 *mac_from = "hardware";
4742 ether_addr_copy(dev->dev_addr, hw_mac_addr);
4743 return;
4744 }
4745 }
4746
4747 *mac_from = "random";
4748 eth_hw_addr_random(dev);
4749}
4750
4751static void mvpp2_phylink_validate(struct phylink_config *config,
4752 unsigned long *supported,
4753 struct phylink_link_state *state)
4754{
4755 struct mvpp2_port *port = container_of(config, struct mvpp2_port,
4756 phylink_config);
4757 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
4758
	/* Invalid combinations */
4760 switch (state->interface) {
4761 case PHY_INTERFACE_MODE_10GBASER:
4762 case PHY_INTERFACE_MODE_XAUI:
4763 if (port->gop_id != 0)
4764 goto empty_set;
4765 break;
4766 case PHY_INTERFACE_MODE_RGMII:
4767 case PHY_INTERFACE_MODE_RGMII_ID:
4768 case PHY_INTERFACE_MODE_RGMII_RXID:
4769 case PHY_INTERFACE_MODE_RGMII_TXID:
4770 if (port->priv->hw_version == MVPP22 && port->gop_id == 0)
4771 goto empty_set;
4772 break;
4773 default:
4774 break;
4775 }
4776
4777 phylink_set(mask, Autoneg);
4778 phylink_set_port_modes(mask);
4779 phylink_set(mask, Pause);
4780 phylink_set(mask, Asym_Pause);
4781
4782 switch (state->interface) {
4783 case PHY_INTERFACE_MODE_10GBASER:
4784 case PHY_INTERFACE_MODE_XAUI:
4785 case PHY_INTERFACE_MODE_NA:
4786 if (port->gop_id == 0) {
4787 phylink_set(mask, 10000baseT_Full);
4788 phylink_set(mask, 10000baseCR_Full);
4789 phylink_set(mask, 10000baseSR_Full);
4790 phylink_set(mask, 10000baseLR_Full);
4791 phylink_set(mask, 10000baseLRM_Full);
4792 phylink_set(mask, 10000baseER_Full);
4793 phylink_set(mask, 10000baseKR_Full);
4794 }
4795 if (state->interface != PHY_INTERFACE_MODE_NA)
4796 break;
		/* Fall-through */
4798 case PHY_INTERFACE_MODE_RGMII:
4799 case PHY_INTERFACE_MODE_RGMII_ID:
4800 case PHY_INTERFACE_MODE_RGMII_RXID:
4801 case PHY_INTERFACE_MODE_RGMII_TXID:
4802 case PHY_INTERFACE_MODE_SGMII:
4803 phylink_set(mask, 10baseT_Half);
4804 phylink_set(mask, 10baseT_Full);
4805 phylink_set(mask, 100baseT_Half);
4806 phylink_set(mask, 100baseT_Full);
4807 phylink_set(mask, 1000baseT_Full);
4808 phylink_set(mask, 1000baseX_Full);
4809 if (state->interface != PHY_INTERFACE_MODE_NA)
4810 break;
		/* Fall-through */
4812 case PHY_INTERFACE_MODE_1000BASEX:
4813 case PHY_INTERFACE_MODE_2500BASEX:
4814 if (port->comphy ||
4815 state->interface != PHY_INTERFACE_MODE_2500BASEX) {
4816 phylink_set(mask, 1000baseT_Full);
4817 phylink_set(mask, 1000baseX_Full);
4818 }
4819 if (port->comphy ||
4820 state->interface == PHY_INTERFACE_MODE_2500BASEX) {
4821 phylink_set(mask, 2500baseT_Full);
4822 phylink_set(mask, 2500baseX_Full);
4823 }
4824 break;
4825 default:
4826 goto empty_set;
4827 }
4828
4829 bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
4830 bitmap_and(state->advertising, state->advertising, mask,
4831 __ETHTOOL_LINK_MODE_MASK_NBITS);
4832
4833 phylink_helper_basex_speed(state);
4834 return;
4835
4836empty_set:
4837 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
4838}
4839
4840static void mvpp22_xlg_pcs_get_state(struct mvpp2_port *port,
4841 struct phylink_link_state *state)
4842{
4843 u32 val;
4844
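	/* The XLG MAC only operates at 10G, full duplex */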
4845 state->speed = SPEED_10000;
	state->duplex = DUPLEX_FULL;
4847 state->an_complete = 1;
4848
4849 val = readl(port->base + MVPP22_XLG_STATUS);
4850 state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
4851
4852 state->pause = 0;
4853 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4854 if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
4855 state->pause |= MLO_PAUSE_TX;
4856 if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
4857 state->pause |= MLO_PAUSE_RX;
4858}
4859
4860static void mvpp2_gmac_pcs_get_state(struct mvpp2_port *port,
4861 struct phylink_link_state *state)
4862{
4863 u32 val;
4864
4865 val = readl(port->base + MVPP2_GMAC_STATUS0);
4866
4867 state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
4868 state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
4869 state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
4870
4871 switch (port->phy_interface) {
4872 case PHY_INTERFACE_MODE_1000BASEX:
4873 state->speed = SPEED_1000;
4874 break;
4875 case PHY_INTERFACE_MODE_2500BASEX:
4876 state->speed = SPEED_2500;
4877 break;
4878 default:
4879 if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
4880 state->speed = SPEED_1000;
4881 else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
4882 state->speed = SPEED_100;
4883 else
4884 state->speed = SPEED_10;
4885 }
4886
4887 state->pause = 0;
4888 if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
4889 state->pause |= MLO_PAUSE_RX;
4890 if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
4891 state->pause |= MLO_PAUSE_TX;
4892}
4893
4894static void mvpp2_phylink_mac_pcs_get_state(struct phylink_config *config,
4895 struct phylink_link_state *state)
4896{
4897 struct mvpp2_port *port = container_of(config, struct mvpp2_port,
4898 phylink_config);
4899
4900 if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
		u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);

		mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
4903
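		/* The 10G interface is handled by the XLG MAC */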
4904 if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
4905 mvpp22_xlg_pcs_get_state(port, state);
4906 return;
4907 }
4908 }
4909
4910 mvpp2_gmac_pcs_get_state(port, state);
4911}
4912
4913static void mvpp2_mac_an_restart(struct phylink_config *config)
4914{
4915 struct mvpp2_port *port = container_of(config, struct mvpp2_port,
4916 phylink_config);
4917 u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4918
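	/* Toggle the restart bit to kick off a new in-band AN cycle */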
4919 writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
4920 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4921 writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
4922 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4923}
4924
4925static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
4926 const struct phylink_link_state *state)
4927{
4928 u32 old_ctrl0, ctrl0;
4929 u32 old_ctrl4, ctrl4;
4930
4931 old_ctrl0 = ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG);
4932 old_ctrl4 = ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG);
4933
4934 ctrl0 |= MVPP22_XLG_CTRL0_MAC_RESET_DIS;
4935
4936 if (state->pause & MLO_PAUSE_TX)
4937 ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
4938 else
4939 ctrl0 &= ~MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
4940
4941 if (state->pause & MLO_PAUSE_RX)
4942 ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
4943 else
4944 ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
4945
4946 ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
4947 MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
4948 ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
4949
4950 if (old_ctrl0 != ctrl0)
4951 writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
4952 if (old_ctrl4 != ctrl4)
4953 writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
4954
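	/* If the MAC was in reset, wait for the reset de-assert to take
	 * effect before returning.
	 */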
4955 if (!(old_ctrl0 & MVPP22_XLG_CTRL0_MAC_RESET_DIS)) {
4956 while (!(readl(port->base + MVPP22_XLG_CTRL0_REG) &
4957 MVPP22_XLG_CTRL0_MAC_RESET_DIS))
4958 continue;
4959 }
4960}
4961
4962static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
4963 const struct phylink_link_state *state)
4964{
4965 u32 old_an, an;
4966 u32 old_ctrl0, ctrl0;
4967 u32 old_ctrl2, ctrl2;
4968 u32 old_ctrl4, ctrl4;
4969
4970 old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4971 old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4972 old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4973 old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4974
4975 an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED |
4976 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
4977 MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
4978 MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN |
4979 MVPP2_GMAC_IN_BAND_AUTONEG | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS);
4980 ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
4981 ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK |
4982 MVPP2_GMAC_PCS_ENABLE_MASK);
4983 ctrl4 &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
4984
	/* Configure port type */
4986 if (phy_interface_mode_is_8023z(state->interface)) {
4987 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
4988 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4989 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
4990 MVPP22_CTRL4_DP_CLK_SEL |
4991 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4992 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
4993 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
4994 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4995 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
4996 MVPP22_CTRL4_DP_CLK_SEL |
4997 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4998 } else if (phy_interface_mode_is_rgmii(state->interface)) {
4999 ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
5000 ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
5001 MVPP22_CTRL4_SYNC_BYPASS_DIS |
5002 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
5003 }
5004
	/* Configure advertisement bits */
5006 if (phylink_test(state->advertising, Pause))
5007 an |= MVPP2_GMAC_FC_ADV_EN;
5008 if (phylink_test(state->advertising, Asym_Pause))
5009 an |= MVPP2_GMAC_FC_ADV_ASM_EN;
5010
	/* Configure negotiation style */
5012 if (!phylink_autoneg_inband(mode)) {
		/* Phy or fixed speed - no in-band AN */
5014 if (state->duplex)
5015 an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5016
5017 if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
5018 an |= MVPP2_GMAC_CONFIG_GMII_SPEED;
5019 else if (state->speed == SPEED_100)
5020 an |= MVPP2_GMAC_CONFIG_MII_SPEED;
5021
5022 if (state->pause & MLO_PAUSE_TX)
5023 ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
5024 if (state->pause & MLO_PAUSE_RX)
5025 ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
5026 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
		/* SGMII in-band mode receives the speed and duplex from
		 * the PHY. Flow control information is not received.
		 */
5029 an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS);
5030 an |= MVPP2_GMAC_IN_BAND_AUTONEG |
5031 MVPP2_GMAC_AN_SPEED_EN |
5032 MVPP2_GMAC_AN_DUPLEX_EN;
5033
5034 if (state->pause & MLO_PAUSE_TX)
5035 ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
5036 if (state->pause & MLO_PAUSE_RX)
5037 ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
5038 } else if (phy_interface_mode_is_8023z(state->interface)) {
		/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor
		 * can they negotiate duplex: they are always operating with
		 * a fixed speed of 1000/2500Mbps in full duplex, so force
		 * 1000/2500 speed and full duplex here.
		 */
5044 ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
5045 an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS);
5046 an |= MVPP2_GMAC_IN_BAND_AUTONEG |
5047 MVPP2_GMAC_CONFIG_GMII_SPEED |
5048 MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5049
		if ((state->pause & MLO_PAUSE_AN) && state->an_enabled) {
5051 an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
5052 } else {
5053 if (state->pause & MLO_PAUSE_TX)
5054 ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
5055 if (state->pause & MLO_PAUSE_RX)
5056 ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
5057 }
5058 }
5059
	/* Some fields of the auto-negotiation register require the port
	 * to be down when their value is updated.
	 */
5063#define MVPP2_GMAC_AN_PORT_DOWN_MASK \
5064 (MVPP2_GMAC_IN_BAND_AUTONEG | \
5065 MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | \
5066 MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | \
5067 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_CONFIG_FULL_DUPLEX | \
5068 MVPP2_GMAC_AN_DUPLEX_EN)
5069
5070 if ((old_ctrl0 ^ ctrl0) & MVPP2_GMAC_PORT_TYPE_MASK ||
5071 (old_ctrl2 ^ ctrl2) & MVPP2_GMAC_INBAND_AN_MASK ||
5072 (old_an ^ an) & MVPP2_GMAC_AN_PORT_DOWN_MASK) {
		/* Force link down */
5074 old_an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
5075 old_an |= MVPP2_GMAC_FORCE_LINK_DOWN;
5076 writel(old_an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5077
		/* Put the GMAC into reset while the configuration is
		 * updated; the reset is de-asserted again when ctrl2 is
		 * written back below.
		 */
5081 old_ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
5082 writel(old_ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
5083 }
5084
5085 if (old_ctrl0 != ctrl0)
5086 writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
5087 if (old_ctrl2 != ctrl2)
5088 writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
5089 if (old_ctrl4 != ctrl4)
5090 writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
5091 if (old_an != an)
5092 writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5093
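	/* Wait for the GMAC to come out of reset */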
5094 if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) {
5095 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5096 MVPP2_GMAC_PORT_RESET_MASK)
5097 continue;
5098 }
5099}
5100
5101static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
5102 const struct phylink_link_state *state)
5103{
5104 struct net_device *dev = to_net_dev(config->dev);
5105 struct mvpp2_port *port = netdev_priv(dev);
5106 bool change_interface = port->phy_interface != state->interface;
5107
	/* Check for invalid configuration */
5109 if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) {
5110 netdev_err(dev, "Invalid mode on %s\n", dev->name);
5111 return;
5112 }
5113
	/* Make sure the port is disabled when reconfiguring the mode */
5115 mvpp2_port_disable(port);
5116
5117 if (port->priv->hw_version == MVPP22 && change_interface) {
5118 mvpp22_gop_mask_irq(port);
5119
5120 port->phy_interface = state->interface;
5121
		/* Reconfigure the serdes lanes */
5123 phy_power_off(port->comphy);
5124 mvpp22_mode_reconfigure(port);
5125 }
5126
	/* mac (re)configuration */
5128 if (mvpp2_is_xlg(state->interface))
5129 mvpp2_xlg_config(port, mode, state);
5130 else if (phy_interface_mode_is_rgmii(state->interface) ||
5131 phy_interface_mode_is_8023z(state->interface) ||
5132 state->interface == PHY_INTERFACE_MODE_SGMII)
5133 mvpp2_gmac_config(port, mode, state);
5134
5135 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
5136 mvpp2_port_loopback_set(port, state);
5137
5138 if (port->priv->hw_version == MVPP22 && change_interface)
5139 mvpp22_gop_unmask_irq(port);
5140
5141 mvpp2_port_enable(port);
5142}
5143
5144static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
5145 phy_interface_t interface, struct phy_device *phy)
5146{
5147 struct net_device *dev = to_net_dev(config->dev);
5148 struct mvpp2_port *port = netdev_priv(dev);
5149 u32 val;
5150
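	/* Without in-band AN, the MAC has to be told to force the link up */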
5151 if (!phylink_autoneg_inband(mode)) {
5152 if (mvpp2_is_xlg(interface)) {
5153 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5154 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
5155 val |= MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
5156 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5157 } else {
5158 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5159 val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
5160 val |= MVPP2_GMAC_FORCE_LINK_PASS;
5161 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5162 }
5163 }
5164
5165 mvpp2_port_enable(port);
5166
5167 mvpp2_egress_enable(port);
5168 mvpp2_ingress_enable(port);
5169 netif_tx_wake_all_queues(dev);
5170}
5171
5172static void mvpp2_mac_link_down(struct phylink_config *config,
5173 unsigned int mode, phy_interface_t interface)
5174{
5175 struct net_device *dev = to_net_dev(config->dev);
5176 struct mvpp2_port *port = netdev_priv(dev);
5177 u32 val;
5178
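	/* Without in-band AN, the MAC has to be told to force the link down */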
5179 if (!phylink_autoneg_inband(mode)) {
5180 if (mvpp2_is_xlg(interface)) {
5181 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5182 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
5183 val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
5184 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5185 } else {
5186 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5187 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
5188 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
5189 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5190 }
5191 }
5192
5193 netif_tx_stop_all_queues(dev);
5194 mvpp2_egress_disable(port);
5195 mvpp2_ingress_disable(port);
5196
5197 mvpp2_port_disable(port);
5198}
5199
5200static const struct phylink_mac_ops mvpp2_phylink_ops = {
5201 .validate = mvpp2_phylink_validate,
5202 .mac_pcs_get_state = mvpp2_phylink_mac_pcs_get_state,
5203 .mac_an_restart = mvpp2_mac_an_restart,
5204 .mac_config = mvpp2_mac_config,
5205 .mac_link_up = mvpp2_mac_link_up,
5206 .mac_link_down = mvpp2_mac_link_down,
5207};
5208
/* Ports initialization */
5210static int mvpp2_port_probe(struct platform_device *pdev,
5211 struct fwnode_handle *port_fwnode,
5212 struct mvpp2 *priv)
5213{
5214 struct phy *comphy = NULL;
5215 struct mvpp2_port *port;
5216 struct mvpp2_port_pcpu *port_pcpu;
5217 struct device_node *port_node = to_of_node(port_fwnode);
5218 netdev_features_t features;
5219 struct net_device *dev;
5220 struct phylink *phylink;
5221 char *mac_from = "";
5222 unsigned int ntxqs, nrxqs, thread;
5223 unsigned long flags = 0;
5224 bool has_tx_irqs;
5225 u32 id;
5226 int phy_mode;
5227 int err, i;
5228
5229 has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
5230 if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
5231 dev_err(&pdev->dev,
5232 "not enough IRQs to support multi queue mode\n");
5233 return -EINVAL;
5234 }
5235
5236 ntxqs = MVPP2_MAX_TXQ;
5237 nrxqs = mvpp2_get_nrxqs(priv);
5238
5239 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
5240 if (!dev)
5241 return -ENOMEM;
5242
5243 phy_mode = fwnode_get_phy_mode(port_fwnode);
5244 if (phy_mode < 0) {
5245 dev_err(&pdev->dev, "incorrect phy mode\n");
5246 err = phy_mode;
5247 goto err_free_netdev;
5248 }
5249
	/* Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing
	 * device trees; the DT binding historically used 10GBASE-KR where
	 * 10GBASE-R was meant, so keep accepting the old value and handle
	 * it as 10GBASE-R internally.
	 */
5256 if (phy_mode == PHY_INTERFACE_MODE_10GKR)
5257 phy_mode = PHY_INTERFACE_MODE_10GBASER;
5258
5259 if (port_node) {
5260 comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
5261 if (IS_ERR(comphy)) {
5262 if (PTR_ERR(comphy) == -EPROBE_DEFER) {
5263 err = -EPROBE_DEFER;
5264 goto err_free_netdev;
5265 }
5266 comphy = NULL;
5267 }
5268 }
5269
5270 if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
5271 err = -EINVAL;
5272 dev_err(&pdev->dev, "missing port-id value\n");
5273 goto err_free_netdev;
5274 }
5275
5276 dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
5277 dev->watchdog_timeo = 5 * HZ;
5278 dev->netdev_ops = &mvpp2_netdev_ops;
5279 dev->ethtool_ops = &mvpp2_eth_tool_ops;
5280
5281 port = netdev_priv(dev);
5282 port->dev = dev;
5283 port->fwnode = port_fwnode;
5284 port->has_phy = !!of_find_property(port_node, "phy", NULL);
5285 port->ntxqs = ntxqs;
5286 port->nrxqs = nrxqs;
5287 port->priv = priv;
5288 port->has_tx_irqs = has_tx_irqs;
5289 port->flags = flags;
5290
5291 err = mvpp2_queue_vectors_init(port, port_node);
5292 if (err)
5293 goto err_free_netdev;
5294
5295 if (port_node)
5296 port->link_irq = of_irq_get_byname(port_node, "link");
5297 else
5298 port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
5299 if (port->link_irq == -EPROBE_DEFER) {
5300 err = -EPROBE_DEFER;
5301 goto err_deinit_qvecs;
5302 }
5303 if (port->link_irq <= 0)
		/* the link irq is optional */
5305 port->link_irq = 0;
5306
5307 if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
5308 port->flags |= MVPP2_F_LOOPBACK;
5309
5310 port->id = id;
5311 if (priv->hw_version == MVPP21)
5312 port->first_rxq = port->id * port->nrxqs;
5313 else
5314 port->first_rxq = port->id * priv->max_port_rxqs;
5315
5316 port->of_node = port_node;
5317 port->phy_interface = phy_mode;
5318 port->comphy = comphy;
5319
5320 if (priv->hw_version == MVPP21) {
5321 port->base = devm_platform_ioremap_resource(pdev, 2 + id);
5322 if (IS_ERR(port->base)) {
5323 err = PTR_ERR(port->base);
5324 goto err_free_irq;
5325 }
5326
5327 port->stats_base = port->priv->lms_base +
5328 MVPP21_MIB_COUNTERS_OFFSET +
5329 port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
5330 } else {
5331 if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
5332 &port->gop_id)) {
5333 err = -EINVAL;
5334 dev_err(&pdev->dev, "missing gop-port-id value\n");
5335 goto err_deinit_qvecs;
5336 }
5337
5338 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
5339 port->stats_base = port->priv->iface_base +
5340 MVPP22_MIB_COUNTERS_OFFSET +
5341 port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
5342 }
5343
	/* Alloc per-cpu and ethtool stats */
5345 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
5346 if (!port->stats) {
5347 err = -ENOMEM;
5348 goto err_free_irq;
5349 }
5350
5351 port->ethtool_stats = devm_kcalloc(&pdev->dev,
5352 MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
5353 sizeof(u64), GFP_KERNEL);
5354 if (!port->ethtool_stats) {
5355 err = -ENOMEM;
5356 goto err_free_stats;
5357 }
5358
5359 mutex_init(&port->gather_stats_lock);
5360 INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
5361
5362 mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
5363
5364 port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
5365 port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
5366 SET_NETDEV_DEV(dev, &pdev->dev);
5367
5368 err = mvpp2_port_init(port);
5369 if (err < 0) {
5370 dev_err(&pdev->dev, "failed to init port %d\n", id);
5371 goto err_free_stats;
5372 }
5373
5374 mvpp2_port_periodic_xon_disable(port);
5375
5376 mvpp2_mac_reset_assert(port);
5377 mvpp22_pcs_reset_assert(port);
5378
5379 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
5380 if (!port->pcpu) {
5381 err = -ENOMEM;
5382 goto err_free_txq_pcpu;
5383 }
5384
5385 if (!port->has_tx_irqs) {
5386 for (thread = 0; thread < priv->nthreads; thread++) {
5387 port_pcpu = per_cpu_ptr(port->pcpu, thread);
5388
5389 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
5390 HRTIMER_MODE_REL_PINNED_SOFT);
5391 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
5392 port_pcpu->timer_scheduled = false;
5393 port_pcpu->dev = dev;
5394 }
5395 }
5396
5397 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5398 NETIF_F_TSO;
5399 dev->features = features | NETIF_F_RXCSUM;
5400 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
5401 NETIF_F_HW_VLAN_CTAG_FILTER;
5402
5403 if (mvpp22_rss_is_supported()) {
5404 dev->hw_features |= NETIF_F_RXHASH;
5405 dev->features |= NETIF_F_NTUPLE;
5406 }
5407
5408 if (!port->priv->percpu_pools)
5409 mvpp2_set_hw_csum(port, port->pool_long->id);
5410
5411 dev->vlan_features |= features;
5412 dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
5413 dev->priv_flags |= IFF_UNICAST_FLT;
5414
	/* MTU range: 68 - 9704 */
5416 dev->min_mtu = ETH_MIN_MTU;
	/* 9704 == 9728 - 20 and rounding to 8 */
5418 dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
5419 dev->dev.of_node = port_node;
5420
	/* Phylink isn't used w/ ACPI as of now */
5422 if (port_node) {
5423 port->phylink_config.dev = &dev->dev;
5424 port->phylink_config.type = PHYLINK_NETDEV;
5425
5426 phylink = phylink_create(&port->phylink_config, port_fwnode,
5427 phy_mode, &mvpp2_phylink_ops);
5428 if (IS_ERR(phylink)) {
5429 err = PTR_ERR(phylink);
5430 goto err_free_port_pcpu;
5431 }
5432 port->phylink = phylink;
5433 } else {
5434 port->phylink = NULL;
5435 }
5436
	/* Cycle the comphy to power it down while the port is not in use -
	 * don't worry about an error powering it up here, as the serdes is
	 * powered up again when the port is brought up.
	 */
5441 if (port->comphy) {
5442 err = mvpp22_comphy_init(port);
5443 if (err == 0)
5444 phy_power_off(port->comphy);
5445 }
5446
5447 err = register_netdev(dev);
5448 if (err < 0) {
5449 dev_err(&pdev->dev, "failed to register netdev\n");
5450 goto err_phylink;
5451 }
5452 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
5453
5454 priv->port_list[priv->port_count++] = port;
5455
5456 return 0;
5457
5458err_phylink:
5459 if (port->phylink)
5460 phylink_destroy(port->phylink);
5461err_free_port_pcpu:
5462 free_percpu(port->pcpu);
5463err_free_txq_pcpu:
5464 for (i = 0; i < port->ntxqs; i++)
5465 free_percpu(port->txqs[i]->pcpu);
5466err_free_stats:
5467 free_percpu(port->stats);
5468err_free_irq:
5469 if (port->link_irq)
5470 irq_dispose_mapping(port->link_irq);
5471err_deinit_qvecs:
5472 mvpp2_queue_vectors_deinit(port);
5473err_free_netdev:
5474 free_netdev(dev);
5475 return err;
5476}
5477
/* Ports removal routine */
5479static void mvpp2_port_remove(struct mvpp2_port *port)
5480{
5481 int i;
5482
5483 unregister_netdev(port->dev);
5484 if (port->phylink)
5485 phylink_destroy(port->phylink);
5486 free_percpu(port->pcpu);
5487 free_percpu(port->stats);
5488 for (i = 0; i < port->ntxqs; i++)
5489 free_percpu(port->txqs[i]->pcpu);
5490 mvpp2_queue_vectors_deinit(port);
5491 if (port->link_irq)
5492 irq_dispose_mapping(port->link_irq);
5493 free_netdev(port->dev);
5494}
5495
/* Initialize decoding windows */
5497static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
5498 struct mvpp2 *priv)
5499{
5500 u32 win_enable;
5501 int i;
5502
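	/* Start by disabling and clearing all the windows */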
5503 for (i = 0; i < 6; i++) {
5504 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
5505 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
5506
5507 if (i < 4)
5508 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
5509 }
5510
5511 win_enable = 0;
5512
5513 for (i = 0; i < dram->num_cs; i++) {
5514 const struct mbus_dram_window *cs = dram->cs + i;
5515
5516 mvpp2_write(priv, MVPP2_WIN_BASE(i),
5517 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
5518 dram->mbus_dram_target_id);
5519
5520 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
5521 (cs->size - 1) & 0xffff0000);
5522
5523 win_enable |= (1 << i);
5524 }
5525
5526 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
5527}
5528
/* Initialize Rx FIFO's */
5530static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
5531{
5532 int port;
5533
5534 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
5535 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
5536 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
5537 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
5538 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
5539 }
5540
5541 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
5542 MVPP2_RX_FIFO_PORT_MIN_PKT);
5543 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
5544}
5545
5546static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
5547{
5548 int port;
5549
	/* The FIFO size parameters are set depending on the maximum speed a
	 * given port can handle:
	 * - Port 0: 10Gbps
	 * - Port 1: 2.5Gbps
	 * - Ports 2 and 3: 1Gbps
	 */
5557 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
5558 MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
5559 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
5560 MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
5561
5562 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
5563 MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
5564 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
5565 MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
5566
5567 for (port = 2; port < MVPP2_MAX_PORTS; port++) {
5568 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
5569 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
5570 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
5571 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
5572 }
5573
5574 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
5575 MVPP2_RX_FIFO_PORT_MIN_PKT);
5576 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
5577}
5578
/* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G
 * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
 * configure its Tx FIFO size to 10kB and the other ports' Tx FIFO size
 * to 3kB.
 */
5583static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
5584{
5585 int port, size, thrs;
5586
5587 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
5588 if (port == 0) {
5589 size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
5590 thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
5591 } else {
5592 size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
5593 thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
5594 }
5595 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
5596 mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
5597 }
5598}
5599
5600static void mvpp2_axi_init(struct mvpp2 *priv)
5601{
5602 u32 val, rdval, wrval;
5603
5604 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
5605
	/* AXI Bridge Configuration */
5607
5608 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
5609 << MVPP22_AXI_ATTR_CACHE_OFFS;
5610 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
5611 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
5612
5613 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
5614 << MVPP22_AXI_ATTR_CACHE_OFFS;
5615 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
5616 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
5617
	/* BM */
5619 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
5620 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
5621
	/* Descriptor */
5623 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
5624 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
5625 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
5626 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
5627
	/* Buffer Data */
5629 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
5630 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
5631
5632 val = MVPP22_AXI_CODE_CACHE_NON_CACHE
5633 << MVPP22_AXI_CODE_CACHE_OFFS;
5634 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
5635 << MVPP22_AXI_CODE_DOMAIN_OFFS;
5636 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
5637 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
5638
5639 val = MVPP22_AXI_CODE_CACHE_RD_CACHE
5640 << MVPP22_AXI_CODE_CACHE_OFFS;
5641 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
5642 << MVPP22_AXI_CODE_DOMAIN_OFFS;
5643
5644 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
5645
5646 val = MVPP22_AXI_CODE_CACHE_WR_CACHE
5647 << MVPP22_AXI_CODE_CACHE_OFFS;
5648 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
5649 << MVPP22_AXI_CODE_DOMAIN_OFFS;
5650
5651 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
5652}
5653
/* Initialize network controller common part HW */
5655static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
5656{
5657 const struct mbus_dram_target_info *dram_target_info;
5658 int err, i;
5659 u32 val;
5660
	/* MBUS windows configuration */
5662 dram_target_info = mv_mbus_dram_info();
5663 if (dram_target_info)
5664 mvpp2_conf_mbus_windows(dram_target_info, priv);
5665
5666 if (priv->hw_version == MVPP22)
5667 mvpp2_axi_init(priv);
5668
	/* Disable HW PHY polling */
5670 if (priv->hw_version == MVPP21) {
5671 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
5672 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
5673 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
5674 } else {
5675 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
5676 val &= ~MVPP22_SMI_POLLING_EN;
5677 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
5678 }
5679
	/* Allocate and initialize aggregated TXQs */
5681 priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
5682 sizeof(*priv->aggr_txqs),
5683 GFP_KERNEL);
5684 if (!priv->aggr_txqs)
5685 return -ENOMEM;
5686
5687 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
5688 priv->aggr_txqs[i].id = i;
5689 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
5690 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
5691 if (err < 0)
5692 return err;
5693 }
5694
	/* Rx Fifo Init */
5696 if (priv->hw_version == MVPP21) {
5697 mvpp2_rx_fifo_init(priv);
5698 } else {
5699 mvpp22_rx_fifo_init(priv);
5700 mvpp22_tx_fifo_init(priv);
5701 }
5702
5703 if (priv->hw_version == MVPP21)
5704 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
5705 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
5706
	/* Allow cache snoop when transmitting packets */
5708 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
5709
	/* Buffer Manager initialization */
5711 err = mvpp2_bm_init(&pdev->dev, priv);
5712 if (err < 0)
5713 return err;
5714
	/* Parser default initialization */
5716 err = mvpp2_prs_default_init(pdev, priv);
5717 if (err < 0)
5718 return err;
5719
	/* Classifier default initialization */
5721 mvpp2_cls_init(priv);
5722
5723 return 0;
5724}
5725
5726static int mvpp2_probe(struct platform_device *pdev)
5727{
5728 const struct acpi_device_id *acpi_id;
5729 struct fwnode_handle *fwnode = pdev->dev.fwnode;
5730 struct fwnode_handle *port_fwnode;
5731 struct mvpp2 *priv;
5732 struct resource *res;
5733 void __iomem *base;
5734 int i, shared;
5735 int err;
5736
5737 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
5738 if (!priv)
5739 return -ENOMEM;
5740
5741 if (has_acpi_companion(&pdev->dev)) {
5742 acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
5743 &pdev->dev);
5744 if (!acpi_id)
5745 return -EINVAL;
5746 priv->hw_version = (unsigned long)acpi_id->driver_data;
5747 } else {
5748 priv->hw_version =
5749 (unsigned long)of_device_get_match_data(&pdev->dev);
5750 }
5751
	/* Multi queue mode isn't supported on PPv2.1, fall back to single
	 * mode
	 */
5755 if (priv->hw_version == MVPP21)
5756 queue_mode = MVPP2_QDIST_SINGLE_MODE;
5757
5758 base = devm_platform_ioremap_resource(pdev, 0);
5759 if (IS_ERR(base))
5760 return PTR_ERR(base);
5761
5762 if (priv->hw_version == MVPP21) {
5763 priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
5764 if (IS_ERR(priv->lms_base))
5765 return PTR_ERR(priv->lms_base);
5766 } else {
5767 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
5768 if (has_acpi_companion(&pdev->dev)) {
			/* In case the MDIO memory region is declared in
			 * the ACPI, it can already appear as 'in-use' in
			 * the OS. Because it is overlapped by the second
			 * region of the network controller, make sure it
			 * is released before requesting it again. The
			 * driver takes care to avoid concurrent access to
			 * this memory region.
			 */
5777 release_resource(res);
5778 }
5779 priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
5780 if (IS_ERR(priv->iface_base))
5781 return PTR_ERR(priv->iface_base);
5782 }
5783
5784 if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
5785 priv->sysctrl_base =
5786 syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
5787 "marvell,system-controller");
5788 if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for DT
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
5794 priv->sysctrl_base = NULL;
5795 }
5796
5797 if (priv->hw_version == MVPP22 &&
5798 mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
5799 priv->percpu_pools = 1;
5800
5801 mvpp2_setup_bm_pool();
5802
	/* Use one software thread (address space) per present CPU, up to
	 * the maximum number of threads supported by the hardware.
	 */
5804 priv->nthreads = min_t(unsigned int, num_present_cpus(),
5805 MVPP2_MAX_THREADS);
5806
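	/* CPUs in excess of the number of software threads have to share a
	 * thread, so mark those threads as needing locking.
	 */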
5807 shared = num_present_cpus() - priv->nthreads;
5808 if (shared > 0)
5809 bitmap_fill(&priv->lock_map,
5810 min_t(int, shared, MVPP2_MAX_THREADS));
5811
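	/* Compute the per-thread register window base addresses */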
5812 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
5813 u32 addr_space_sz;
5814
5815 addr_space_sz = (priv->hw_version == MVPP21 ?
5816 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
5817 priv->swth_base[i] = base + i * addr_space_sz;
5818 }
5819
5820 if (priv->hw_version == MVPP21)
5821 priv->max_port_rxqs = 8;
5822 else
5823 priv->max_port_rxqs = 32;
5824
5825 if (dev_of_node(&pdev->dev)) {
5826 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
5827 if (IS_ERR(priv->pp_clk))
5828 return PTR_ERR(priv->pp_clk);
5829 err = clk_prepare_enable(priv->pp_clk);
5830 if (err < 0)
5831 return err;
5832
5833 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
5834 if (IS_ERR(priv->gop_clk)) {
5835 err = PTR_ERR(priv->gop_clk);
5836 goto err_pp_clk;
5837 }
5838 err = clk_prepare_enable(priv->gop_clk);
5839 if (err < 0)
5840 goto err_pp_clk;
5841
5842 if (priv->hw_version == MVPP22) {
5843 priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
5844 if (IS_ERR(priv->mg_clk)) {
5845 err = PTR_ERR(priv->mg_clk);
5846 goto err_gop_clk;
5847 }
5848
5849 err = clk_prepare_enable(priv->mg_clk);
5850 if (err < 0)
5851 goto err_gop_clk;
5852
5853 priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
5854 if (IS_ERR(priv->mg_core_clk)) {
5855 priv->mg_core_clk = NULL;
5856 } else {
5857 err = clk_prepare_enable(priv->mg_core_clk);
5858 if (err < 0)
5859 goto err_mg_clk;
5860 }
5861 }
5862
5863 priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
5864 if (IS_ERR(priv->axi_clk)) {
5865 err = PTR_ERR(priv->axi_clk);
5866 if (err == -EPROBE_DEFER)
5867 goto err_mg_core_clk;
5868 priv->axi_clk = NULL;
5869 } else {
5870 err = clk_prepare_enable(priv->axi_clk);
5871 if (err < 0)
5872 goto err_mg_core_clk;
5873 }
5874
		/* Get system's tclk rate */
5876 priv->tclk = clk_get_rate(priv->pp_clk);
5877 } else if (device_property_read_u32(&pdev->dev, "clock-frequency",
5878 &priv->tclk)) {
5879 dev_err(&pdev->dev, "missing clock-frequency value\n");
5880 return -EINVAL;
5881 }
5882
5883 if (priv->hw_version == MVPP22) {
5884 err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
5885 if (err)
5886 goto err_axi_clk;
5887
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
5892 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
5893 if (err)
5894 goto err_axi_clk;
5895 }
5896
	/* Initialize network controller */
5898 err = mvpp2_init(pdev, priv);
5899 if (err < 0) {
5900 dev_err(&pdev->dev, "failed to initialize controller\n");
5901 goto err_axi_clk;
5902 }
5903
	/* Initialize ports */
5905 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
5906 err = mvpp2_port_probe(pdev, port_fwnode, priv);
5907 if (err < 0)
5908 goto err_port_probe;
5909 }
5910
5911 if (priv->port_count == 0) {
5912 dev_err(&pdev->dev, "no ports enabled\n");
5913 err = -ENODEV;
5914 goto err_axi_clk;
5915 }
5916
	/* Statistics must be gathered regularly because some of them (like
	 * packet counters) are 32-bit registers and could overflow quite
	 * quickly. For instance, a 10Gb link used at full bandwidth with the
	 * smallest packets (64B) will overflow a 32-bit counter in less than
	 * 30 seconds. Hence, use a workqueue to accumulate them into 64-bit
	 * counters.
	 */
5923 snprintf(priv->queue_name, sizeof(priv->queue_name),
5924 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
5925 priv->port_count > 1 ? "+" : "");
5926 priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
5927 if (!priv->stats_queue) {
5928 err = -ENOMEM;
5929 goto err_port_probe;
5930 }
5931
5932 mvpp2_dbgfs_init(priv, pdev->name);
5933
5934 platform_set_drvdata(pdev, priv);
5935 return 0;
5936
5937err_port_probe:
5938 i = 0;
5939 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
5940 if (priv->port_list[i])
5941 mvpp2_port_remove(priv->port_list[i]);
5942 i++;
5943 }
5944err_axi_clk:
5945 clk_disable_unprepare(priv->axi_clk);
5946
5947err_mg_core_clk:
5948 if (priv->hw_version == MVPP22)
5949 clk_disable_unprepare(priv->mg_core_clk);
5950err_mg_clk:
5951 if (priv->hw_version == MVPP22)
5952 clk_disable_unprepare(priv->mg_clk);
5953err_gop_clk:
5954 clk_disable_unprepare(priv->gop_clk);
5955err_pp_clk:
5956 clk_disable_unprepare(priv->pp_clk);
5957 return err;
5958}
5959
5960static int mvpp2_remove(struct platform_device *pdev)
5961{
5962 struct mvpp2 *priv = platform_get_drvdata(pdev);
5963 struct fwnode_handle *fwnode = pdev->dev.fwnode;
5964 struct fwnode_handle *port_fwnode;
5965 int i = 0;
5966
5967 mvpp2_dbgfs_cleanup(priv);
5968
5969 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
5970 if (priv->port_list[i]) {
5971 mutex_destroy(&priv->port_list[i]->gather_stats_lock);
5972 mvpp2_port_remove(priv->port_list[i]);
5973 }
5974 i++;
5975 }
5976
5977 destroy_workqueue(priv->stats_queue);
5978
5979 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
5980 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
5981
5982 mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
5983 }
5984
5985 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
5986 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
5987
5988 dma_free_coherent(&pdev->dev,
5989 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
5990 aggr_txq->descs,
5991 aggr_txq->descs_dma);
5992 }
5993
	/* The clocks are only requested when probing from a device tree,
	 * so there is nothing to release when probed via ACPI.
	 */
	if (!dev_of_node(&pdev->dev))
		return 0;
5996
5997 clk_disable_unprepare(priv->axi_clk);
5998 clk_disable_unprepare(priv->mg_core_clk);
5999 clk_disable_unprepare(priv->mg_clk);
6000 clk_disable_unprepare(priv->pp_clk);
6001 clk_disable_unprepare(priv->gop_clk);
6002
6003 return 0;
6004}
6005
6006static const struct of_device_id mvpp2_match[] = {
6007 {
6008 .compatible = "marvell,armada-375-pp2",
6009 .data = (void *)MVPP21,
6010 },
6011 {
6012 .compatible = "marvell,armada-7k-pp22",
6013 .data = (void *)MVPP22,
6014 },
6015 { }
6016};
6017MODULE_DEVICE_TABLE(of, mvpp2_match);
6018
6019static const struct acpi_device_id mvpp2_acpi_match[] = {
6020 { "MRVL0110", MVPP22 },
6021 { },
6022};
6023MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
6024
6025static struct platform_driver mvpp2_driver = {
6026 .probe = mvpp2_probe,
6027 .remove = mvpp2_remove,
6028 .driver = {
6029 .name = MVPP2_DRIVER_NAME,
6030 .of_match_table = mvpp2_match,
6031 .acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
6032 },
6033};
6034
6035module_platform_driver(mvpp2_driver);
6036
6037MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
6038MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
6039MODULE_LICENSE("GPL v2");
6040