/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
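
/* Registers */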
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4
#define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30
#define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6
#define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2))
#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3
#define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8
#define MVNETA_PORT_RX_RESET 0x1cc0
#define MVNETA_PORT_RX_DMA_RESET BIT(0)
#define MVNETA_PHY_ADDR 0x2000
#define MVNETA_PHY_ADDR_MASK 0x1f
#define MVNETA_MBUS_RETRY 0x2010
#define MVNETA_UNIT_INTR_CAUSE 0x2080
#define MVNETA_UNIT_CONTROL 0x20B0
#define MVNETA_PHY_POLLING_ENABLE BIT(1)
#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE 0x2290
#define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
#define MVNETA_PORT_CONFIG 0x2400
#define MVNETA_UNI_PROMISC_MODE BIT(0)
#define MVNETA_DEF_RXQ(q) ((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
					  MVNETA_DEF_RXQ_ARP(q) | \
					  MVNETA_DEF_RXQ_TCP(q) | \
					  MVNETA_DEF_RXQ_UDP(q) | \
					  MVNETA_DEF_RXQ_BPDU(q) | \
					  MVNETA_TX_UNSET_ERR_SUM | \
					  MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND 0x2404
#define MVNETA_MAC_ADDR_LOW 0x2414
#define MVNETA_MAC_ADDR_HIGH 0x2418
#define MVNETA_SDMA_CONFIG 0x241c
#define MVNETA_SDMA_BRST_SIZE_16 4
#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP BIT(4)
#define MVNETA_TX_NO_DATA_SWAP BIT(5)
#define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
#define MVNETA_TXQ_CMD 0x2448
#define MVNETA_TXQ_DISABLE_SHIFT 8
#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
#define MVNETA_OVERRUN_FRAME_COUNT 0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
#define MVNETA_ACC_MODE 0x2500
#define MVNETA_BM_ADDRESS 0x2504
#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
#define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
#define MVNETA_CPU_TXQ_ACCESS(txq) BIT((txq) + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
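
/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated with a queue is
 * not set, then a read of the register from this CPU always returns
 * 0 and a write does nothing.
 */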
#define MVNETA_INTR_NEW_CAUSE 0x25a0
#define MVNETA_INTR_NEW_MASK 0x25a4
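
/* Bits in the NEW cause/mask registers:
 * bits 0..7  = TXQ SENT, one bit per queue
 * bits 8..15 = RXQ OCCUPIED, one bit per queue
 * bit  31    = MISC summary
 */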
#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
#define MVNETA_MISCINTR_INTR_MASK BIT(31)

#define MVNETA_INTR_OLD_CAUSE 0x25a8
#define MVNETA_INTR_OLD_MASK 0x25ac
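
/* Data Path Port/Queue Cause Register */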
#define MVNETA_INTR_MISC_CAUSE 0x25b0
#define MVNETA_INTR_MISC_MASK 0x25b4

#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
#define MVNETA_CAUSE_LINK_CHANGE BIT(1)
#define MVNETA_CAUSE_PTP BIT(4)

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
#define MVNETA_CAUSE_RX_OVERRUN BIT(8)
#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
#define MVNETA_CAUSE_TX_UNDERUN BIT(11)
#define MVNETA_CAUSE_PRBS_ERR BIT(12)
#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE 0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff

#define MVNETA_RXQ_CMD 0x2680
#define MVNETA_RXQ_DISABLE_SHIFT 8
#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0 0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_1000BASE_X BIT(1)
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
#define MVNETA_GMAC_STATUS 0x2c10
#define MVNETA_GMAC_LINK_UP BIT(0)
#define MVNETA_GMAC_SPEED_1000 BIT(1)
#define MVNETA_GMAC_SPEED_100 BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
#define MVNETA_GMAC_AN_COMPLETE BIT(11)
#define MVNETA_GMAC_SYNC_OK BIT(14)
#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
#define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3)
#define MVNETA_GMAC_INBAND_RESTART_AN BIT(4)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
#define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8)
#define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
#define MVNETA_GMAC_CTRL_4 0x2c90
#define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE BIT(1)
#define MVNETA_MIB_COUNTERS_BASE 0x3000
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
#define MVNETA_DA_FILT_OTH_MCAST 0x3500
#define MVNETA_DA_FILT_UCAST_BASE 0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT 16
#define MVNETA_TXQ_DEC_SENT_MASK 0xff
#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT 16
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
#define MVNETA_PORT_TX_RESET 0x3cf0
#define MVNETA_PORT_TX_DMA_RESET BIT(0)
#define MVNETA_TX_MTU 0x3e0c
#define MVNETA_TX_TOKEN_SIZE 0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff

#define MVNETA_LPI_CTRL_0 0x2cc0
#define MVNETA_LPI_CTRL_1 0x2cc4
#define MVNETA_LPI_REQUEST_ENABLE BIT(0)
#define MVNETA_LPI_CTRL_2 0x2cc8
#define MVNETA_LPI_STATUS 0x2ccc

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
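
/* Descriptor ring Macros */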
#define MVNETA_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
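
/* Various constants */

/* Coalescing */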
#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
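
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is filled automatically with zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4 byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */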
#define MVNETA_MH_SIZE 2

#define MVNETA_VLAN_TAG_LEN 4

#define MVNETA_TX_CSUM_DEF_SIZE 1600
#define MVNETA_TX_CSUM_MAX_SIZE 9800
#define MVNETA_ACC_MODE_EXT1 1
#define MVNETA_ACC_MODE_EXT2 2

#define MVNETA_MAX_DECODE_WIN 6

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000

#define MVNETA_TX_MTU_MAX 0x3ffff

/* The RSS lookup table actually has 256 entries but we do not use
 * them yet
 */
#define MVNETA_RSS_LU_TABLE_SIZE 1

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD 512

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD 1024

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS 100

#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE 32

/* Number of bytes to be taken into account by HW when putting incoming data
 * to the buffers. It is needed in case NET_SKB_PAD exceeds the maximum packet
 * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
 */
#define MVNETA_RX_PKT_OFFSET_CORRECTION 64

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, \
	      cache_line_size())

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)

enum {
	ETHTOOL_STAT_EEE_WAKEUP,
	ETHTOOL_STAT_SKB_ALLOC_ERR,
	ETHTOOL_STAT_REFILL_ERR,
	ETHTOOL_MAX_STATS,
};

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32 32
#define T_REG_64 64
#define T_SW 1

static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
	{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
};

struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port *pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct napi;

	/* Cause of the previous interrupt */
	u32 cause_rx_tx;
};
405
406struct mvneta_port {
407 u8 id;
408 struct mvneta_pcpu_port __percpu *ports;
409 struct mvneta_pcpu_stats __percpu *stats;
410
411 int pkt_size;
412 void __iomem *base;
413 struct mvneta_rx_queue *rxqs;
414 struct mvneta_tx_queue *txqs;
415 struct net_device *dev;
416 struct hlist_node node_online;
417 struct hlist_node node_dead;
418 int rxq_def;
419
420
421
422 spinlock_t lock;
423 bool is_stopped;
424
425 u32 cause_rx_tx;
426 struct napi_struct napi;
427
428
429 struct clk *clk;
430
431 struct clk *clk_bus;
432 u8 mcast_count[256];
433 u16 tx_ring_size;
434 u16 rx_ring_size;
435
436 phy_interface_t phy_interface;
437 struct device_node *dn;
438 unsigned int tx_csum_limit;
439 struct phylink *phylink;
440 struct phylink_config phylink_config;
441 struct phy *comphy;
442
443 struct mvneta_bm *bm_priv;
444 struct mvneta_bm_pool *pool_long;
445 struct mvneta_bm_pool *pool_short;
446 int bm_win_id;
447
448 bool eee_enabled;
449 bool eee_active;
450 bool tx_lpi_enabled;
451
452 u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
453
454 u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
455
456
457 bool neta_armada3700;
458 u16 rx_offset_correction;
459 const struct mbus_dram_target_info *dram_target_info;
460};
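
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design.
 */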
#define MVNETA_TX_L3_OFF_SHIFT 0
#define MVNETA_TX_IP_HLEN_SHIFT 8
#define MVNETA_TX_L4_UDP BIT(16)
#define MVNETA_TX_L3_IP6 BIT(17)
#define MVNETA_TXD_IP_CSUM BIT(18)
#define MVNETA_TXD_Z_PAD BIT(19)
#define MVNETA_TXD_L_DESC BIT(20)
#define MVNETA_TXD_F_DESC BIT(21)
#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
			     MVNETA_TXD_L_DESC | \
			     MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL BIT(30)
#define MVNETA_TX_L4_CSUM_NOT BIT(31)

#define MVNETA_RXD_ERR_CRC 0x0
#define MVNETA_RXD_BM_POOL_SHIFT 13
#define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14))
#define MVNETA_RXD_ERR_SUMMARY BIT(16)
#define MVNETA_RXD_ERR_OVERRUN BIT(17)
#define MVNETA_RXD_ERR_LEN BIT(18)
#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4 BIT(25)
#define MVNETA_RXD_LAST_DESC BIT(26)
#define MVNETA_RXD_FIRST_DESC BIT(27)
#define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \
				    MVNETA_RXD_LAST_DESC)
#define MVNETA_RXD_L4_CSUM_OK BIT(30)
495
496#if defined(__LITTLE_ENDIAN)
497struct mvneta_tx_desc {
498 u32 command;
499 u16 reserved1;
500 u16 data_size;
501 u32 buf_phys_addr;
502 u32 reserved2;
503 u32 reserved3[4];
504};
505
506struct mvneta_rx_desc {
507 u32 status;
508 u16 reserved1;
509 u16 data_size;
510
511 u32 buf_phys_addr;
512 u32 reserved2;
513
514 u32 buf_cookie;
515 u16 reserved3;
516 u16 reserved4;
517
518 u32 reserved5;
519 u32 reserved6;
520};
521#else
522struct mvneta_tx_desc {
523 u16 data_size;
524 u16 reserved1;
525 u32 command;
526 u32 reserved2;
527 u32 buf_phys_addr;
528 u32 reserved3[4];
529};
530
531struct mvneta_rx_desc {
532 u16 data_size;
533 u16 reserved1;
534 u32 status;
535
536 u32 reserved2;
537 u32 buf_phys_addr;
538
539 u16 reserved4;
540 u16 reserved3;
541 u32 buf_cookie;
542
543 u32 reserved5;
544 u32 reserved6;
545};
546#endif

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptors in the
	 * descriptor ring
	 */
	int count;
	int pending;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;

	/* Affinity mask for CPUs */
	cpumask_t affinity_mask;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual addresses of the RX buffers */
	void **buf_virt_addr;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* Index of the first RX DMA descriptor to refill */
	int first_to_refill;
	u32 refill_num;

	/* pointer to an under-construction skb */
	struct sk_buff *skb;
	int left_size;

	/* error counters */
	u32 skb_alloc_err;
	u32 refill_err;
};

static enum cpuhp_state online_hpstate;

/* Number of RX and TX queues used by the driver; the hardware
 * supports up to eight of each.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;
static int rx_header_size __read_mostly = 128;

/* HW BM needs each port to be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
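
/* Utility/helper methods */

/* Write helper method */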
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters (reading clears them) */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}

/* Get System Network Statistics */
static void
mvneta_get_stats64(struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;

	stats->tx_dropped = dev->stats.tx_dropped;
}
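
/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error.
 */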
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once.
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx descriptors called upon return from rx path or
 * from mvneta_rxq_drop_pkts (upon error).
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is expressed in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
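
/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */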
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	pend_desc += txq->pending;

	/* Only 255 pending descriptors can be added at once */
	do {
		val = min(pend_desc, 255);
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		pend_desc -= val;
	} while (pend_desc > 0);
	txq->pending = 0;
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Enable buffer management (BM) */
static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val |= MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for bigger packets */
static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
				     struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for smaller packets */
static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
				      struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Set port's receive buffer size for assigned BM pool */
static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
					      int buf_size,
					      u8 pool_id)
{
	u32 val;

	if (!IS_ALIGNED(buf_size, 8)) {
		dev_warn(pp->dev->dev.parent,
			 "illegal buf_size value %d, round to %d\n",
			 buf_size, ALIGN(buf_size, 8));
		buf_size = ALIGN(buf_size, 8);
	}

	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
}

/* Configure an MBUS window in order to enable access to BM internal SRAM */
static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
				  u8 target, u8 attr)
{
	u32 win_enable, win_protect;
	int i;

	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);

	if (pp->bm_win_id < 0) {
		/* Find first not occupied window */
		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
			if (win_enable & (1 << i)) {
				pp->bm_win_id = i;
				break;
			}
		}
		if (i == MVNETA_MAX_DECODE_WIN)
			return -ENOMEM;
	} else {
		i = pp->bm_win_id;
	}

	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

	if (i < 4)
		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);

	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
		    (attr << 8) | target);

	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);

	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
	win_protect |= 3 << (2 * i);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);

	win_enable &= ~(1 << i);
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);

	return 0;
}

static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
{
	u32 wsize;
	u8 target, attr;
	int err;

	/* Get BM window information */
	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
					 &target, &attr);
	if (err < 0)
		return err;

	pp->bm_win_id = -1;

	/* Open NETA -> BM window */
	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
				     target, attr);
	if (err < 0) {
		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
		return err;
	}
	return 0;
}

/* Assign and initialize pools for port. On error, the buffer manager
 * remains disabled for this port.
 */
static int mvneta_bm_port_init(struct platform_device *pdev,
			       struct mvneta_port *pp)
{
	struct device_node *dn = pdev->dev.of_node;
	u32 long_pool_id, short_pool_id;

	if (!pp->neta_armada3700) {
		int ret;

		ret = mvneta_bm_port_mbus_init(pp);
		if (ret)
			return ret;
	}

	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
		netdev_info(pp->dev, "missing long pool id\n");
		return -EINVAL;
	}

	/* Create port's long pool depending on mtu */
	pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
					   MVNETA_BM_LONG, pp->id,
					   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
	if (!pp->pool_long) {
		netdev_info(pp->dev, "fail to obtain long pool for port\n");
		return -ENOMEM;
	}

	pp->pool_long->port_map |= 1 << pp->id;

	mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
				   pp->pool_long->id);

	/* If short pool id is not defined, assume using single pool */
	if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
		short_pool_id = long_pool_id;

	/* Create port's short pool */
	pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
					    MVNETA_BM_SHORT, pp->id,
					    MVNETA_BM_SHORT_PKT_SIZE);
	if (!pp->pool_short) {
		netdev_info(pp->dev, "fail to obtain short pool for port\n");
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		return -ENOMEM;
	}

	if (short_pool_id != long_pool_id) {
		pp->pool_short->port_map |= 1 << pp->id;
		mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
					   pp->pool_short->id);
	}

	return 0;
}

/* Update settings of a pool for bigger packets */
static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
{
	struct mvneta_bm_pool *bm_pool = pp->pool_long;
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
	int num;

	/* Release all buffers from long pool */
	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
	if (hwbm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n",
		     bm_pool->id);
		goto bm_mtu_err;
	}

	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));

	/* Fill entire long pool */
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
	if (num != hwbm_pool->size) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     bm_pool->id, num, hwbm_pool->size);
		goto bm_mtu_err;
	}
	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);

	return;

bm_mtu_err:
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);

	pp->bm_priv = NULL;
	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs */
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		if (txq->descs)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	q_map = 0;
	/* Enable all initialized RXQs */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		if (rxq->descs)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & MVNETA_RXQ_ENABLE_MASK);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only.
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & MVNETA_TXQ_ENABLE_MASK);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

static void mvneta_percpu_unmask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are unmasked, but actually only the ones
	 * mapped to this CPU will be unmasked.
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK_ALL |
		    MVNETA_TX_INTR_MASK_ALL |
		    MVNETA_MISCINTR_INTR_MASK);
}

static void mvneta_percpu_mask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are masked, but actually only the ones
	 * mapped to this CPU will be masked.
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}

static void mvneta_percpu_clear_intr_cause(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are cleared, but actually only the ones
	 * mapped to this CPU will be cleared.
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
}
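
/* This method sets defaults to the NETA port:
 * - clears interrupt cause and mask registers;
 * - clears all MAC tables;
 * - sets defaults to all registers;
 * - resets RX and TX descriptor rings;
 * - resets MIB counters.
 * This method can be called after mvneta_port_down() to return the
 * port settings to defaults.
 */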
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;
	int max_cpu = num_present_cpus();

	/* Clear all Cause registers */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map. RX and TX queues are assigned to
	 * CPUs modulo the number of CPUs, except on Armada 3700 where
	 * every CPU can access every queue.
	 */
	for_each_present_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq, txq;

		if (!pp->neta_armada3700) {
			for (rxq = 0; rxq < rxq_number; rxq++)
				if ((rxq % max_cpu) == cpu)
					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

			for (txq = 0; txq < txq_number; txq++)
				if ((txq % max_cpu) == cpu)
					txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

			/* With only one TX queue we configure a special case
			 * which will allow to get all the irq on a single
			 * CPU.
			 */
			if (txq_number == 1)
				txq_map = (cpu == pp->rxq_def) ?
					MVNETA_CPU_TXQ_ACCESS(1) : 0;

		} else {
			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
			rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
		}

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
	}

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	if (pp->bm_priv)
		/* HW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT2;
	else
		/* SW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT1;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	if (pp->bm_priv)
		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

	mvneta_mib_counters_clear(pp);
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger than MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
}

/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
}

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
}

/* Handle rx descriptor fill by setting buf_phys_addr and virt addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, void *virt_addr,
				struct mvneta_rx_queue *rxq)
{
	int i;

	rx_desc->buf_phys_addr = phys_addr;
	i = rx_desc - rxq->descs;
	rxq->buf_virt_addr[i] = virt_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}

/* Set TXQ descriptor fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation.
	 */
	command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}

/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload based on the descriptor's status */
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
			   struct sk_buff *skb)
{
	if ((pp->dev->features & NETIF_F_RXCSUM) &&
	    (status & MVNETA_RXD_L3_IP4) &&
	    (status & MVNETA_RXD_L4_CSUM_OK)) {
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;
}

/* Return tx queue pointer (find last set bit) according to <cause>
 * returned from the tx_done register. <cause> must not be null.
 */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{
	int queue = fls(cause) - 1;

	return &pp->txqs[queue];
}

/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num,
				 struct netdev_queue *nq)
{
	unsigned int bytes_compl = 0, pkts_compl = 0;
	int i;

	for (i = 0; i < num; i++) {
		struct mvneta_tx_desc *tx_desc = txq->descs +
			txq->txq_get_index;
		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

		if (skb) {
			bytes_compl += skb->len;
			pkts_compl++;
		}

		mvneta_txq_inc_get(txq);

		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size, DMA_TO_DEVICE);
		if (!skb)
			continue;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}

/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
			    struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
	if (!tx_done)
		return;

	mvneta_txq_bufs_free(pp, txq, tx_done, nq);

	txq->count -= tx_done;

	if (netif_tx_queue_stopped(nq)) {
		if (txq->count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
	}
}

/* Refill processing for SW buffer management: allocate a page per
 * descriptor and map it for DMA.
 */
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc,
			    struct mvneta_rx_queue *rxq,
			    gfp_t gfp_mask)
{
	dma_addr_t phys_addr;
	struct page *page;

	page = __dev_alloc_page(gfp_mask);
	if (!page)
		return -ENOMEM;

	/* map page for use */
	phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
		__free_page(page);
		return -ENOMEM;
	}

	phys_addr += pp->rx_offset_correction;
	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
	return 0;
}

/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		__be16 l3_proto = vlan_get_protocol(skb);
		u8 l4_proto;

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVNETA_TX_L4_CSUM_NOT;
		}

		return mvneta_txq_desc_csum(skb_network_offset(skb),
					    l3_proto, ip_hdr_len, l4_proto);
	}

	return MVNETA_TX_L4_CSUM_NOT;
}

/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done, i;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	if (pp->bm_priv) {
		for (i = 0; i < rx_done; i++) {
			struct mvneta_rx_desc *rx_desc =
				mvneta_rxq_next_desc_get(rxq);
			u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
			struct mvneta_bm_pool *bm_pool;

			bm_pool = &pp->bm_priv->bm_pools[pool_id];
			/* Return dropped buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
		}
		return;
	}

	for (i = 0; i < rxq->size; i++) {
		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
		void *data = rxq->buf_virt_addr[i];

		if (!data || !(rx_desc->buf_phys_addr))
			continue;

		dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(data);
	}
}

static inline
int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
{
	struct mvneta_rx_desc *rx_desc;
	int curr_desc = rxq->first_to_refill;
	int i;

	for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
		rx_desc = rxq->descs + curr_desc;
		if (!(rx_desc->buf_phys_addr)) {
			if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
				pr_err("Can't refill queue %d. Done %d from %d\n",
				       rxq->id, i, rxq->refill_num);
				rxq->refill_err++;
				break;
			}
		}
		curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
	}
	rxq->refill_num -= i;
	rxq->first_to_refill = curr_desc;

	return i;
}
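
/* Main rx processing when using software buffer management */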
static int mvneta_rx_swbm(struct napi_struct *napi,
			  struct mvneta_port *pp, int budget,
			  struct mvneta_rx_queue *rxq)
{
	struct net_device *dev = pp->dev;
	int rx_todo, rx_proc;
	int refill = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
	rx_proc = 0;

	/* Fairness NAPI loop */
	while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		unsigned char *data;
		struct page *page;
		dma_addr_t phys_addr;
		u32 rx_status, index;
		int rx_bytes, skb_size, copy_size;
		int frag_num, frag_size, frag_offset;

		index = rx_desc - rxq->descs;
		page = (struct page *)rxq->buf_virt_addr[index];
		data = page_address(page);
		/* Prefetch header */
		prefetch(data);

		phys_addr = rx_desc->buf_phys_addr;
		rx_status = rx_desc->status;
		rx_proc++;
		rxq->refill_num++;

		if (rx_status & MVNETA_RXD_FIRST_DESC) {
			/* Check errors only for FIRST descriptor */
			if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
				mvneta_rx_error(pp, rx_desc);
				dev->stats.rx_errors++;
				/* leave the descriptor untouched */
				continue;
			}
			rx_bytes = rx_desc->data_size -
				   (ETH_FCS_LEN + MVNETA_MH_SIZE);

			/* Allocate small skb for each new packet */
			skb_size = max(rx_copybreak, rx_header_size);
			rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
			if (unlikely(!rxq->skb)) {
				netdev_err(dev,
					   "Can't allocate skb on queue %d\n",
					   rxq->id);
				dev->stats.rx_dropped++;
				rxq->skb_alloc_err++;
				continue;
			}
			copy_size = min(skb_size, rx_bytes);

			/* Copy data from buffer to SKB, skip Marvell header */
			memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
			       copy_size);
			skb_put(rxq->skb, copy_size);
			rxq->left_size = rx_bytes - copy_size;

			mvneta_rx_csum(pp, rx_status, rxq->skb);
			if (rxq->left_size == 0) {
				int size = copy_size + MVNETA_MH_SIZE;

				dma_sync_single_range_for_cpu(dev->dev.parent,
							      phys_addr, 0,
							      size,
							      DMA_FROM_DEVICE);

				/* leave the descriptor and buffer untouched */
			} else {
				/* refill descriptor with new buffer later */
				rx_desc->buf_phys_addr = 0;

				frag_num = 0;
				frag_offset = copy_size + MVNETA_MH_SIZE;
				frag_size = min(rxq->left_size,
						(int)(PAGE_SIZE - frag_offset));
				skb_add_rx_frag(rxq->skb, frag_num, page,
						frag_offset, frag_size,
						PAGE_SIZE);
				dma_unmap_page(dev->dev.parent, phys_addr,
					       PAGE_SIZE, DMA_FROM_DEVICE);
				rxq->left_size -= frag_size;
			}
		} else {
			/* Middle or Last descriptor */
			if (unlikely(!rxq->skb)) {
				pr_debug("no skb for rx_status 0x%x\n",
					 rx_status);
				continue;
			}
			if (!rxq->left_size) {
				/* last descriptor has only FCS
				 * and can be discarded
				 */
				dma_sync_single_range_for_cpu(dev->dev.parent,
							      phys_addr, 0,
							      ETH_FCS_LEN,
							      DMA_FROM_DEVICE);
				/* leave the descriptor and buffer untouched */
			} else {
				/* refill descriptor with new buffer later */
				rx_desc->buf_phys_addr = 0;

				frag_num = skb_shinfo(rxq->skb)->nr_frags;
				frag_offset = 0;
				frag_size = min(rxq->left_size,
						(int)(PAGE_SIZE - frag_offset));
				skb_add_rx_frag(rxq->skb, frag_num, page,
						frag_offset, frag_size,
						PAGE_SIZE);

				dma_unmap_page(dev->dev.parent, phys_addr,
					       PAGE_SIZE, DMA_FROM_DEVICE);

				rxq->left_size -= frag_size;
			}
		}

		if (!(rx_status & MVNETA_RXD_LAST_DESC))
			/* no last descriptor this time */
			continue;

		if (rxq->left_size) {
			pr_err("get last desc, but left_size (%d) != 0\n",
			       rxq->left_size);
			dev_kfree_skb_any(rxq->skb);
			rxq->left_size = 0;
			rxq->skb = NULL;
			continue;
		}
		rcvd_pkts++;
		rcvd_bytes += rxq->skb->len;

		/* Linux processing */
		rxq->skb->protocol = eth_type_trans(rxq->skb, dev);

		napi_gro_receive(napi, rxq->skb);

		/* clean uncomplete skb pointer in queue */
		rxq->skb = NULL;
		rxq->left_size = 0;
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* return some buffers to hardware queue, one at a time is too slow */
	refill = mvneta_rx_refill_queue(pp, rxq);

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);

	return rcvd_pkts;
}
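
/* Main rx processing when using hardware buffer management */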
static int mvneta_rx_hwbm(struct napi_struct *napi,
			  struct mvneta_port *pp, int rx_todo,
			  struct mvneta_rx_queue *rxq)
{
	struct net_device *dev = pp->dev;
	int rx_done;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct mvneta_bm_pool *bm_pool = NULL;
		struct sk_buff *skb;
		unsigned char *data;
		dma_addr_t phys_addr;
		u32 rx_status, frag_size;
		int rx_bytes, err;
		u8 pool_id;

		rx_done++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
		phys_addr = rx_desc->buf_phys_addr;
		pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
		bm_pool = &pp->bm_priv->bm_pools[pool_id];

		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
err_drop_frame_ret_pool:
			/* Return the buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
err_drop_frame:
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (unlikely(!skb))
				goto err_drop_frame_ret_pool;

			dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
						      rx_desc->buf_phys_addr,
						      MVNETA_MH_SIZE + NET_SKB_PAD,
						      rx_bytes,
						      DMA_FROM_DEVICE);
			skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
				     rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			mvneta_rx_csum(pp, rx_status, skb);
			napi_gro_receive(napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* Return the buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);

			/* leave the descriptor and buffer untouched */
			continue;
		}

		/* Refill processing */
		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
		if (err) {
			netdev_err(dev, "Linux processing - Can't refill\n");
			rxq->refill_err++;
			goto err_drop_frame_ret_pool;
		}

		frag_size = bm_pool->hwbm_pool.frag_size;

		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);

		/* After refill old buffer has to be unmapped regardless
		 * the skb is successfully built or not.
		 */
		dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);
		if (!skb)
			goto err_drop_frame;

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_status, skb);

		napi_gro_receive(napi, skb);
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	return rx_done;
}

static inline void
mvneta_tso_put_hdr(struct sk_buff *skb,
		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	txq->tx_skb[txq->txq_put_index] = NULL;
	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = hdr_len;
	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
	tx_desc->command |= MVNETA_TXD_F_DESC;
	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
				 txq->txq_put_index * TSO_HEADER_SIZE;
	mvneta_txq_inc_put(txq);
}

static inline int
mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
		    struct sk_buff *skb, char *data, int size,
		    bool last_tcp, bool is_last)
{
	struct mvneta_tx_desc *tx_desc;

	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = size;
	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
						size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		return -ENOMEM;
	}

	tx_desc->command = 0;
	txq->tx_skb[txq->txq_put_index] = NULL;

	if (last_tcp) {
		/* last descriptor in the TCP packet */
		tx_desc->command = MVNETA_TXD_L_DESC;

		/* last descriptor in SKB */
		if (is_last)
			txq->tx_skb[txq->txq_put_index] = skb;
	}
	mvneta_txq_inc_put(txq);
	return 0;
}
2272
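
/* Segment an skb with software TSO: one header descriptor plus one or more
 * data descriptors per MSS-sized segment, releasing everything already
 * queued if a DMA mapping fails.
 */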
static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
			 struct mvneta_tx_queue *txq)
{
	int total_len, data_left;
	int desc_count = 0;
	struct mvneta_port *pp = netdev_priv(dev);
	struct tso_t tso;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i;

	/* Count needed descriptors */
	if ((txq->count + tso_count_descs(skb)) >= txq->size)
		return 0;

	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
		pr_info("*** Is this even possible???!?!?\n");
		return 0;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		desc_count++;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);

		mvneta_tso_put_hdr(skb, pp, txq);

		while (data_left > 0) {
			int size;

			desc_count++;

			size = min_t(int, tso.size, data_left);

			if (mvneta_tso_put_data(dev, txq, skb,
						tso.data, size,
						size == data_left,
						total_len == 0))
				goto err_release;
			data_left -= size;

			tso_build_data(skb, &tso, size);
		}
	}

	return desc_count;

err_release:
	/* Release all used data descriptors; header descriptors must
	 * not be unmapped, as they point inside the TSO header buffer.
	 */
	for (i = desc_count - 1; i >= 0; i--) {
		struct mvneta_tx_desc *tx_desc = txq->descs + i;

		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}
	return 0;
}
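
/* Handle tx fragmentation processing */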
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = skb_frag_address(frag);

		tx_desc = mvneta_txq_next_desc_get(txq);
		tx_desc->data_size = skb_frag_size(frag);

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

		if (i == nr_frags - 1) {
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
			txq->tx_skb[txq->txq_put_index] = skb;
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
			txq->tx_skb[txq->txq_put_index] = NULL;
		}
		mvneta_txq_inc_put(txq);
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}
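
/* Main tx processing */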
static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u16 txq_id = skb_get_queue_mapping(skb);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct mvneta_tx_desc *tx_desc;
	int len = skb->len;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	if (skb_is_gso(skb)) {
		frags = mvneta_tx_tso(skb, dev, txq);
		goto out;
	}

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		txq->tx_skb[txq->txq_put_index] = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		txq->tx_skb[txq->txq_put_index] = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;

		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

out:
	if (frags > 0) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		netdev_tx_sent_queue(nq, len);

		txq->count += frags;
		if (txq->count >= txq->tx_stop_threshold)
			netif_tx_stop_queue(nq);

		if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
		    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
			mvneta_txq_pend_desc_add(pp, txq, frags);
		else
			txq->pending += frags;

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}
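
/* Free tx resources, when resetting a port */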
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done, nq);

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}
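
/* Handle tx done - called in softirq context. The <cause_tx_done> argument
 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
 */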
static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
	struct mvneta_tx_queue *txq;
	struct netdev_queue *nq;
	int cpu = smp_processor_id();

	while (cause_tx_done) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, cpu);

		if (txq->count)
			mvneta_txq_done(pp, txq);

		__netif_tx_unlock(nq);
		cause_tx_done &= ~(1 << txq->id);
	}
}
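
/* Compute CRC-8 of a MAC address, using the polynomial 0x107
 * (x^8 + x^2 + x + 1), as required by the hardware multicast filter.
 */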
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}
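
/* Program an entry in the Special Multicast Table, which filters addresses
 * of the form 01:00:5e:00:00:XX. The last address byte selects the table
 * entry; a queue of -1 invalidates the entry.
 */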
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register offset from SMC table base */
	tbl_offset = (last_byte / 4);
	/* Entry offset within the above reg */
	reg_offset = last_byte % 4;

	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));

	if (queue == -1)
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
}
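
/* Program an entry in the Other Multicast Table, indexed by the CRC-8 of
 * the address. Each 32-bit table register holds four one-byte entries; a
 * queue of -1 invalidates the entry.
 */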
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (crc8 / 4) * 4;
	reg_offset = crc8 % 4;

	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

	if (queue == -1) {
		/* Clear accepts frame bit at specified Other DA table entry */
		omc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		omc_table_reg &= ~(0xff << (8 * reg_offset));
		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}
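
/* The network device supports multicast using two tables:
 *  1) Special Multicast Table, for MAC addresses of the form
 *     01:00:5e:00:00:XX, where XX is the table entry.
 *  2) Other Multicast Table, for multicast of another type, indexed by
 *     the CRC-8 of the address.
 */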
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
				 int queue)
{
	unsigned char crc_result = 0;

	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
		return 0;
	}

	crc_result = mvneta_addr_crc(p_addr);
	if (queue == -1) {
		if (pp->mcast_count[crc_result] == 0) {
			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
				    crc_result);
			return -EINVAL;
		}

		pp->mcast_count[crc_result]--;
		if (pp->mcast_count[crc_result] != 0) {
			netdev_info(pp->dev,
				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
				    pp->mcast_count[crc_result], crc_result);
			return -EINVAL;
		}
	} else
		pp->mcast_count[crc_result]++;

	mvneta_set_other_mcast_addr(pp, crc_result, queue);

	return 0;
}

/* Configure the filtering mode of the Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
					  int is_promisc)
{
	u32 port_cfg_reg, val;

	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

	val = mvreg_read(pp, MVNETA_TYPE_PRIO);

	if (is_promisc) {
		/* Accept all Unicast addresses */
		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
		val |= MVNETA_FORCE_UNI;
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
	} else {
		/* Reject all Unicast addresses */
		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
		val &= ~MVNETA_FORCE_UNI;
	}

	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}

/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		/* Accept all: Multicast + Unicast promisc mode */
		mvneta_rx_unicast_promisc_set(pp, 1);
		mvneta_set_ucast_table(pp, pp->rxq_def);
		mvneta_set_special_mcast_table(pp, pp->rxq_def);
		mvneta_set_other_mcast_table(pp, pp->rxq_def);
	} else {
		/* Accept single Unicast */
		mvneta_rx_unicast_promisc_set(pp, 0);
		mvneta_set_ucast_table(pp, -1);
		mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);

		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			mvneta_set_special_mcast_table(pp, pp->rxq_def);
			mvneta_set_other_mcast_table(pp, pp->rxq_def);
		} else {
			/* Accept only initialized multicast */
			mvneta_set_special_mcast_table(pp, -1);
			mvneta_set_other_mcast_table(pp, -1);

			if (!netdev_mc_empty(dev)) {
				netdev_for_each_mc_addr(ha, dev) {
					mvneta_mcast_addr_set(pp, ha->addr,
							      pp->rxq_def);
				}
			}
		}
	}
}

/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
	struct mvneta_port *pp = (struct mvneta_port *)dev_id;

	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	napi_schedule(&pp->napi);

	return IRQ_HANDLED;
}

/* Interrupt handling - the callback for request_percpu_irq() */
static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
{
	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;

	disable_percpu_irq(port->pp->dev->irq);
	napi_schedule(&port->napi);

	return IRQ_HANDLED;
}

static void mvneta_link_change(struct mvneta_port *pp)
{
	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);

	phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
}
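
/* NAPI handler. Bits 0-7 of the cause register report TX-done events on the
 * corresponding TXQs; bits 8-15 report received packets on the corresponding
 * RXQs.
 */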
static int mvneta_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;
	u32 cause_rx_tx;
	int rx_queue;
	struct mvneta_port *pp = netdev_priv(napi->dev);
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);

	if (!netif_running(pp->dev)) {
		napi_complete(napi);
		return rx_done;
	}

	/* Read cause register */
	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
	if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);

		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

		if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
				  MVNETA_CAUSE_LINK_CHANGE))
			mvneta_link_change(pp);
	}

	/* Release Tx descriptors */
	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
	}

	/* For the case where the last mvneta_poll did not process all
	 * RX packets
	 */
	rx_queue = fls((cause_rx_tx >> 8) & 0xff);

	cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
		port->cause_rx_tx;

	if (rx_queue) {
		rx_queue = rx_queue - 1;
		if (pp->bm_priv)
			rx_done = mvneta_rx_hwbm(napi, pp, budget,
						 &pp->rxqs[rx_queue]);
		else
			rx_done = mvneta_rx_swbm(napi, pp, budget,
						 &pp->rxqs[rx_queue]);
	}

	if (rx_done < budget) {
		cause_rx_tx = 0;
		napi_complete_done(napi, rx_done);

		if (pp->neta_armada3700) {
			unsigned long flags;

			local_irq_save(flags);
			mvreg_write(pp, MVNETA_INTR_NEW_MASK,
				    MVNETA_RX_INTR_MASK(rxq_number) |
				    MVNETA_TX_INTR_MASK(txq_number) |
				    MVNETA_MISCINTR_INTR_MASK);
			local_irq_restore(flags);
		} else {
			enable_percpu_irq(pp->dev->irq, 0);
		}
	}

	if (pp->neta_armada3700)
		pp->cause_rx_tx = cause_rx_tx;
	else
		port->cause_rx_tx = cause_rx_tx;

	return rx_done;
}
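
/* Handle rxq fill: allocates rxq skbs; called when initializing a port */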
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i;

	for (i = 0; i < num; i++) {
		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
		if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
				     GFP_KERNEL) != 0) {
			netdev_err(pp->dev,
				   "%s:rxq %d, %d of %d buffs filled\n",
				   __func__, rxq->id, i, num);
			break;
		}
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return i;
}

/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{
	int queue;

	/* free the skb's in the tx ring */
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_done_force(pp, &pp->txqs[queue]);

	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
}

static void mvneta_rx_reset(struct mvneta_port *pp)
{
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
}
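
/* Rx/Tx queue initialization/cleanup methods */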
static int mvneta_rxq_sw_init(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	return 0;
}

static void mvneta_rxq_hw_init(struct mvneta_port *pp,
			       struct mvneta_rx_queue *rxq)
{
	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Set coalescing pkts and time */
	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

	if (!pp->bm_priv) {
		/* Set Offset */
		mvneta_rxq_offset_set(pp, rxq, 0);
		mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
					PAGE_SIZE :
					MVNETA_RX_BUF_SIZE(pp->pkt_size));
		mvneta_rxq_bm_disable(pp, rxq);
		mvneta_rxq_fill(pp, rxq, rxq->size);
	} else {
		/* Set Offset */
		mvneta_rxq_offset_set(pp, rxq,
				      NET_SKB_PAD - pp->rx_offset_correction);

		mvneta_rxq_bm_enable(pp, rxq);
		/* Fill RXQ with buffers from RX pool */
		mvneta_rxq_long_pool_set(pp, rxq);
		mvneta_rxq_short_pool_set(pp, rxq);
		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
	}
}

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	int ret;

	ret = mvneta_rxq_sw_init(pp, rxq);
	if (ret < 0)
		return ret;

	mvneta_rxq_hw_init(pp, rxq);

	return 0;
}

/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	if (rxq->skb)
		dev_kfree_skb_any(rxq->skb);

	if (rxq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_phys);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys = 0;
	rxq->first_to_refill = 0;
	rxq->refill_num = 0;
	rxq->skb = NULL;
	rxq->left_size = 0;
}

static int mvneta_txq_sw_init(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	int cpu;

	txq->size = pp->tx_ring_size;

	/* A queue must always have room for at least one skb.
	 * Therefore, stop the queue when the free entries reaches
	 * the maximum number of descriptors per skb.
	 */
	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;

	/* Allocate memory for TX descriptors */
	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					txq->size * MVNETA_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
				    GFP_KERNEL);
	if (!txq->tx_skb) {
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}

	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
	txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
					   txq->size * TSO_HEADER_SIZE,
					   &txq->tso_hdrs_phys, GFP_KERNEL);
	if (!txq->tso_hdrs) {
		kfree(txq->tx_skb);
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}

	/* Setup XPS mapping */
	if (txq_number > 1)
		cpu = txq->id % num_present_cpus();
	else
		cpu = pp->rxq_def % num_present_cpus();
	cpumask_set_cpu(cpu, &txq->affinity_mask);
	netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);

	return 0;
}

static void mvneta_txq_hw_init(struct mvneta_port *pp,
			       struct mvneta_tx_queue *txq)
{
	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
}

/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	int ret;

	ret = mvneta_txq_sw_init(pp, txq);
	if (ret < 0)
		return ret;

	mvneta_txq_hw_init(pp, txq);

	return 0;
}

/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);

	kfree(txq->tx_skb);

	if (txq->tso_hdrs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * TSO_HEADER_SIZE,
				  txq->tso_hdrs, txq->tso_hdrs_phys);
	if (txq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	netdev_tx_reset_queue(nq);

	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys = 0;
}

static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq)
{
	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}

static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	mvneta_txq_sw_deinit(pp, txq);
	mvneta_txq_hw_deinit(pp, txq);
}

/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}

/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);

		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);

		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}

static int mvneta_comphy_init(struct mvneta_port *pp)
{
	int ret;

	if (!pp->comphy)
		return 0;

	ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET,
			       pp->phy_interface);
	if (ret)
		return ret;

	return phy_power_on(pp->comphy);
}

static void mvneta_start_dev(struct mvneta_port *pp)
{
	int cpu;

	WARN_ON(mvneta_comphy_init(pp));

	mvneta_max_rx_size_set(pp, pp->pkt_size);
	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);

	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);

	if (!pp->neta_armada3700) {
		/* Enable polling on the port */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			napi_enable(&port->napi);
		}
	} else {
		napi_enable(&pp->napi);
	}

	/* Unmask interrupts. It has to be done from each CPU */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);

	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE);

	phylink_start(pp->phylink);
	netif_tx_start_all_queues(pp->dev);
}

static void mvneta_stop_dev(struct mvneta_port *pp)
{
	unsigned int cpu;

	phylink_stop(pp->phylink);

	if (!pp->neta_armada3700) {
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			napi_disable(&port->napi);
		}
	} else {
		napi_disable(&pp->napi);
	}

	netif_carrier_off(pp->dev);

	mvneta_port_down(pp);
	netif_tx_stop_all_queues(pp->dev);

	/* Stop the port activity */
	mvneta_port_disable(pp);

	/* Clear all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);

	mvneta_tx_reset(pp);
	mvneta_rx_reset(pp);

	WARN_ON(phy_power_off(pp->comphy));
}

static void mvneta_percpu_enable(void *arg)
{
	struct mvneta_port *pp = arg;

	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
}

static void mvneta_percpu_disable(void *arg)
{
	struct mvneta_port *pp = arg;

	disable_percpu_irq(pp->dev->irq);
}

/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
	}

	dev->mtu = mtu;

	if (!netif_running(dev)) {
		if (pp->bm_priv)
			mvneta_bm_update_mtu(pp, mtu);

		netdev_update_features(dev);
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvneta_stop_dev(pp);
	on_each_cpu(mvneta_percpu_disable, pp, true);

	mvneta_cleanup_txqs(pp);
	mvneta_cleanup_rxqs(pp);

	if (pp->bm_priv)
		mvneta_bm_update_mtu(pp, mtu);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);

	ret = mvneta_setup_rxqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup rxqs after MTU change\n");
		return ret;
	}

	ret = mvneta_setup_txqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup txqs after MTU change\n");
		return ret;
	}

	on_each_cpu(mvneta_percpu_enable, pp, true);
	mvneta_start_dev(pp);

	netdev_update_features(dev);

	return 0;
}

static netdev_features_t mvneta_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
		netdev_info(dev,
			    "Disable IP checksum for MTU greater than %dB\n",
			    pp->tx_csum_limit);
	}

	return features;
}

/* Get mac address */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_h;

	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = (mac_addr_l >> 8) & 0xFF;
	addr[5] = mac_addr_l & 0xFF;
}

/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct sockaddr *sockaddr = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret < 0)
		return ret;
	/* Remove previous address table entry */
	mvneta_mac_addr_set(pp, dev->dev_addr, -1);

	/* Set new addr in hw */
	mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);

	eth_commit_mac_addr_change(dev, addr);
	return 0;
}

static void mvneta_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct mvneta_port *pp = netdev_priv(ndev);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* We only support QSGMII, SGMII, 802.3z and RGMII modes */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_QSGMII &&
	    state->interface != PHY_INTERFACE_MODE_SGMII &&
	    !phy_interface_mode_is_8023z(state->interface) &&
	    !phy_interface_mode_is_rgmii(state->interface)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	/* Allow all the expected interfaces */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);

	/* Asymmetric pause is unsupported */
	phylink_set(mask, Pause);

	/* Half-duplex at speeds higher than 100Mbit is unsupported */
	if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	}
	if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
		phylink_set(mask, 2500baseT_Full);
		phylink_set(mask, 2500baseX_Full);
	}

	if (!phy_interface_mode_is_8023z(state->interface)) {
		/* 10M and 100M are only supported in non-802.3z mode */
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
	}

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}

static int mvneta_mac_link_state(struct phylink_config *config,
				 struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 gmac_stat;

	gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);

	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
		state->speed =
			state->interface == PHY_INTERFACE_MODE_2500BASEX ?
			SPEED_2500 : SPEED_1000;
	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
		state->speed = SPEED_100;
	else
		state->speed = SPEED_10;

	state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
	state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
	state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);

	state->pause = 0;
	if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
		state->pause |= MLO_PAUSE_RX;
	if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
		state->pause |= MLO_PAUSE_TX;

	return 1;
}

static void mvneta_mac_an_restart(struct phylink_config *config)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);

	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
		    gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
		    gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
}

static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
	u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
	u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
	u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);

	new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
	new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
				   MVNETA_GMAC2_PORT_RESET);
	new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
	new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
	new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
			     MVNETA_GMAC_INBAND_RESTART_AN |
			     MVNETA_GMAC_CONFIG_MII_SPEED |
			     MVNETA_GMAC_CONFIG_GMII_SPEED |
			     MVNETA_GMAC_AN_SPEED_EN |
			     MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
			     MVNETA_GMAC_CONFIG_FLOW_CTRL |
			     MVNETA_GMAC_AN_FLOW_CTRL_EN |
			     MVNETA_GMAC_CONFIG_FULL_DUPLEX |
			     MVNETA_GMAC_AN_DUPLEX_EN);

	/* Even though it might look weird, when we're configured in
	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
	 */
	new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;

	if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
	    state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface))
		new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;

	if (phylink_test(state->advertising, Pause))
		new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
	if (state->pause & MLO_PAUSE_TXRX_MASK)
		new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL;

	if (!phylink_autoneg_inband(mode)) {
		/* Phy or fixed speed */
		if (state->duplex)
			new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

		if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
			new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
		else if (state->speed == SPEED_100)
			new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
		/* SGMII mode receives the state from the PHY */
		new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
		new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
				     MVNETA_GMAC_FORCE_LINK_PASS)) |
			 MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_AN_SPEED_EN |
			 MVNETA_GMAC_AN_DUPLEX_EN;
	} else {
		/* 802.3z negotiation - only 1000base-X */
		new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
		new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
				     MVNETA_GMAC_FORCE_LINK_PASS)) |
			 MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_CONFIG_GMII_SPEED |
			 /* The MAC only supports FD mode */
			 MVNETA_GMAC_CONFIG_FULL_DUPLEX;

		if (state->pause & MLO_PAUSE_AN && state->an_enabled)
			new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
	}

	/* The port mode and in-band enable bits may only be changed
	 * while the link is down, so force the link down while making
	 * these changes. We also do this for GMAC_CTRL2.
	 */
	if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
	    (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
	    (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
			    (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
			    MVNETA_GMAC_FORCE_LINK_DOWN);
	}

	/* When at 2.5G, the link partner can send frames with shortened
	 * preambles.
	 */
	if (state->speed == SPEED_2500)
		new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;

	if (pp->comphy && pp->phy_interface != state->interface &&
	    (state->interface == PHY_INTERFACE_MODE_SGMII ||
	     state->interface == PHY_INTERFACE_MODE_1000BASEX ||
	     state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
		pp->phy_interface = state->interface;

		WARN_ON(phy_power_off(pp->comphy));
		WARN_ON(mvneta_comphy_init(pp));
	}

	if (new_ctrl0 != gmac_ctrl0)
		mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
	if (new_ctrl2 != gmac_ctrl2)
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
	if (new_ctrl4 != gmac_ctrl4)
		mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
	if (new_clk != gmac_clk)
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
	if (new_an != gmac_an)
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);

	if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
		while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
			MVNETA_GMAC2_PORT_RESET) != 0)
			continue;
	}
}

static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
{
	u32 lpi_ctl1;

	lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
	if (enable)
		lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
	else
		lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
	mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
}

static void mvneta_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 val;

	mvneta_port_down(pp);

	if (!phylink_autoneg_inband(mode)) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
		val |= MVNETA_GMAC_FORCE_LINK_DOWN;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
	}

	pp->eee_active = false;
	mvneta_set_eee(pp, false);
}

static void mvneta_mac_link_up(struct phylink_config *config, unsigned int mode,
			       phy_interface_t interface,
			       struct phy_device *phy)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 val;

	if (!phylink_autoneg_inband(mode)) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
		val |= MVNETA_GMAC_FORCE_LINK_PASS;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
	}

	mvneta_port_up(pp);

	if (phy && pp->eee_enabled) {
		pp->eee_active = phy_init_eee(phy, 0) >= 0;
		mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
	}
}

static const struct phylink_mac_ops mvneta_phylink_ops = {
	.validate = mvneta_validate,
	.mac_link_state = mvneta_mac_link_state,
	.mac_an_restart = mvneta_mac_an_restart,
	.mac_config = mvneta_mac_config,
	.mac_link_down = mvneta_mac_link_down,
	.mac_link_up = mvneta_mac_link_up,
};

static int mvneta_mdio_probe(struct mvneta_port *pp)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);

	if (err)
		netdev_err(pp->dev, "could not attach PHY: %d\n", err);

	phylink_ethtool_get_wol(pp->phylink, &wol);
	device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);

	return err;
}

static void mvneta_mdio_remove(struct mvneta_port *pp)
{
	phylink_disconnect_phy(pp->phylink);
}
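
/* Electing a CPU must be done in an atomic way: it should be done
 * after or before the removal/insertion of a CPU and this function is
 * not reentrant.
 */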
static void mvneta_percpu_elect(struct mvneta_port *pp)
{
	int elected_cpu = 0, max_cpu, cpu, i = 0;

	/* Use the cpu associated to the rxq when it is online, in all
	 * the other cases, use the cpu 0 which can't be offline.
	 */
	if (cpu_online(pp->rxq_def))
		elected_cpu = pp->rxq_def;

	max_cpu = num_present_cpus();

	for_each_online_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq;

		for (rxq = 0; rxq < rxq_number; rxq++)
			if ((rxq % max_cpu) == cpu)
				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

		if (cpu == elected_cpu)
			/* Map the default receive queue to the elected CPU */
			rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);

		/* We update the TX queue map only if we have one
		 * queue. In this case we associate the TX queue to
		 * the CPU bound to the default RX queue.
		 */
		if (txq_number == 1)
			txq_map = (cpu == elected_cpu) ?
				MVNETA_CPU_TXQ_ACCESS(1) : 0;
		else
			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
				MVNETA_CPU_TXQ_ACCESS_ALL_MASK;

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);

		/* Update the interrupt mask on each CPU according to the
		 * new mapping
		 */
		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
					 pp, true);
		i++;
	}
}

static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	int other_cpu;
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_online);
	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

	spin_lock(&pp->lock);
	/* Configuring the driver for a new CPU while the driver is
	 * stopping is racy, so just avoid it.
	 */
	if (pp->is_stopped) {
		spin_unlock(&pp->lock);
		return 0;
	}
	netif_tx_stop_all_queues(pp->dev);

	/* We have to synchronise on the napi of each CPU except the
	 * one just being woken up
	 */
	for_each_online_cpu(other_cpu) {
		if (other_cpu != cpu) {
			struct mvneta_pcpu_port *other_port =
				per_cpu_ptr(pp->ports, other_cpu);

			napi_synchronize(&other_port->napi);
		}
	}

	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	napi_enable(&port->napi);

	/* Enable per-CPU interrupts on the CPU that is
	 * brought up.
	 */
	mvneta_percpu_enable(pp);

	/* Re-elect the CPU handling the default RX queue now that a
	 * new CPU is available.
	 */
	mvneta_percpu_elect(pp);

	/* Unmask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE);
	netif_tx_start_all_queues(pp->dev);
	spin_unlock(&pp->lock);
	return 0;
}

static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_online);
	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

	/* Thanks to this lock we are sure that any pending cpu election is
	 * done.
	 */
	spin_lock(&pp->lock);
	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	spin_unlock(&pp->lock);

	napi_synchronize(&port->napi);
	napi_disable(&port->napi);

	/* Disable per-CPU interrupts on the CPU that is brought down */
	mvneta_percpu_disable(pp);
	return 0;
}

static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_dead);

	/* Check if a new CPU must be elected now this one is down */
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);

	/* Unmask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE);
	netif_tx_start_all_queues(pp->dev);
	return 0;
}

static int mvneta_open(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		goto err_cleanup_rxqs;

	/* Connect to port interrupt line */
	if (pp->neta_armada3700)
		ret = request_irq(pp->dev->irq, mvneta_isr, 0,
				  dev->name, pp);
	else
		ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
					 dev->name, pp->ports);
	if (ret) {
		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
		goto err_cleanup_txqs;
	}

	if (!pp->neta_armada3700) {
		/* Enable per-CPU interrupt on all the CPU to handle our RX
		 * queue interrupt
		 */
		on_each_cpu(mvneta_percpu_enable, pp, true);

		pp->is_stopped = false;
		/* Register a CPU notifier to handle the case where our CPU
		 * might be taken offline.
		 */
		ret = cpuhp_state_add_instance_nocalls(online_hpstate,
						       &pp->node_online);
		if (ret)
			goto err_free_irq;

		ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						       &pp->node_dead);
		if (ret)
			goto err_free_online_hp;
	}

	ret = mvneta_mdio_probe(pp);
	if (ret < 0) {
		netdev_err(dev, "cannot probe MDIO bus\n");
		goto err_free_dead_hp;
	}

	mvneta_start_dev(pp);

	return 0;

err_free_dead_hp:
	if (!pp->neta_armada3700)
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
err_free_online_hp:
	if (!pp->neta_armada3700)
		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
err_free_irq:
	if (pp->neta_armada3700) {
		free_irq(pp->dev->irq, pp);
	} else {
		on_each_cpu(mvneta_percpu_disable, pp, true);
		free_percpu_irq(pp->dev->irq, pp->ports);
	}
err_cleanup_txqs:
	mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
	mvneta_cleanup_rxqs(pp);
	return ret;
}

/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->neta_armada3700) {
		/* Inform that we are stopping so we don't want to setup the
		 * driver for new CPUs in the online notifier. The code of
		 * the online notifier is protected by the same spinlock, so
		 * when we get the lock, the notifier work is done.
		 */
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		mvneta_stop_dev(pp);
		mvneta_mdio_remove(pp);

		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
		on_each_cpu(mvneta_percpu_disable, pp, true);
		free_percpu_irq(dev->irq, pp->ports);
	} else {
		mvneta_stop_dev(pp);
		mvneta_mdio_remove(pp);
		free_irq(dev->irq, pp);
	}

	mvneta_cleanup_rxqs(pp);
	mvneta_cleanup_txqs(pp);

	return 0;
}

static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_mii_ioctl(pp->phylink, ifr, cmd);
}

/* Ethtool methods */

/* Set link ksettings (phy address, speed) for ethtools */
static int
mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mvneta_port *pp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(pp->phylink, cmd);
}

/* Get link ksettings for ethtools */
static int
mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mvneta_port *pp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(pp->phylink, cmd);
}

static int mvneta_ethtool_nway_reset(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_ethtool_nway_reset(pp->phylink);
}

/* Set interrupt coalescing for ethtools */
static int mvneta_ethtool_set_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;
		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
	}

	return 0;
}

/* get coalescing for ethtools */
static int mvneta_ethtool_get_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);

	c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
	c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;

	c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
	return 0;
}

static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
					 struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(netdev);

	ring->rx_max_pending = MVNETA_MAX_RXD;
	ring->tx_max_pending = MVNETA_MAX_TXD;
	ring->rx_pending = pp->rx_ring_size;
	ring->tx_pending = pp->tx_ring_size;
}

static int mvneta_ethtool_set_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
		return -EINVAL;
	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
		ring->rx_pending : MVNETA_MAX_RXD;

	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
	if (pp->tx_ring_size != ring->tx_pending)
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    pp->tx_ring_size, ring->tx_pending);

	if (netif_running(dev)) {
		mvneta_stop(dev);
		if (mvneta_open(dev)) {
			netdev_err(dev,
				   "error on opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
					  struct ethtool_pauseparam *pause)
{
	struct mvneta_port *pp = netdev_priv(dev);

	phylink_ethtool_get_pauseparam(pp->phylink, pause);
}

static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_ethtool_set_pauseparam(pp->phylink, pause);
}

static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
	}
}

static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
	const struct mvneta_statistic *s;
	void __iomem *base = pp->base;
	u32 high, low;
	u64 val;
	int i;

	for (i = 0, s = mvneta_statistics;
	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
	     s++, i++) {
		val = 0;

		switch (s->type) {
		case T_REG_32:
			val = readl_relaxed(base + s->offset);
			break;
		case T_REG_64:
			/* Docs say to read low 32-bit then high */
			low = readl_relaxed(base + s->offset);
			high = readl_relaxed(base + s->offset + 4);
			val = (u64)high << 32 | low;
			break;
		case T_SW:
			switch (s->offset) {
			case ETHTOOL_STAT_EEE_WAKEUP:
				val = phylink_get_eee_err(pp->phylink);
				break;
			case ETHTOOL_STAT_SKB_ALLOC_ERR:
				val = pp->rxqs[0].skb_alloc_err;
				break;
			case ETHTOOL_STAT_REFILL_ERR:
				val = pp->rxqs[0].refill_err;
				break;
			}
			break;
		}

		pp->ethtool_stats[i] += val;
	}
}

static void mvneta_ethtool_get_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int i;

	mvneta_ethtool_update_stats(pp);

	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
		*data++ = pp->ethtool_stats[i];
}

static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvneta_statistics);
	return -EOPNOTSUPP;
}

static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
	return MVNETA_RSS_LU_TABLE_SIZE;
}

static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
				    struct ethtool_rxnfc *info,
				    u32 *rules __always_unused)
{
	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = rxq_number;
		return 0;
	case ETHTOOL_GRXFH:
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

static int mvneta_config_rss(struct mvneta_port *pp)
{
	int cpu;
	u32 val;

	netif_tx_stop_all_queues(pp->dev);

	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);

	if (!pp->neta_armada3700) {
		/* We have to synchronise on the napi of each CPU */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *pcpu_port =
				per_cpu_ptr(pp->ports, cpu);

			napi_synchronize(&pcpu_port->napi);
			napi_disable(&pcpu_port->napi);
		}
	} else {
		napi_synchronize(&pp->napi);
		napi_disable(&pp->napi);
	}

	pp->rxq_def = pp->indir[0];

	/* Update unicast mapping */
	mvneta_set_rx_mode(pp->dev);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	/* Update the elected CPU matching the new rxq_def */
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);

	if (!pp->neta_armada3700) {
		/* Re-enable the napi on each CPU */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *pcpu_port =
				per_cpu_ptr(pp->ports, cpu);

			napi_enable(&pcpu_port->napi);
		}
	} else {
		napi_enable(&pp->napi);
	}

	netif_tx_start_all_queues(pp->dev);

	return 0;
}

static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
				   const u8 *key, const u8 hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* RSS look-up table update is not supported on Armada 3700 */
	if (pp->neta_armada3700)
		return -EOPNOTSUPP;

	/* We require at least one supported parameter to be changed
	 * and no change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);

	return mvneta_config_rss(pp);
}

static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				   u8 *hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* RSS look-up table is not available on Armada 3700 */
	if (pp->neta_armada3700)
		return -EOPNOTSUPP;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);

	return 0;
}

static void mvneta_ethtool_get_wol(struct net_device *dev,
				   struct ethtool_wolinfo *wol)
{
	struct mvneta_port *pp = netdev_priv(dev);

	phylink_ethtool_get_wol(pp->phylink, wol);
}

static int mvneta_ethtool_set_wol(struct net_device *dev,
				  struct ethtool_wolinfo *wol)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	ret = phylink_ethtool_set_wol(pp->phylink, wol);
	if (!ret)
		device_set_wakeup_enable(&dev->dev, !!wol->wolopts);

	return ret;
}

static int mvneta_ethtool_get_eee(struct net_device *dev,
				  struct ethtool_eee *eee)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u32 lpi_ctl0;

	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);

	eee->eee_enabled = pp->eee_enabled;
	eee->eee_active = pp->eee_active;
	eee->tx_lpi_enabled = pp->tx_lpi_enabled;
	eee->tx_lpi_timer = (lpi_ctl0) >> 8;

	return phylink_ethtool_get_eee(pp->phylink, eee);
}

static int mvneta_ethtool_set_eee(struct net_device *dev,
				  struct ethtool_eee *eee)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u32 lpi_ctl0;

	/* The Armada 37x documents do not give limits for this other than
	 * it being an 8-bit register.
	 */
	if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
		return -EINVAL;

	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
	lpi_ctl0 &= ~(0xff << 8);
	lpi_ctl0 |= eee->tx_lpi_timer << 8;
	mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);

	pp->eee_enabled = eee->eee_enabled;
	pp->tx_lpi_enabled = eee->tx_lpi_enabled;

	mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);

	return phylink_ethtool_set_eee(pp->phylink, eee);
}

static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open = mvneta_open,
	.ndo_stop = mvneta_stop,
	.ndo_start_xmit = mvneta_tx,
	.ndo_set_rx_mode = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu = mvneta_change_mtu,
	.ndo_fix_features = mvneta_fix_features,
	.ndo_get_stats64 = mvneta_get_stats64,
	.ndo_do_ioctl = mvneta_ioctl,
};

static const struct ethtool_ops mvneta_eth_tool_ops = {
	.nway_reset = mvneta_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.set_coalesce = mvneta_ethtool_set_coalesce,
	.get_coalesce = mvneta_ethtool_get_coalesce,
	.get_drvinfo = mvneta_ethtool_get_drvinfo,
	.get_ringparam = mvneta_ethtool_get_ringparam,
	.set_ringparam = mvneta_ethtool_set_ringparam,
	.get_pauseparam = mvneta_ethtool_get_pauseparam,
	.set_pauseparam = mvneta_ethtool_set_pauseparam,
	.get_strings = mvneta_ethtool_get_strings,
	.get_ethtool_stats = mvneta_ethtool_get_stats,
	.get_sset_count = mvneta_ethtool_get_sset_count,
	.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
	.get_rxnfc = mvneta_ethtool_get_rxnfc,
	.get_rxfh = mvneta_ethtool_get_rxfh,
	.set_rxfh = mvneta_ethtool_set_rxfh,
	.get_link_ksettings = mvneta_ethtool_get_link_ksettings,
	.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
	.get_wol = mvneta_ethtool_get_wol,
	.set_wol = mvneta_ethtool_set_wol,
	.get_eee = mvneta_ethtool_get_eee,
	.set_eee = mvneta_ethtool_set_eee,
};

/* Initialize hw */
static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
	if (!pp->rxqs)
		return -ENOMEM;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
		rxq->buf_virt_addr
			= devm_kmalloc_array(pp->dev->dev.parent,
					     rxq->size,
					     sizeof(*rxq->buf_virt_addr),
					     GFP_KERNEL);
		if (!rxq->buf_virt_addr)
			return -ENOMEM;
	}

	return 0;
}
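
/* platform glue : initialize decoding windows */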
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	if (dram) {
		for (i = 0; i < dram->num_cs; i++) {
			const struct mbus_dram_window *cs = dram->cs + i;

			mvreg_write(pp, MVNETA_WIN_BASE(i),
				    (cs->base & 0xffff0000) |
				    (cs->mbus_attr << 8) |
				    dram->mbus_dram_target_id);

			mvreg_write(pp, MVNETA_WIN_SIZE(i),
				    (cs->size - 1) & 0xffff0000);

			win_enable &= ~(1 << i);
			win_protect |= 3 << (2 * i);
		}
	} else {
		/* For Armada3700 open default 4GB Mbus window, leaving
		 * arbitration of target/attribute to a different layer
		 * of configuration.
		 */
		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
		win_enable &= ~BIT(0);
		win_protect = 3;
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
}

/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
	else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
		 phy_interface_mode_is_8023z(phy_mode))
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
	else if (!phy_interface_mode_is_rgmii(phy_mode))
		return -EINVAL;

	return 0;
}
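
/* Device initialization routine */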
static int mvneta_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *bm_node;
	struct mvneta_port *pp;
	struct net_device *dev;
	struct phylink *phylink;
	struct phy *comphy;
	const char *dt_mac_addr;
	char hw_mac_addr[ETH_ALEN];
	const char *mac_from;
	int tx_csum_limit;
	int phy_mode;
	int err;
	int cpu;

	dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
				      txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0)
		return -EINVAL;

	phy_mode = of_get_phy_mode(dn);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto err_free_irq;
	}

	comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
	if (comphy == ERR_PTR(-EPROBE_DEFER)) {
		err = -EPROBE_DEFER;
		goto err_free_irq;
	} else if (IS_ERR(comphy)) {
		comphy = NULL;
	}

	pp = netdev_priv(dev);
	spin_lock_init(&pp->lock);

	pp->phylink_config.dev = &dev->dev;
	pp->phylink_config.type = PHYLINK_NETDEV;

	phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
				 phy_mode, &mvneta_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto err_free_irq;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	dev->ethtool_ops = &mvneta_eth_tool_ops;

	pp->phylink = phylink;
	pp->comphy = comphy;
	pp->phy_interface = phy_mode;
	pp->dn = dn;

	pp->rxq_def = rxq_def;
	pp->indir[0] = rxq_def;

	/* Get special SoC configurations */
	if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
		pp->neta_armada3700 = true;

	pp->clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(pp->clk))
		pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_free_phylink;
	}

	clk_prepare_enable(pp->clk);

	pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);

	pp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pp->base)) {
		err = PTR_ERR(pp->base);
		goto err_clk;
	}

	/* Alloc per-cpu port structure */
	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
	if (!pp->ports) {
		err = -ENOMEM;
		goto err_clk;
	}

	/* Alloc per-cpu stats */
	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
	if (!pp->stats) {
		err = -ENOMEM;
		goto err_free_ports;
	}

	dt_mac_addr = of_get_mac_address(dn);
	if (!IS_ERR(dt_mac_addr)) {
		mac_from = "device tree";
		ether_addr_copy(dev->dev_addr, dt_mac_addr);
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
		if (tx_csum_limit < 0 ||
		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
			dev_info(&pdev->dev,
				 "Wrong TX csum limit in DT, set to %dB\n",
				 MVNETA_TX_CSUM_DEF_SIZE);
		}
	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
	} else {
		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
	}

	pp->tx_csum_limit = tx_csum_limit;

	pp->dram_target_info = mv_mbus_dram_info();
	/* Armada 3700 requires setting the default MBUS window
	 * configuration, however without using a filled
	 * mbus_dram_target_info structure.
	 */
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	pp->id = global_port_id++;
	pp->rx_offset_correction = 0;

	/* Obtain access to BM resources if enabled and already initialized */
	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
	if (bm_node) {
		pp->bm_priv = mvneta_bm_get(bm_node);
		if (pp->bm_priv) {
			err = mvneta_bm_port_init(pdev, pp);
			if (err < 0) {
				dev_info(&pdev->dev,
					 "use SW buffer management\n");
				mvneta_bm_put(pp->bm_priv);
				pp->bm_priv = NULL;
			}
		}
		/* Set RX packet offset correction for platforms whose
		 * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
		 * platforms and 0B for 32-bit ones.
		 */
		pp->rx_offset_correction = max(0,
					       NET_SKB_PAD -
					       MVNETA_RX_PKT_OFFSET_CORRECTION);
	}
	of_node_put(bm_node);

	err = mvneta_init(&pdev->dev, pp);
	if (err < 0)
		goto err_netdev;

	err = mvneta_port_power_up(pp, phy_mode);
	if (err < 0) {
		dev_err(&pdev->dev, "can't power up port\n");
		goto err_netdev;
	}

	/* The Armada 3700 network controller does not support per-cpu
	 * operation, so only a single NAPI instance should be initialized.
	 */
	if (pp->neta_armada3700) {
		netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
	} else {
		for_each_present_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			netif_napi_add(dev, &port->napi, mvneta_poll,
				       NAPI_POLL_WEIGHT);
			port->pp = pp;
		}
	}

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_TSO | NETIF_F_RXCSUM;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_netdev;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	return 0;

err_netdev:
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}
	free_percpu(pp->stats);
err_free_ports:
	free_percpu(pp->ports);
err_clk:
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
err_free_phylink:
	if (pp->phylink)
		phylink_destroy(pp->phylink);
err_free_irq:
	irq_dispose_mapping(dev->irq);
	return err;
}

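/* Device removal routine: tear down everything mvneta_probe() set up
 * that is not devm-managed, including the optional HW buffer manager
 * pools.
 */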
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
	phylink_destroy(pp->phylink);

	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}

	return 0;
}

#ifdef CONFIG_PM_SLEEP
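/* System suspend: stop the datapath and gate the clocks. The per-queue
 * software state is preserved so that resume can re-program the rings.
 */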
static int mvneta_suspend(struct device *device)
{
	int queue;
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);

	if (!netif_running(dev))
		goto clean_exit;

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
	}

	rtnl_lock();
	mvneta_stop_dev(pp);
	rtnl_unlock();

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		mvneta_rxq_drop_pkts(pp, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		mvneta_txq_hw_deinit(pp, txq);
	}

clean_exit:
	netif_device_detach(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);

	return 0;
}

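/* System resume: re-enable the clocks, restore the MBUS windows and port
 * defaults, then re-initialize the RX/TX rings in hardware before
 * restarting the interface.
 */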
static int mvneta_resume(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);
	int err, queue;

	clk_prepare_enable(pp->clk);
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
	if (pp->bm_priv) {
		err = mvneta_bm_port_init(pdev, pp);
		if (err < 0) {
			dev_info(&pdev->dev, "use SW buffer management\n");
			pp->bm_priv = NULL;
		}
	}
	mvneta_defaults_set(pp);
	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(device, "can't power up port\n");
		return err;
	}

	netif_device_attach(dev);

	if (!netif_running(dev))
		return 0;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->next_desc_to_proc = 0;
		mvneta_rxq_hw_init(pp, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->next_desc_to_proc = 0;
		mvneta_txq_hw_init(pp, txq);
	}

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = false;
		spin_unlock(&pp->lock);
		cpuhp_state_add_instance_nocalls(online_hpstate,
						 &pp->node_online);
		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						 &pp->node_dead);
	}

	rtnl_lock();
	mvneta_start_dev(pp);
	rtnl_unlock();
	mvneta_set_rx_mode(dev);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);

static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ .compatible = "marvell,armada-3700-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
		.pm = &mvneta_pm_ops,
	},
};

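/* The two CPU hotplug multi-states are registered before the platform
 * driver, so they are available by the time a port is opened and adds
 * its per-CPU instances to them.
 */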
static int __init mvneta_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
				      mvneta_cpu_online,
				      mvneta_cpu_down_prepare);
	if (ret < 0)
		goto out;
	online_hpstate = ret;
	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
				      NULL, mvneta_cpu_dead);
	if (ret)
		goto err_dead;

	ret = platform_driver_register(&mvneta_driver);
	if (ret)
		goto err;
	return 0;

err:
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
err_dead:
	cpuhp_remove_multi_state(online_hpstate);
out:
	return ret;
}
module_init(mvneta_driver_init);

static void __exit mvneta_driver_exit(void)
{
	platform_driver_unregister(&mvneta_driver);
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
	cpuhp_remove_multi_state(online_hpstate);
}
module_exit(mvneta_driver_exit);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

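/* Module parameters: the queue counts and default RX queue are read-only
 * once loaded (mode 0444), while rx_copybreak (the size threshold below
 * which received frames are copied into a small, freshly allocated skb)
 * stays tunable at runtime (mode 0644).
 */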
module_param(rxq_number, int, 0444);
module_param(txq_number, int, 0444);

module_param(rxq_def, int, 0444);
module_param(rx_copybreak, int, 0644);