/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
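/* Registers */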
#define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
#define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(0)
#define      MVNETA_RXQ_SHORT_POOL_ID_SHIFT     4
#define      MVNETA_RXQ_SHORT_POOL_ID_MASK      0x30
#define      MVNETA_RXQ_LONG_POOL_ID_SHIFT      6
#define      MVNETA_RXQ_LONG_POOL_ID_MASK       0xc0
#define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK     (0xf << 8)
#define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)   ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)             (0x14c0 + ((q) << 2))
#define      MVNETA_RXQ_NON_OCCUPIED(v)         ((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)             (0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)                  (0x14a0 + ((q) << 2))
#define      MVNETA_RXQ_BUF_SIZE_SHIFT          19
#define      MVNETA_RXQ_BUF_SIZE_MASK           (0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)                (0x14e0 + ((q) << 2))
#define      MVNETA_RXQ_OCCUPIED_ALL_MASK       0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)         (0x1500 + ((q) << 2))
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT  16
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX    255
#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)    (0x1700 + ((pool) << 2))
#define      MVNETA_PORT_POOL_BUFFER_SZ_SHIFT   3
#define      MVNETA_PORT_POOL_BUFFER_SZ_MASK    0xfff8
#define MVNETA_PORT_RX_RESET                    0x1cc0
#define      MVNETA_PORT_RX_DMA_RESET           BIT(0)
#define MVNETA_PHY_ADDR                         0x2000
#define      MVNETA_PHY_ADDR_MASK               0x1f
#define MVNETA_MBUS_RETRY                       0x2010
#define MVNETA_UNIT_INTR_CAUSE                  0x2080
#define MVNETA_UNIT_CONTROL                     0x20B0
#define      MVNETA_PHY_POLLING_ENABLE          BIT(1)
#define MVNETA_WIN_BASE(w)                      (0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE                 0x2290
#define MVNETA_ACCESS_PROTECT_ENABLE            0x2294
#define MVNETA_PORT_CONFIG                      0x2400
#define      MVNETA_UNI_PROMISC_MODE            BIT(0)
#define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
#define      MVNETA_DEF_RXQ_ARP(q)              ((q) << 4)
#define      MVNETA_TX_UNSET_ERR_SUM            BIT(12)
#define      MVNETA_DEF_RXQ_TCP(q)              ((q) << 16)
#define      MVNETA_DEF_RXQ_UDP(q)              ((q) << 19)
#define      MVNETA_DEF_RXQ_BPDU(q)             ((q) << 22)
#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR     BIT(25)
#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)   (MVNETA_DEF_RXQ(q)       | \
						 MVNETA_DEF_RXQ_ARP(q)	 | \
						 MVNETA_DEF_RXQ_TCP(q)	 | \
						 MVNETA_DEF_RXQ_UDP(q)	 | \
						 MVNETA_DEF_RXQ_BPDU(q)	 | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND               0x2404
#define MVNETA_MAC_ADDR_LOW                     0x2414
#define MVNETA_MAC_ADDR_HIGH                    0x2418
#define MVNETA_SDMA_CONFIG                      0x241c
#define      MVNETA_SDMA_BRST_SIZE_16           4
#define      MVNETA_RX_BRST_SZ_MASK(burst)      ((burst) << 1)
#define      MVNETA_RX_NO_DATA_SWAP             BIT(4)
#define      MVNETA_TX_NO_DATA_SWAP             BIT(5)
#define      MVNETA_DESC_SWAP                   BIT(6)
#define      MVNETA_TX_BRST_SZ_MASK(burst)      ((burst) << 22)
#define MVNETA_PORT_STATUS                      0x2444
#define      MVNETA_TX_IN_PRGRS                 BIT(1)
#define      MVNETA_TX_FIFO_EMPTY               BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE                0x247c
#define MVNETA_SERDES_CFG                       0x24A0
#define      MVNETA_SGMII_SERDES_PROTO          0x0cc7
#define      MVNETA_QSGMII_SERDES_PROTO         0x0667
#define MVNETA_TYPE_PRIO                        0x24bc
#define      MVNETA_FORCE_UNI                   BIT(21)
#define MVNETA_TXQ_CMD_1                        0x24e4
#define MVNETA_TXQ_CMD                          0x2448
#define      MVNETA_TXQ_DISABLE_SHIFT           8
#define      MVNETA_TXQ_ENABLE_MASK             0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT           0x2484
#define MVNETA_OVERRUN_FRAME_COUNT              0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER               0x24f4
#define      MVNETA_GMAC_1MS_CLOCK_ENABLE       BIT(31)
#define MVNETA_ACC_MODE                         0x2500
#define MVNETA_BM_ADDRESS                       0x2504
#define MVNETA_CPU_MAP(cpu)                     (0x2540 + ((cpu) << 2))
#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK     0x000000ff
#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK     0x0000ff00
#define      MVNETA_CPU_RXQ_ACCESS(rxq)         BIT(rxq)
#define      MVNETA_CPU_TXQ_ACCESS(txq)         BIT((txq) + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q)             (0x2580 + ((q) << 2))

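/* Exception interrupt port/queue cause and mask registers. Whether a
 * given CPU sees a queue bit in these registers depends on the
 * CPU-to-queue mapping programmed through MVNETA_CPU_MAP.
 */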
#define MVNETA_INTR_NEW_CAUSE                    0x25a0
#define MVNETA_INTR_NEW_MASK                     0x25a4

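/* Cause/mask register layout, matching the masks below: bits 0..7
 * cover the TX queues (one bit per queue), bits 8..15 cover the RX
 * queues and bit 31 is the miscellaneous interrupt summary.
 */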
#define      MVNETA_TX_INTR_MASK(nr_txqs)        (((1 << (nr_txqs)) - 1) << 0)
#define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
#define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << (nr_rxqs)) - 1) << 8)
#define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
#define      MVNETA_MISCINTR_INTR_MASK           BIT(31)

#define MVNETA_INTR_OLD_CAUSE                    0x25a8
#define MVNETA_INTR_OLD_MASK                     0x25ac

/* Miscellaneous interrupt cause and mask registers */
#define MVNETA_INTR_MISC_CAUSE                   0x25b0
#define MVNETA_INTR_MISC_MASK                    0x25b4

#define      MVNETA_CAUSE_PHY_STATUS_CHANGE      BIT(0)
#define      MVNETA_CAUSE_LINK_CHANGE            BIT(1)
#define      MVNETA_CAUSE_PTP                    BIT(4)

#define      MVNETA_CAUSE_INTERNAL_ADDR_ERR      BIT(7)
#define      MVNETA_CAUSE_RX_OVERRUN             BIT(8)
#define      MVNETA_CAUSE_RX_CRC_ERROR           BIT(9)
#define      MVNETA_CAUSE_RX_LARGE_PKT           BIT(10)
#define      MVNETA_CAUSE_TX_UNDERUN             BIT(11)
#define      MVNETA_CAUSE_PRBS_ERR               BIT(12)
#define      MVNETA_CAUSE_PSC_SYNC_CHANGE        BIT(13)
#define      MVNETA_CAUSE_SERDES_SYNC_ERR        BIT(14)

#define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT    16
#define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define      MVNETA_CAUSE_TXQ_ERROR_SHIFT        24
#define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK     (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)      (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE                       0x25b8
#define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0x000000ff

#define MVNETA_RXQ_CMD                           0x2680
#define      MVNETA_RXQ_DISABLE_SHIFT            8
#define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)             (0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)               (0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0                       0x2c00
#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
#define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
#define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
#define MVNETA_GMAC_CTRL_2                       0x2c08
#define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
#define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
#define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
#define      MVNETA_GMAC2_PORT_RESET             BIT(6)
#define MVNETA_GMAC_STATUS                       0x2c10
#define      MVNETA_GMAC_LINK_UP                 BIT(0)
#define      MVNETA_GMAC_SPEED_1000              BIT(1)
#define      MVNETA_GMAC_SPEED_100               BIT(2)
#define      MVNETA_GMAC_FULL_DUPLEX             BIT(3)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE     BIT(4)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
#define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
#define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
#define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
#define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
#define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
#define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
#define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
#define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
#define MVNETA_MIB_COUNTERS_BASE                 0x3000
#define      MVNETA_MIB_LATE_COLLISION           0x7c
#define MVNETA_DA_FILT_SPEC_MCAST                0x3400
#define MVNETA_DA_FILT_OTH_MCAST                 0x3500
#define MVNETA_DA_FILT_UCAST_BASE                0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)              (0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)                   (0x3c20 + ((q) << 2))
#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK     0x3fff0000
#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)   ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)                 (0x3c60 + ((q) << 2))
#define      MVNETA_TXQ_DEC_SENT_SHIFT           16
#define      MVNETA_TXQ_DEC_SENT_MASK            0xff
#define MVNETA_TXQ_STATUS_REG(q)                 (0x3c40 + ((q) << 2))
#define      MVNETA_TXQ_SENT_DESC_SHIFT          16
#define      MVNETA_TXQ_SENT_DESC_MASK           0x3fff0000
#define MVNETA_PORT_TX_RESET                     0x3cf0
#define      MVNETA_PORT_TX_DMA_RESET            BIT(0)
#define MVNETA_TX_MTU                            0x3e0c
#define MVNETA_TX_TOKEN_SIZE                     0x3e14
#define      MVNETA_TX_TOKEN_SIZE_MAX            0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
#define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	 0xff

/* Compute the next descriptor index, wrapping at the end of the ring */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* Default coalescing thresholds: tx-done packets, rx packets, rx usecs */
#define MVNETA_TXDONE_COAL_PKTS		0
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

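/* The two bytes Marvell header ("MH"). On RX the hardware prepends it
 * to the frame (it is filled with zeroes unless a switch-specific mode
 * is enabled), which conveniently leaves the IP header aligned on a
 * 4-byte boundary.
 */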
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_TX_CSUM_DEF_SIZE		1600
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT1		1
#define MVNETA_ACC_MODE_EXT2		2

#define MVNETA_MAX_DECODE_WIN		6

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* The RSS lookup table actually has 256 entries but we do not use
 * more than one entry in this driver
 */
#define MVNETA_RSS_LU_TABLE_SIZE	1

/* TSO header size */
#define TSO_HEADER_SIZE			128

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS		100

#define MVNETA_MAX_SKB_DESCS		(MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

/* Number of bytes to be taken into account by HW when putting incoming
 * data to the buffers. Needed in case NET_SKB_PAD exceeds the maximum
 * packet offset supported in the MVNETA_RXQ_CONFIG_REG(q) registers.
 */
#define MVNETA_RX_PKT_OFFSET_CORRECTION	64

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			      \
	      cache_line_size())

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32	32
#define T_REG_64	64

static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
};

struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port *pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct napi;

	/* Cause of the previous interrupt */
	u32 cause_rx_tx;
};

struct mvneta_port {
	u8 id;
	struct mvneta_pcpu_port __percpu *ports;
	struct mvneta_pcpu_stats __percpu *stats;

	int pkt_size;
	unsigned int frag_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;
	struct hlist_node node_online;
	struct hlist_node node_dead;
	int rxq_def;
	/* Protect the access to the percpu interrupt registers,
	 * ensuring that the configuration remains coherent.
	 */
	spinlock_t lock;
	bool is_stopped;

	u32 cause_rx_tx;
	struct napi_struct napi;

	/* Core clock */
	struct clk *clk;
	/* AXI clock */
	struct clk *clk_bus;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	struct mii_bus *mii_bus;
	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;
	unsigned int tx_csum_limit;
	unsigned int use_inband_status:1;

	struct mvneta_bm *bm_priv;
	struct mvneta_bm_pool *pool_long;
	struct mvneta_bm_pool *pool_short;
	int bm_win_id;

	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];

	/* Flags for special SoC configurations */
	bool neta_armada3700;
	u16 rx_offset_correction;
	const struct mbus_dram_target_info *dram_target_info;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design.
 */

#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_BM_POOL_SHIFT	13
#define MVNETA_RXD_BM_POOL_MASK		(BIT(13) | BIT(14))
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserved1;
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved2;
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet		*/
	u16  reserved1;
	u16  data_size;		/* Size of received packet in bytes	*/

	u32  buf_phys_addr;	/* Physical address of the buffer	*/
	u32  reserved2;

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;
	u16  reserved4;

	u32  reserved5;
	u32  reserved6;
};
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u16  reserved1;
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes	*/
	u16  reserved1;
	u32  status;		/* Info about received packet		*/

	u32  reserved2;
	u32  buf_phys_addr;	/* Physical address of the buffer	*/

	u16  reserved4;
	u16  reserved3;
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */

	u32  reserved5;
	u32  reserved6;
};
#endif

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptors in the
	 * descriptor ring
	 */
	int count;
	int pending;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;

	/* Affinity mask for CPUs */
	cpumask_t affinity_mask;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_rx_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX buffer */
	void **buf_virt_addr;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

static enum cpuhp_state online_hpstate;

/* Number of RX and TX queues used by the driver */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

/* HW BM needs each port to be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

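/* Utility/helper methods */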
/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

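/* Increment txq get counter */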
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters to clear them */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}

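/* Get System Network Statistics */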
static void
mvneta_get_stats64(struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_dropped	= dev->stats.rx_dropped;

	stats->tx_dropped	= dev->stats.tx_dropped;
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX buffer. Each RX buffer is in a
 * single descriptor.
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx descriptors called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val =  mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta less than 256
	 */
	val = pend_desc + txq->pending;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
	txq->pending = 0;
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Enable buffer management (BM) */
static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val |= MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for bigger packets */
static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
				     struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for smaller packets */
static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
				      struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Set port's receive buffer size for assigned BM pool */
static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
					      int buf_size,
					      u8 pool_id)
{
	u32 val;

	if (!IS_ALIGNED(buf_size, 8)) {
		dev_warn(pp->dev->dev.parent,
			 "illegal buf_size value %d, round to %d\n",
			 buf_size, ALIGN(buf_size, 8));
		buf_size = ALIGN(buf_size, 8);
	}

	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
}

/* Configure an MBUS window in order to enable access to BM internal SRAM */
static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
				  u8 target, u8 attr)
{
	u32 win_enable, win_protect;
	int i;

	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);

	if (pp->bm_win_id < 0) {
		/* iterate over all windows, looking for a free one */
		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
			if (win_enable & (1 << i)) {
				pp->bm_win_id = i;
				break;
			}
		}
		if (i == MVNETA_MAX_DECODE_WIN)
			return -ENOMEM;
	} else {
		i = pp->bm_win_id;
	}

	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

	if (i < 4)
		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);

	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
		    (attr << 8) | target);

	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);

	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
	win_protect |= 3 << (2 * i);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);

	win_enable &= ~(1 << i);
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);

	return 0;
}

static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
{
	u32 wsize;
	u8 target, attr;
	int err;

	/* Get BM window information */
	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
					 &target, &attr);
	if (err < 0)
		return err;

	pp->bm_win_id = -1;

	/* Open NETA -> BM window */
	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
				     target, attr);
	if (err < 0) {
		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
		return err;
	}
	return 0;
}

/* Assign and initialize pools for port. In case of failure, the buffer
 * manager will remain disabled for the current port.
 */
static int mvneta_bm_port_init(struct platform_device *pdev,
			       struct mvneta_port *pp)
{
	struct device_node *dn = pdev->dev.of_node;
	u32 long_pool_id, short_pool_id;

	if (!pp->neta_armada3700) {
		int ret;

		ret = mvneta_bm_port_mbus_init(pp);
		if (ret)
			return ret;
	}

	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
		netdev_info(pp->dev, "missing long pool id\n");
		return -EINVAL;
	}

	/* Create port's long pool depending on mtu */
	pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
					   MVNETA_BM_LONG, pp->id,
					   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
	if (!pp->pool_long) {
		netdev_info(pp->dev, "fail to obtain long pool for port\n");
		return -ENOMEM;
	}

	pp->pool_long->port_map |= 1 << pp->id;

	mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
				   pp->pool_long->id);

	/* If short pool id is not defined, assume using single pool */
	if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
		short_pool_id = long_pool_id;

	/* Create port's short pool */
	pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
					    MVNETA_BM_SHORT, pp->id,
					    MVNETA_BM_SHORT_PKT_SIZE);
	if (!pp->pool_short) {
		netdev_info(pp->dev, "fail to obtain short pool for port\n");
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		return -ENOMEM;
	}

	if (short_pool_id != long_pool_id) {
		pp->pool_short->port_map |= 1 << pp->id;
		mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
					   pp->pool_short->id);
	}

	return 0;
}

/* Update settings of the long pool after MTU change and refill it */
static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
{
	struct mvneta_bm_pool *bm_pool = pp->pool_long;
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
	int num;

	/* Release all buffers from long pool */
	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
	if (hwbm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n",
		     bm_pool->id);
		goto bm_mtu_err;
	}

	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));

	/* Fill entire long pool */
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
	if (num != hwbm_pool->size) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     bm_pool->id, num, hwbm_pool->size);
		goto bm_mtu_err;
	}
	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);

	return;

bm_mtu_err:
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);

	pp->bm_priv = NULL;
	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
}

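/* Start the Ethernet port RX and TX activity */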
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		if (rxq->descs)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & MVNETA_RXQ_ENABLE_MASK);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & MVNETA_TXQ_ENABLE_MASK);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

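/* Enable or disable in-band link auto-negotiation on the GMAC */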
static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
{
	u32 val;

	if (enable) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
			 MVNETA_GMAC_FORCE_LINK_DOWN |
			 MVNETA_GMAC_AN_FLOW_CTRL_EN);
		val |= MVNETA_GMAC_INBAND_AN_ENABLE |
		       MVNETA_GMAC_AN_SPEED_EN |
		       MVNETA_GMAC_AN_DUPLEX_EN;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	} else {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
		       MVNETA_GMAC_AN_SPEED_EN |
		       MVNETA_GMAC_AN_DUPLEX_EN);
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	}
}

static void mvneta_percpu_unmask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queue are unmasked, but actually only the ones
	 * mapped to this CPU will be unmasked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK_ALL |
		    MVNETA_TX_INTR_MASK_ALL |
		    MVNETA_MISCINTR_INTR_MASK);
}

static void mvneta_percpu_mask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queue are masked, but actually only the ones
	 * mapped to this CPU will be masked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}

static void mvneta_percpu_clear_intr_cause(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queue are cleared, but actually only the ones
	 * mapped to this CPU will be cleared
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
}

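/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 * This method can be called after mvneta_port_down() to return the
 * port settings to defaults.
 */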
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;
	int max_cpu = num_present_cpus();

	/* Clear all Cause registers */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map. CPUs are assigned to the RX and
	 * TX queues modulo their number. If there is only one TX
	 * queue then it is assigned to the CPU associated to the
	 * default RX queue.
	 */
	for_each_present_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq, txq;
		if (!pp->neta_armada3700) {
			for (rxq = 0; rxq < rxq_number; rxq++)
				if ((rxq % max_cpu) == cpu)
					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

			for (txq = 0; txq < txq_number; txq++)
				if ((txq % max_cpu) == cpu)
					txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

			/* With only one TX queue we configure a special case
			 * which will allow to get all the irq on a single
			 * CPU
			 */
			if (txq_number == 1)
				txq_map = (cpu == pp->rxq_def) ?
					MVNETA_CPU_TXQ_ACCESS(1) : 0;

		} else {
			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
			rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
		}

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
	}

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	if (pp->bm_priv)
		/* HW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT2;
	else
		/* SW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT1;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	if (pp->bm_priv)
		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_autoneg(pp, pp->use_inband_status);
	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

	mvneta_mib_counters_clear(pp);
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger than MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
	rxq->pkts_coal = value;
}

/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
	rxq->time_coal = value;
}

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

	txq->done_pkts_coal = value;
}

/* Handle rx descriptor fill by setting buf_phys_addr and buf_virt_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, void *virt_addr,
				struct mvneta_rx_queue *rxq)
{
	int i;

	rx_desc->buf_phys_addr = phys_addr;
	i = rx_desc - rxq->descs;
	rxq->buf_virt_addr[i] = virt_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}

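/* Set TXQ descriptor command fields relevant for checksum calculation */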
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |=  MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}

/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(status)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload based on the descriptor's status */
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
			   struct sk_buff *skb)
{
	if ((status & MVNETA_RXD_L3_IP4) &&
	    (status & MVNETA_RXD_L4_CSUM_OK)) {
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;
}

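/* Return tx queue pointer (find last set bit) according to <cause>
 * value returned from the tx_done register
 */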
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{
	int queue = fls(cause) - 1;

	return &pp->txqs[queue];
}

/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num,
				 struct netdev_queue *nq)
{
	unsigned int bytes_compl = 0, pkts_compl = 0;
	int i;

	for (i = 0; i < num; i++) {
		struct mvneta_tx_desc *tx_desc = txq->descs +
			txq->txq_get_index;
		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

		if (skb) {
			bytes_compl += skb->len;
			pkts_compl++;
		}

		mvneta_txq_inc_get(txq);

		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size, DMA_TO_DEVICE);
		if (!skb)
			continue;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}

/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
			    struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
	if (!tx_done)
		return;

	mvneta_txq_bufs_free(pp, txq, tx_done, nq);

	txq->count -= tx_done;

	if (netif_tx_queue_stopped(nq)) {
		if (txq->count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
	}
}

void *mvneta_frag_alloc(unsigned int frag_size)
{
	if (likely(frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(frag_size);
	else
		return kmalloc(frag_size, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(mvneta_frag_alloc);

void mvneta_frag_free(unsigned int frag_size, void *data)
{
	if (likely(frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}
EXPORT_SYMBOL_GPL(mvneta_frag_free);

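/* Refill processing for SW buffer management */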
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc,
			    struct mvneta_rx_queue *rxq)
{
	dma_addr_t phys_addr;
	void *data;

	data = mvneta_frag_alloc(pp->frag_size);
	if (!data)
		return -ENOMEM;

	phys_addr = dma_map_single(pp->dev->dev.parent, data,
				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
		mvneta_frag_free(pp->frag_size, data);
		return -ENOMEM;
	}

	phys_addr += pp->rx_offset_correction;
	mvneta_rx_desc_fill(rx_desc, phys_addr, data, rxq);
	return 0;
}

/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		__be16 l3_proto = vlan_get_protocol(skb);
		u8 l4_proto;

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else
			return MVNETA_TX_L4_CSUM_NOT;

		return mvneta_txq_desc_csum(skb_network_offset(skb),
					    l3_proto, ip_hdr_len, l4_proto);
	}

	return MVNETA_TX_L4_CSUM_NOT;
}

/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done, i;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	if (pp->bm_priv) {
		for (i = 0; i < rx_done; i++) {
			struct mvneta_rx_desc *rx_desc =
						  mvneta_rxq_next_desc_get(rxq);
			u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
			struct mvneta_bm_pool *bm_pool;

			bm_pool = &pp->bm_priv->bm_pools[pool_id];
			/* Return dropped buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
		}
		return;
	}

	for (i = 0; i < rxq->size; i++) {
		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
		void *data = rxq->buf_virt_addr[i];

		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
		mvneta_frag_free(pp->frag_size, data);
	}
}

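/* Main rx processing when using software buffer management */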
static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
			  struct mvneta_rx_queue *rxq)
{
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
	struct net_device *dev = pp->dev;
	int rx_done;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct sk_buff *skb;
		unsigned char *data;
		dma_addr_t phys_addr;
		u32 rx_status, frag_size;
		int rx_bytes, err, index;

		rx_done++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		index = rx_desc - rxq->descs;
		data = rxq->buf_virt_addr[index];
		phys_addr = rx_desc->buf_phys_addr;

		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
		/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (unlikely(!skb))
				goto err_drop_frame;

			dma_sync_single_range_for_cpu(dev->dev.parent,
						      phys_addr,
						      MVNETA_MH_SIZE + NET_SKB_PAD,
						      rx_bytes,
						      DMA_FROM_DEVICE);
			memcpy(skb_put(skb, rx_bytes),
			       data + MVNETA_MH_SIZE + NET_SKB_PAD,
			       rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			mvneta_rx_csum(pp, rx_status, skb);
			napi_gro_receive(&port->napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* leave the descriptor and buffer untouched */
			continue;
		}

		/* Refill processing */
		err = mvneta_rx_refill(pp, rx_desc, rxq);
		if (err) {
			netdev_err(dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			goto err_drop_frame;
		}

		frag_size = pp->frag_size;

		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);

		/* After refill old buffer has to be unmapped regardless
		 * the skb is successfully built or not.
		 */
		dma_unmap_single(dev->dev.parent, phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size),
				 DMA_FROM_DEVICE);

		if (!skb)
			goto err_drop_frame;

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_status, skb);

		napi_gro_receive(&port->napi, skb);
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	return rx_done;
}

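/* Main rx processing when using hardware buffer management */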
2046static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
2047 struct mvneta_rx_queue *rxq)
2048{
2049 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
2050 struct net_device *dev = pp->dev;
2051 int rx_done;
2052 u32 rcvd_pkts = 0;
2053 u32 rcvd_bytes = 0;
2054
2055
2056 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2057
2058 if (rx_todo > rx_done)
2059 rx_todo = rx_done;
2060
2061 rx_done = 0;
2062
2063
2064 while (rx_done < rx_todo) {
2065 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2066 struct mvneta_bm_pool *bm_pool = NULL;
2067 struct sk_buff *skb;
2068 unsigned char *data;
2069 dma_addr_t phys_addr;
2070 u32 rx_status, frag_size;
2071 int rx_bytes, err;
2072 u8 pool_id;
2073
2074 rx_done++;
2075 rx_status = rx_desc->status;
2076 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
2077 data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
2078 phys_addr = rx_desc->buf_phys_addr;
2079 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
2080 bm_pool = &pp->bm_priv->bm_pools[pool_id];
2081
2082 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
2083 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
2084err_drop_frame_ret_pool:
2085
2086 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2087 rx_desc->buf_phys_addr);
2088err_drop_frame:
2089 dev->stats.rx_errors++;
2090 mvneta_rx_error(pp, rx_desc);
2091
2092 continue;
2093 }
2094
2095 if (rx_bytes <= rx_copybreak) {
2096
2097 skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
2098 if (unlikely(!skb))
2099 goto err_drop_frame_ret_pool;
2100
2101 dma_sync_single_range_for_cpu(dev->dev.parent,
2102 rx_desc->buf_phys_addr,
2103 MVNETA_MH_SIZE + NET_SKB_PAD,
2104 rx_bytes,
2105 DMA_FROM_DEVICE);
2106 memcpy(skb_put(skb, rx_bytes),
2107 data + MVNETA_MH_SIZE + NET_SKB_PAD,
2108 rx_bytes);
2109
2110 skb->protocol = eth_type_trans(skb, dev);
2111 mvneta_rx_csum(pp, rx_status, skb);
2112 napi_gro_receive(&port->napi, skb);
2113
2114 rcvd_pkts++;
2115 rcvd_bytes += rx_bytes;
2116
2117
2118 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2119 rx_desc->buf_phys_addr);
2120
2121
2122 continue;
2123 }
2124
2125
2126 err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
2127 if (err) {
2128 netdev_err(dev, "Linux processing - Can't refill\n");
2129 rxq->missed++;
2130 goto err_drop_frame_ret_pool;
2131 }
2132
2133 frag_size = bm_pool->hwbm_pool.frag_size;
2134
2135 skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
2136
2137
2138
2139
2140 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2141 bm_pool->buf_size, DMA_FROM_DEVICE);
2142 if (!skb)
2143 goto err_drop_frame;
2144
2145 rcvd_pkts++;
2146 rcvd_bytes += rx_bytes;
2147
2148
2149 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
2150 skb_put(skb, rx_bytes);
2151
2152 skb->protocol = eth_type_trans(skb, dev);
2153
2154 mvneta_rx_csum(pp, rx_status, skb);
2155
2156 napi_gro_receive(&port->napi, skb);
2157 }
2158
2159 if (rcvd_pkts) {
2160 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2161
2162 u64_stats_update_begin(&stats->syncp);
2163 stats->rx_packets += rcvd_pkts;
2164 stats->rx_bytes += rcvd_bytes;
2165 u64_stats_update_end(&stats->syncp);
2166 }
2167
2168
2169 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2170
2171 return rx_done;
2172}
2173
2174static inline void
2175mvneta_tso_put_hdr(struct sk_buff *skb,
2176 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
2177{
2178 struct mvneta_tx_desc *tx_desc;
2179 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2180
2181 txq->tx_skb[txq->txq_put_index] = NULL;
2182 tx_desc = mvneta_txq_next_desc_get(txq);
2183 tx_desc->data_size = hdr_len;
2184 tx_desc->command = mvneta_skb_tx_csum(pp, skb);
2185 tx_desc->command |= MVNETA_TXD_F_DESC;
2186 tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
2187 txq->txq_put_index * TSO_HEADER_SIZE;
2188 mvneta_txq_inc_put(txq);
2189}
2190
2191static inline int
2192mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
2193 struct sk_buff *skb, char *data, int size,
2194 bool last_tcp, bool is_last)
2195{
2196 struct mvneta_tx_desc *tx_desc;
2197
2198 tx_desc = mvneta_txq_next_desc_get(txq);
2199 tx_desc->data_size = size;
2200 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
2201 size, DMA_TO_DEVICE);
2202 if (unlikely(dma_mapping_error(dev->dev.parent,
2203 tx_desc->buf_phys_addr))) {
2204 mvneta_txq_desc_put(txq);
2205 return -ENOMEM;
2206 }
2207
2208 tx_desc->command = 0;
2209 txq->tx_skb[txq->txq_put_index] = NULL;
2210
2211 if (last_tcp) {
2212
2213 tx_desc->command = MVNETA_TXD_L_DESC;
2214
2215
2216 if (is_last)
2217 txq->tx_skb[txq->txq_put_index] = skb;
2218 }
2219 mvneta_txq_inc_put(txq);
2220 return 0;
2221}
2222
2223static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
2224 struct mvneta_tx_queue *txq)
2225{
2226 int total_len, data_left;
2227 int desc_count = 0;
2228 struct mvneta_port *pp = netdev_priv(dev);
2229 struct tso_t tso;
2230 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2231 int i;
2232
2233
2234 if ((txq->count + tso_count_descs(skb)) >= txq->size)
2235 return 0;
2236
	/* The TCP header must reside entirely in the linear part of the skb */
	if (skb_headlen(skb) < hdr_len) {
		netdev_err(dev, "%s: skb head too short for TSO headers\n",
			   __func__);
		return 0;
	}
2241
2242
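	/* Initialize the TSO handler, and prepare the first payload */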
2243 tso_start(skb, &tso);
2244
2245 total_len = skb->len - hdr_len;
2246 while (total_len > 0) {
2247 char *hdr;
2248
2249 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
2250 total_len -= data_left;
2251 desc_count++;
2252
2253
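		/* Prepare packet headers: MAC + IP + TCP */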
2254 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
2255 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
2256
2257 mvneta_tso_put_hdr(skb, pp, txq);
2258
2259 while (data_left > 0) {
2260 int size;
2261 desc_count++;
2262
2263 size = min_t(int, tso.size, data_left);
2264
2265 if (mvneta_tso_put_data(dev, txq, skb,
2266 tso.data, size,
2267 size == data_left,
2268 total_len == 0))
2269 goto err_release;
2270 data_left -= size;
2271
2272 tso_build_data(skb, &tso, size);
2273 }
2274 }
2275
2276 return desc_count;
2277
2278err_release:
2279
2280
2281
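	/* Release all used data descriptors; header descriptors must not
	 * be DMA-unmapped.
	 */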
2282 for (i = desc_count - 1; i >= 0; i--) {
2283 struct mvneta_tx_desc *tx_desc = txq->descs + i;
2284 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
2285 dma_unmap_single(pp->dev->dev.parent,
2286 tx_desc->buf_phys_addr,
2287 tx_desc->data_size,
2288 DMA_TO_DEVICE);
2289 mvneta_txq_desc_put(txq);
2290 }
2291 return 0;
2292}
2293
2294
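/* Handle tx fragmentation processing */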
2295static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
2296 struct mvneta_tx_queue *txq)
2297{
2298 struct mvneta_tx_desc *tx_desc;
2299 int i, nr_frags = skb_shinfo(skb)->nr_frags;
2300
2301 for (i = 0; i < nr_frags; i++) {
2302 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2303 void *addr = page_address(frag->page.p) + frag->page_offset;
2304
2305 tx_desc = mvneta_txq_next_desc_get(txq);
2306 tx_desc->data_size = frag->size;
2307
2308 tx_desc->buf_phys_addr =
2309 dma_map_single(pp->dev->dev.parent, addr,
2310 tx_desc->data_size, DMA_TO_DEVICE);
2311
2312 if (dma_mapping_error(pp->dev->dev.parent,
2313 tx_desc->buf_phys_addr)) {
2314 mvneta_txq_desc_put(txq);
2315 goto error;
2316 }
2317
2318 if (i == nr_frags - 1) {
2319
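			/* Last descriptor */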
2320 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
2321 txq->tx_skb[txq->txq_put_index] = skb;
2322 } else {
2323
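			/* Descriptor in the middle: not first, not last */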
2324 tx_desc->command = 0;
2325 txq->tx_skb[txq->txq_put_index] = NULL;
2326 }
2327 mvneta_txq_inc_put(txq);
2328 }
2329
2330 return 0;
2331
2332error:
2333
2334
2335
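	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings.
	 */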
2336 for (i = i - 1; i >= 0; i--) {
2337 tx_desc = txq->descs + i;
2338 dma_unmap_single(pp->dev->dev.parent,
2339 tx_desc->buf_phys_addr,
2340 tx_desc->data_size,
2341 DMA_TO_DEVICE);
2342 mvneta_txq_desc_put(txq);
2343 }
2344
2345 return -ENOMEM;
2346}
2347
2348
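/* Main tx processing (ndo_start_xmit callback) */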
2349static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
2350{
2351 struct mvneta_port *pp = netdev_priv(dev);
2352 u16 txq_id = skb_get_queue_mapping(skb);
2353 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
2354 struct mvneta_tx_desc *tx_desc;
2355 int len = skb->len;
2356 int frags = 0;
2357 u32 tx_cmd;
2358
2359 if (!netif_running(dev))
2360 goto out;
2361
2362 if (skb_is_gso(skb)) {
2363 frags = mvneta_tx_tso(skb, dev, txq);
2364 goto out;
2365 }
2366
2367 frags = skb_shinfo(skb)->nr_frags + 1;
2368
2369
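	/* Get a descriptor for the first part of the packet */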
2370 tx_desc = mvneta_txq_next_desc_get(txq);
2371
2372 tx_cmd = mvneta_skb_tx_csum(pp, skb);
2373
2374 tx_desc->data_size = skb_headlen(skb);
2375
2376 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
2377 tx_desc->data_size,
2378 DMA_TO_DEVICE);
2379 if (unlikely(dma_mapping_error(dev->dev.parent,
2380 tx_desc->buf_phys_addr))) {
2381 mvneta_txq_desc_put(txq);
2382 frags = 0;
2383 goto out;
2384 }
2385
2386 if (frags == 1) {
2387
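		/* First and Last descriptor */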
2388 tx_cmd |= MVNETA_TXD_FLZ_DESC;
2389 tx_desc->command = tx_cmd;
2390 txq->tx_skb[txq->txq_put_index] = skb;
2391 mvneta_txq_inc_put(txq);
2392 } else {
2393
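		/* First but not Last */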
2394 tx_cmd |= MVNETA_TXD_F_DESC;
2395 txq->tx_skb[txq->txq_put_index] = NULL;
2396 mvneta_txq_inc_put(txq);
2397 tx_desc->command = tx_cmd;
2398
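		/* Continue with other skb fragments */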
2399 if (mvneta_tx_frag_process(pp, skb, txq)) {
2400 dma_unmap_single(dev->dev.parent,
2401 tx_desc->buf_phys_addr,
2402 tx_desc->data_size,
2403 DMA_TO_DEVICE);
2404 mvneta_txq_desc_put(txq);
2405 frags = 0;
2406 goto out;
2407 }
2408 }
2409
2410out:
2411 if (frags > 0) {
2412 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2413 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
2414
2415 netdev_tx_sent_queue(nq, len);
2416
2417 txq->count += frags;
2418 if (txq->count >= txq->tx_stop_threshold)
2419 netif_tx_stop_queue(nq);
2420
2421 if (!skb->xmit_more || netif_xmit_stopped(nq) ||
2422 txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
2423 mvneta_txq_pend_desc_add(pp, txq, frags);
2424 else
2425 txq->pending += frags;
2426
2427 u64_stats_update_begin(&stats->syncp);
2428 stats->tx_packets++;
2429 stats->tx_bytes += len;
2430 u64_stats_update_end(&stats->syncp);
2431 } else {
2432 dev->stats.tx_dropped++;
2433 dev_kfree_skb_any(skb);
2434 }
2435
2436 return NETDEV_TX_OK;
2437}
2438
2439
2440
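/* Free all buffers still held by a tx queue and reset its indices;
 * used when resetting the port.
 */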
2441static void mvneta_txq_done_force(struct mvneta_port *pp,
2442 struct mvneta_tx_queue *txq)
{
2445 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2446 int tx_done = txq->count;
2447
2448 mvneta_txq_bufs_free(pp, txq, tx_done, nq);
2449
2450
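	/* reset txq */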
2451 txq->count = 0;
2452 txq->txq_put_index = 0;
2453 txq->txq_get_index = 0;
2454}
2455
2456
2457
2458
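/* Handle tx done - called in softirq context. The <cause_tx_done> argument
 * has one bit set per TX queue with completed transmissions to process.
 */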
2459static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
2460{
2461 struct mvneta_tx_queue *txq;
2462 struct netdev_queue *nq;
2463
2464 while (cause_tx_done) {
2465 txq = mvneta_tx_done_policy(pp, cause_tx_done);
2466
2467 nq = netdev_get_tx_queue(pp->dev, txq->id);
2468 __netif_tx_lock(nq, smp_processor_id());
2469
2470 if (txq->count)
2471 mvneta_txq_done(pp, txq);
2472
2473 __netif_tx_unlock(nq);
		cause_tx_done &= ~(1 << txq->id);
2475 }
2476}
2477
2478
2479
2480
2481static int mvneta_addr_crc(unsigned char *addr)
2482{
2483 int crc = 0;
2484 int i;
2485
2486 for (i = 0; i < ETH_ALEN; i++) {
2487 int j;
2488
2489 crc = (crc ^ addr[i]) << 8;
2490 for (j = 7; j >= 0; j--) {
2491 if (crc & (0x100 << j))
2492 crc ^= 0x107 << j;
2493 }
2494 }
2495
2496 return crc;
2497}
2498
2499
2500
2501
2502
2503
2504
2505
2506static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
2507 unsigned char last_byte,
2508 int queue)
2509{
2510 unsigned int smc_table_reg;
2511 unsigned int tbl_offset;
2512 unsigned int reg_offset;
2513
2514
2515 tbl_offset = (last_byte / 4);
2516
2517 reg_offset = last_byte % 4;
2518
2519 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
2520 + tbl_offset * 4));
2521
2522 if (queue == -1)
2523 smc_table_reg &= ~(0xff << (8 * reg_offset));
2524 else {
2525 smc_table_reg &= ~(0xff << (8 * reg_offset));
2526 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2527 }
2528
2529 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
2530 smc_table_reg);
2531}
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
2542 unsigned char crc8,
2543 int queue)
2544{
2545 unsigned int omc_table_reg;
2546 unsigned int tbl_offset;
2547 unsigned int reg_offset;
2548
2549 tbl_offset = (crc8 / 4) * 4;
2550 reg_offset = crc8 % 4;
2551
2552 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
2553
2554 if (queue == -1) {
2555
2556 omc_table_reg &= ~(0xff << (8 * reg_offset));
2557 } else {
2558 omc_table_reg &= ~(0xff << (8 * reg_offset));
2559 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2560 }
2561
2562 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
2563}
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
2575 int queue)
2576{
2577 unsigned char crc_result = 0;
2578
2579 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
2580 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
2581 return 0;
2582 }
2583
2584 crc_result = mvneta_addr_crc(p_addr);
2585 if (queue == -1) {
2586 if (pp->mcast_count[crc_result] == 0) {
2587 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
2588 crc_result);
2589 return -EINVAL;
2590 }
2591
2592 pp->mcast_count[crc_result]--;
2593 if (pp->mcast_count[crc_result] != 0) {
2594 netdev_info(pp->dev,
2595 "After delete there are %d valid Mcast for crc8=0x%02x\n",
2596 pp->mcast_count[crc_result], crc_result);
2597 return -EINVAL;
2598 }
	} else {
		pp->mcast_count[crc_result]++;
	}
2601
2602 mvneta_set_other_mcast_addr(pp, crc_result, queue);
2603
2604 return 0;
2605}
2606
2607
2608static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
2609 int is_promisc)
2610{
2611 u32 port_cfg_reg, val;
2612
2613 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
2614
2615 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
2616
2617
2618 if (is_promisc) {
2619
2620 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
2621 val |= MVNETA_FORCE_UNI;
2622 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
2623 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
2624 } else {
2625
2626 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
2627 val &= ~MVNETA_FORCE_UNI;
2628 }
2629
2630 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
2631 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
2632}
2633
2634
2635static void mvneta_set_rx_mode(struct net_device *dev)
2636{
2637 struct mvneta_port *pp = netdev_priv(dev);
2638 struct netdev_hw_addr *ha;
2639
2640 if (dev->flags & IFF_PROMISC) {
2641
2642 mvneta_rx_unicast_promisc_set(pp, 1);
2643 mvneta_set_ucast_table(pp, pp->rxq_def);
2644 mvneta_set_special_mcast_table(pp, pp->rxq_def);
2645 mvneta_set_other_mcast_table(pp, pp->rxq_def);
2646 } else {
2647
2648 mvneta_rx_unicast_promisc_set(pp, 0);
2649 mvneta_set_ucast_table(pp, -1);
2650 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
2651
2652 if (dev->flags & IFF_ALLMULTI) {
2653
2654 mvneta_set_special_mcast_table(pp, pp->rxq_def);
2655 mvneta_set_other_mcast_table(pp, pp->rxq_def);
2656 } else {
2657
2658 mvneta_set_special_mcast_table(pp, -1);
2659 mvneta_set_other_mcast_table(pp, -1);
2660
2661 if (!netdev_mc_empty(dev)) {
2662 netdev_for_each_mc_addr(ha, dev) {
2663 mvneta_mcast_addr_set(pp, ha->addr,
2664 pp->rxq_def);
2665 }
2666 }
2667 }
2668 }
2669}
2670
2671
2672static irqreturn_t mvneta_isr(int irq, void *dev_id)
2673{
2674 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
2675
2676 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2677 napi_schedule(&pp->napi);
2678
2679 return IRQ_HANDLED;
2680}
2681
2682
2683static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
2684{
2685 struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
2686
2687 disable_percpu_irq(port->pp->dev->irq);
2688 napi_schedule(&port->napi);
2689
2690 return IRQ_HANDLED;
2691}
2692
2693static int mvneta_fixed_link_update(struct mvneta_port *pp,
2694 struct phy_device *phy)
2695{
2696 struct fixed_phy_status status;
2697 struct fixed_phy_status changed = {};
2698 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
2699
2700 status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
2701 if (gmac_stat & MVNETA_GMAC_SPEED_1000)
2702 status.speed = SPEED_1000;
2703 else if (gmac_stat & MVNETA_GMAC_SPEED_100)
2704 status.speed = SPEED_100;
2705 else
2706 status.speed = SPEED_10;
2707 status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
2708 changed.link = 1;
2709 changed.speed = 1;
2710 changed.duplex = 1;
2711 fixed_phy_update_state(phy, &status, &changed);
2712 return 0;
2713}
2714
2715
2716
2717
2718
2719
2720
2721
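/* NAPI handler.
 * Bits 0 - 7 of the causeRxTx register indicate that packets were
 * transmitted on the corresponding TXQ; bits 8 - 15 indicate that
 * packets were received on the corresponding RXQ.
 */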
2722static int mvneta_poll(struct napi_struct *napi, int budget)
2723{
2724 int rx_done = 0;
2725 u32 cause_rx_tx;
2726 int rx_queue;
2727 struct mvneta_port *pp = netdev_priv(napi->dev);
2728 struct net_device *ndev = pp->dev;
2729 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
2730
2731 if (!netif_running(pp->dev)) {
2732 napi_complete(napi);
2733 return rx_done;
2734 }
2735
2736
2737 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
2738 if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
2739 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
2740
2741 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2742 if (pp->use_inband_status && (cause_misc &
2743 (MVNETA_CAUSE_PHY_STATUS_CHANGE |
2744 MVNETA_CAUSE_LINK_CHANGE |
2745 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
2746 mvneta_fixed_link_update(pp, ndev->phydev);
2747 }
2748 }
2749
2750
2751 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
2752 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
2753 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
2754 }
2755
2756
2757
2758
2759 rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
2760
2761 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
2762 port->cause_rx_tx;
2763
2764 if (rx_queue) {
2765 rx_queue = rx_queue - 1;
2766 if (pp->bm_priv)
2767 rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]);
2768 else
2769 rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
2770 }
2771
2772 if (rx_done < budget) {
2773 cause_rx_tx = 0;
2774 napi_complete_done(napi, rx_done);
2775
2776 if (pp->neta_armada3700) {
2777 unsigned long flags;
2778
2779 local_irq_save(flags);
2780 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2781 MVNETA_RX_INTR_MASK(rxq_number) |
2782 MVNETA_TX_INTR_MASK(txq_number) |
2783 MVNETA_MISCINTR_INTR_MASK);
2784 local_irq_restore(flags);
2785 } else {
2786 enable_percpu_irq(pp->dev->irq, 0);
2787 }
2788 }
2789
2790 if (pp->neta_armada3700)
2791 pp->cause_rx_tx = cause_rx_tx;
2792 else
2793 port->cause_rx_tx = cause_rx_tx;
2794
2795 return rx_done;
2796}
2797
2798
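/* Fill an rxq with rx buffers; called when initializing a port */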
2799static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2800 int num)
2801{
2802 int i;
2803
2804 for (i = 0; i < num; i++) {
2805 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
2806 if (mvneta_rx_refill(pp, rxq->descs + i, rxq) != 0) {
2807 netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
2808 __func__, rxq->id, i, num);
2809 break;
2810 }
2811 }
2812
2813
2814
2815
2816 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2817
2818 return i;
2819}
2820
2821
2822static void mvneta_tx_reset(struct mvneta_port *pp)
2823{
2824 int queue;
2825
2826
2827 for (queue = 0; queue < txq_number; queue++)
2828 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2829
2830 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2831 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2832}
2833
2834static void mvneta_rx_reset(struct mvneta_port *pp)
2835{
2836 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2837 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2838}
2839
2840
2841
2842
2843static int mvneta_rxq_init(struct mvneta_port *pp,
2844 struct mvneta_rx_queue *rxq)
{
2847 rxq->size = pp->rx_ring_size;
2848
2849
2850 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2851 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2852 &rxq->descs_phys, GFP_KERNEL);
2853 if (!rxq->descs)
2854 return -ENOMEM;
2855
2856 rxq->last_desc = rxq->size - 1;
2857
2858
2859 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2860 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2861
2862
2863 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD - pp->rx_offset_correction);
2864
2865
2866 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2867 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2868
2869 if (!pp->bm_priv) {
2870
2871 mvneta_rxq_buf_size_set(pp, rxq,
2872 MVNETA_RX_BUF_SIZE(pp->pkt_size));
2873 mvneta_rxq_bm_disable(pp, rxq);
2874 mvneta_rxq_fill(pp, rxq, rxq->size);
2875 } else {
2876 mvneta_rxq_bm_enable(pp, rxq);
2877 mvneta_rxq_long_pool_set(pp, rxq);
2878 mvneta_rxq_short_pool_set(pp, rxq);
2879 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
2880 }
2881
2882 return 0;
2883}
2884
2885
2886static void mvneta_rxq_deinit(struct mvneta_port *pp,
2887 struct mvneta_rx_queue *rxq)
2888{
2889 mvneta_rxq_drop_pkts(pp, rxq);
2890
2891 if (rxq->descs)
2892 dma_free_coherent(pp->dev->dev.parent,
2893 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2894 rxq->descs,
2895 rxq->descs_phys);
2896
2897 rxq->descs = NULL;
2898 rxq->last_desc = 0;
2899 rxq->next_desc_to_proc = 0;
2900 rxq->descs_phys = 0;
2901}
2902
2903
2904static int mvneta_txq_init(struct mvneta_port *pp,
2905 struct mvneta_tx_queue *txq)
2906{
2907 int cpu;
2908
2909 txq->size = pp->tx_ring_size;
2910
2911
2912
2913
2914
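	/* A queue must always have room for at least one skb.
	 * Therefore, stop the queue when the free entries reach
	 * the maximum number of descriptors per skb.
	 */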
2915 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2916 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2917
2918
2919
2920 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2921 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2922 &txq->descs_phys, GFP_KERNEL);
2923 if (!txq->descs)
2924 return -ENOMEM;
2925
2926 txq->last_desc = txq->size - 1;
2927
2928
2929 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2930 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2931
2932
2933 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2934 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2935
2936 txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
2937 GFP_KERNEL);
2938 if (!txq->tx_skb) {
2939 dma_free_coherent(pp->dev->dev.parent,
2940 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2941 txq->descs, txq->descs_phys);
2942 return -ENOMEM;
2943 }
2944
2945
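	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */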
2946 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
2947 txq->size * TSO_HEADER_SIZE,
2948 &txq->tso_hdrs_phys, GFP_KERNEL);
2949 if (!txq->tso_hdrs) {
2950 kfree(txq->tx_skb);
2951 dma_free_coherent(pp->dev->dev.parent,
2952 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2953 txq->descs, txq->descs_phys);
2954 return -ENOMEM;
2955 }
2956 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2957
2958
2959 if (txq_number > 1)
2960 cpu = txq->id % num_present_cpus();
2961 else
2962 cpu = pp->rxq_def % num_present_cpus();
2963 cpumask_set_cpu(cpu, &txq->affinity_mask);
2964 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
2965
2966 return 0;
2967}
2968
2969
2970static void mvneta_txq_deinit(struct mvneta_port *pp,
2971 struct mvneta_tx_queue *txq)
2972{
2973 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2974
2975 kfree(txq->tx_skb);
2976
2977 if (txq->tso_hdrs)
2978 dma_free_coherent(pp->dev->dev.parent,
2979 txq->size * TSO_HEADER_SIZE,
2980 txq->tso_hdrs, txq->tso_hdrs_phys);
2981 if (txq->descs)
2982 dma_free_coherent(pp->dev->dev.parent,
2983 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2984 txq->descs, txq->descs_phys);
2985
2986 netdev_tx_reset_queue(nq);
2987
2988 txq->descs = NULL;
2989 txq->last_desc = 0;
2990 txq->next_desc_to_proc = 0;
2991 txq->descs_phys = 0;
2992
2993
2994 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2995 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2996
2997
2998 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2999 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
3000}
3001
3002
3003static void mvneta_cleanup_txqs(struct mvneta_port *pp)
3004{
3005 int queue;
3006
3007 for (queue = 0; queue < txq_number; queue++)
3008 mvneta_txq_deinit(pp, &pp->txqs[queue]);
3009}
3010
3011
3012static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3013{
3014 int queue;
3015
	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3018}
3019
3020
3021
3022static int mvneta_setup_rxqs(struct mvneta_port *pp)
3023{
3024 int queue;
3025
3026 for (queue = 0; queue < rxq_number; queue++) {
3027 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3028
3029 if (err) {
3030 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3031 __func__, queue);
3032 mvneta_cleanup_rxqs(pp);
3033 return err;
3034 }
3035 }
3036
3037 return 0;
3038}
3039
3040
3041static int mvneta_setup_txqs(struct mvneta_port *pp)
3042{
3043 int queue;
3044
3045 for (queue = 0; queue < txq_number; queue++) {
3046 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3047 if (err) {
3048 netdev_err(pp->dev, "%s: can't create txq=%d\n",
3049 __func__, queue);
3050 mvneta_cleanup_txqs(pp);
3051 return err;
3052 }
3053 }
3054
3055 return 0;
3056}
3057
3058static void mvneta_start_dev(struct mvneta_port *pp)
3059{
3060 int cpu;
3061 struct net_device *ndev = pp->dev;
3062
3063 mvneta_max_rx_size_set(pp, pp->pkt_size);
3064 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3065
3066
3067 mvneta_port_enable(pp);
3068
3069 if (!pp->neta_armada3700) {
3070
3071 for_each_online_cpu(cpu) {
3072 struct mvneta_pcpu_port *port =
3073 per_cpu_ptr(pp->ports, cpu);
3074
3075 napi_enable(&port->napi);
3076 }
3077 } else {
3078 napi_enable(&pp->napi);
3079 }
3080
3081
3082 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3083
3084 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3085 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3086 MVNETA_CAUSE_LINK_CHANGE |
3087 MVNETA_CAUSE_PSC_SYNC_CHANGE);
3088
3089 phy_start(ndev->phydev);
3090 netif_tx_start_all_queues(pp->dev);
3091}
3092
3093static void mvneta_stop_dev(struct mvneta_port *pp)
3094{
3095 unsigned int cpu;
3096 struct net_device *ndev = pp->dev;
3097
3098 phy_stop(ndev->phydev);
3099
3100 if (!pp->neta_armada3700) {
3101 for_each_online_cpu(cpu) {
3102 struct mvneta_pcpu_port *port =
3103 per_cpu_ptr(pp->ports, cpu);
3104
3105 napi_disable(&port->napi);
3106 }
3107 } else {
3108 napi_disable(&pp->napi);
3109 }
3110
3111 netif_carrier_off(pp->dev);
3112
3113 mvneta_port_down(pp);
3114 netif_tx_stop_all_queues(pp->dev);
3115
3116
3117 mvneta_port_disable(pp);
3118
3119
3120 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3121
3122
3123 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3124
3125 mvneta_tx_reset(pp);
3126 mvneta_rx_reset(pp);
3127}
3128
3129static void mvneta_percpu_enable(void *arg)
3130{
3131 struct mvneta_port *pp = arg;
3132
3133 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3134}
3135
3136static void mvneta_percpu_disable(void *arg)
3137{
3138 struct mvneta_port *pp = arg;
3139
3140 disable_percpu_irq(pp->dev->irq);
3141}
3142
3143
3144static int mvneta_change_mtu(struct net_device *dev, int mtu)
3145{
3146 struct mvneta_port *pp = netdev_priv(dev);
3147 int ret;
3148
3149 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
3150 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
3151 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
3152 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
3153 }
3154
3155 dev->mtu = mtu;
3156
3157 if (!netif_running(dev)) {
3158 if (pp->bm_priv)
3159 mvneta_bm_update_mtu(pp, mtu);
3160
3161 netdev_update_features(dev);
3162 return 0;
3163 }
3164
3165
3166
3167
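	/* The interface is running, so we have to force a
	 * reallocation of the queues.
	 */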
3168 mvneta_stop_dev(pp);
3169 on_each_cpu(mvneta_percpu_disable, pp, true);
3170
3171 mvneta_cleanup_txqs(pp);
3172 mvneta_cleanup_rxqs(pp);
3173
3174 if (pp->bm_priv)
3175 mvneta_bm_update_mtu(pp, mtu);
3176
3177 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3178 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
3179 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3180
3181 ret = mvneta_setup_rxqs(pp);
3182 if (ret) {
3183 netdev_err(dev, "unable to setup rxqs after MTU change\n");
3184 return ret;
3185 }
3186
3187 ret = mvneta_setup_txqs(pp);
3188 if (ret) {
3189 netdev_err(dev, "unable to setup txqs after MTU change\n");
3190 return ret;
3191 }
3192
3193 on_each_cpu(mvneta_percpu_enable, pp, true);
3194 mvneta_start_dev(pp);
3195 mvneta_port_up(pp);
3196
3197 netdev_update_features(dev);
3198
3199 return 0;
3200}
3201
3202static netdev_features_t mvneta_fix_features(struct net_device *dev,
3203 netdev_features_t features)
3204{
3205 struct mvneta_port *pp = netdev_priv(dev);
3206
3207 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3208 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
3209 netdev_info(dev,
3210 "Disable IP checksum for MTU greater than %dB\n",
3211 pp->tx_csum_limit);
3212 }
3213
3214 return features;
3215}
3216
3217
3218static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
3219{
3220 u32 mac_addr_l, mac_addr_h;
3221
3222 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
3223 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
3224 addr[0] = (mac_addr_h >> 24) & 0xFF;
3225 addr[1] = (mac_addr_h >> 16) & 0xFF;
3226 addr[2] = (mac_addr_h >> 8) & 0xFF;
3227 addr[3] = mac_addr_h & 0xFF;
3228 addr[4] = (mac_addr_l >> 8) & 0xFF;
3229 addr[5] = mac_addr_l & 0xFF;
3230}
3231
3232
3233static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
3234{
3235 struct mvneta_port *pp = netdev_priv(dev);
3236 struct sockaddr *sockaddr = addr;
3237 int ret;
3238
3239 ret = eth_prepare_mac_addr_change(dev, addr);
3240 if (ret < 0)
3241 return ret;
3242
3243 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3244
3245
3246 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3247
3248 eth_commit_mac_addr_change(dev, addr);
3249 return 0;
3250}
3251
3252static void mvneta_adjust_link(struct net_device *ndev)
3253{
3254 struct mvneta_port *pp = netdev_priv(ndev);
3255 struct phy_device *phydev = ndev->phydev;
3256 int status_change = 0;
3257
3258 if (phydev->link) {
3259 if ((pp->speed != phydev->speed) ||
3260 (pp->duplex != phydev->duplex)) {
3261 u32 val;
3262
3263 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3264 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
3265 MVNETA_GMAC_CONFIG_GMII_SPEED |
3266 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
3267
3268 if (phydev->duplex)
3269 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3270
3271 if (phydev->speed == SPEED_1000)
3272 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
3273 else if (phydev->speed == SPEED_100)
3274 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
3275
3276 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3277
3278 pp->duplex = phydev->duplex;
3279 pp->speed = phydev->speed;
3280 }
3281 }
3282
3283 if (phydev->link != pp->link) {
3284 if (!phydev->link) {
3285 pp->duplex = -1;
3286 pp->speed = 0;
3287 }
3288
3289 pp->link = phydev->link;
3290 status_change = 1;
3291 }
3292
3293 if (status_change) {
3294 if (phydev->link) {
3295 if (!pp->use_inband_status) {
3296 u32 val = mvreg_read(pp,
3297 MVNETA_GMAC_AUTONEG_CONFIG);
3298 val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
3299 val |= MVNETA_GMAC_FORCE_LINK_PASS;
3300 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3301 val);
3302 }
3303 mvneta_port_up(pp);
3304 } else {
3305 if (!pp->use_inband_status) {
3306 u32 val = mvreg_read(pp,
3307 MVNETA_GMAC_AUTONEG_CONFIG);
3308 val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
3309 val |= MVNETA_GMAC_FORCE_LINK_DOWN;
3310 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3311 val);
3312 }
3313 mvneta_port_down(pp);
3314 }
3315 phy_print_status(phydev);
3316 }
3317}
3318
3319static int mvneta_mdio_probe(struct mvneta_port *pp)
3320{
3321 struct phy_device *phy_dev;
3322 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
3323
3324 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
3325 pp->phy_interface);
3326 if (!phy_dev) {
3327 netdev_err(pp->dev, "could not find the PHY\n");
3328 return -ENODEV;
3329 }
3330
3331 phy_ethtool_get_wol(phy_dev, &wol);
3332 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
3333
3334 phy_dev->supported &= PHY_GBIT_FEATURES;
3335 phy_dev->advertising = phy_dev->supported;
3336
3337 pp->link = 0;
3338 pp->duplex = 0;
3339 pp->speed = 0;
3340
3341 return 0;
3342}
3343
3344static void mvneta_mdio_remove(struct mvneta_port *pp)
3345{
3346 struct net_device *ndev = pp->dev;
3347
3348 phy_disconnect(ndev->phydev);
3349}
3350
3351
3352
3353
3354
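/* Electing a CPU must be done in an atomic way: it should be done
 * after or before the removal/insertion of a CPU and this function is
 * not reentrant (callers hold pp->lock).
 */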
3355static void mvneta_percpu_elect(struct mvneta_port *pp)
3356{
3357 int elected_cpu = 0, max_cpu, cpu, i = 0;
3358
3359
3360
3361
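	/* Use the cpu associated to the rxq when it is online, in all
	 * the other cases, use cpu 0 which can't be offline.
	 */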
3362 if (cpu_online(pp->rxq_def))
3363 elected_cpu = pp->rxq_def;
3364
3365 max_cpu = num_present_cpus();
3366
3367 for_each_online_cpu(cpu) {
3368 int rxq_map = 0, txq_map = 0;
3369 int rxq;
3370
3371 for (rxq = 0; rxq < rxq_number; rxq++)
3372 if ((rxq % max_cpu) == cpu)
3373 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
3374
3375 if (cpu == elected_cpu)
3376
3377
3378
3379 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
3380
3381
3382
3383
3384
3385 if (txq_number == 1)
3386 txq_map = (cpu == elected_cpu) ?
3387 MVNETA_CPU_TXQ_ACCESS(1) : 0;
3388 else
3389 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
3390 MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
3391
3392 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
3393
3394
3395
3396
3397 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
3398 pp, true);
3399 i++;
3400
3401 }
}
3403
3404static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
3405{
3406 int other_cpu;
3407 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3408 node_online);
3409 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3410
3411
3412 spin_lock(&pp->lock);
3413
3414
3415
3416
3417 if (pp->is_stopped) {
3418 spin_unlock(&pp->lock);
3419 return 0;
3420 }
3421 netif_tx_stop_all_queues(pp->dev);
3422
3423
3424
3425
3426
3427 for_each_online_cpu(other_cpu) {
3428 if (other_cpu != cpu) {
3429 struct mvneta_pcpu_port *other_port =
3430 per_cpu_ptr(pp->ports, other_cpu);
3431
3432 napi_synchronize(&other_port->napi);
3433 }
3434 }
3435
3436
3437 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3438 napi_enable(&port->napi);
3439
3440
3441
3442
3443
3444 mvneta_percpu_enable(pp);
3445
3446
3447
3448
3449
3450 mvneta_percpu_elect(pp);
3451
3452
3453 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3454 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3455 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3456 MVNETA_CAUSE_LINK_CHANGE |
3457 MVNETA_CAUSE_PSC_SYNC_CHANGE);
3458 netif_tx_start_all_queues(pp->dev);
3459 spin_unlock(&pp->lock);
3460 return 0;
3461}
3462
3463static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
3464{
3465 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3466 node_online);
3467 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3468
3469
3470
3471
3472
3473 spin_lock(&pp->lock);
3474
3475 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3476 spin_unlock(&pp->lock);
3477
3478 napi_synchronize(&port->napi);
3479 napi_disable(&port->napi);
3480
3481 mvneta_percpu_disable(pp);
3482 return 0;
3483}
3484
3485static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
3486{
3487 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3488 node_dead);
3489
3490
3491 spin_lock(&pp->lock);
3492 mvneta_percpu_elect(pp);
3493 spin_unlock(&pp->lock);
3494
3495 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3496 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3497 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3498 MVNETA_CAUSE_LINK_CHANGE |
3499 MVNETA_CAUSE_PSC_SYNC_CHANGE);
3500 netif_tx_start_all_queues(pp->dev);
3501 return 0;
3502}
3503
3504static int mvneta_open(struct net_device *dev)
3505{
3506 struct mvneta_port *pp = netdev_priv(dev);
3507 int ret;
3508
3509 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
3510 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
3511 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3512
3513 ret = mvneta_setup_rxqs(pp);
3514 if (ret)
3515 return ret;
3516
3517 ret = mvneta_setup_txqs(pp);
3518 if (ret)
3519 goto err_cleanup_rxqs;
3520
3521
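	/* Connect to port interrupt line */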
3522 if (pp->neta_armada3700)
3523 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
3524 dev->name, pp);
3525 else
3526 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
3527 dev->name, pp->ports);
3528 if (ret) {
3529 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
3530 goto err_cleanup_txqs;
3531 }
3532
3533 if (!pp->neta_armada3700) {
3534
3535
3536
3537 on_each_cpu(mvneta_percpu_enable, pp, true);
3538
3539 pp->is_stopped = false;
3540
3541
3542
3543 ret = cpuhp_state_add_instance_nocalls(online_hpstate,
3544 &pp->node_online);
3545 if (ret)
3546 goto err_free_irq;
3547
3548 ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3549 &pp->node_dead);
3550 if (ret)
3551 goto err_free_online_hp;
3552 }
3553
3554
3555 netif_carrier_off(pp->dev);
3556
3557 ret = mvneta_mdio_probe(pp);
3558 if (ret < 0) {
3559 netdev_err(dev, "cannot probe MDIO bus\n");
3560 goto err_free_dead_hp;
3561 }
3562
3563 mvneta_start_dev(pp);
3564
3565 return 0;
3566
3567err_free_dead_hp:
3568 if (!pp->neta_armada3700)
3569 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3570 &pp->node_dead);
3571err_free_online_hp:
3572 if (!pp->neta_armada3700)
3573 cpuhp_state_remove_instance_nocalls(online_hpstate,
3574 &pp->node_online);
3575err_free_irq:
3576 if (pp->neta_armada3700) {
3577 free_irq(pp->dev->irq, pp);
3578 } else {
3579 on_each_cpu(mvneta_percpu_disable, pp, true);
3580 free_percpu_irq(pp->dev->irq, pp->ports);
3581 }
3582err_cleanup_txqs:
3583 mvneta_cleanup_txqs(pp);
3584err_cleanup_rxqs:
3585 mvneta_cleanup_rxqs(pp);
3586 return ret;
3587}
3588
3589
3590static int mvneta_stop(struct net_device *dev)
3591{
3592 struct mvneta_port *pp = netdev_priv(dev);
3593
3594 if (!pp->neta_armada3700) {
3595
3596
3597
3598
3599
3600 spin_lock(&pp->lock);
3601 pp->is_stopped = true;
3602 spin_unlock(&pp->lock);
3603
3604 mvneta_stop_dev(pp);
3605 mvneta_mdio_remove(pp);
3606
3607 cpuhp_state_remove_instance_nocalls(online_hpstate,
3608 &pp->node_online);
3609 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3610 &pp->node_dead);
3611 on_each_cpu(mvneta_percpu_disable, pp, true);
3612 free_percpu_irq(dev->irq, pp->ports);
3613 } else {
3614 mvneta_stop_dev(pp);
3615 mvneta_mdio_remove(pp);
3616 free_irq(dev->irq, pp);
3617 }
3618
3619 mvneta_cleanup_rxqs(pp);
3620 mvneta_cleanup_txqs(pp);
3621
3622 return 0;
3623}
3624
3625static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3626{
3627 if (!dev->phydev)
		return -EOPNOTSUPP;
3629
3630 return phy_mii_ioctl(dev->phydev, ifr, cmd);
3631}
3632
3633
3634
3635
3636static int
3637mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
3638 const struct ethtool_link_ksettings *cmd)
3639{
3640 struct mvneta_port *pp = netdev_priv(ndev);
3641 struct phy_device *phydev = ndev->phydev;
3642
3643 if (!phydev)
3644 return -ENODEV;
3645
3646 if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
3647 u32 val;
3648
3649 mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE);
3650
3651 if (cmd->base.autoneg == AUTONEG_DISABLE) {
3652 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3653 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
3654 MVNETA_GMAC_CONFIG_GMII_SPEED |
3655 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
3656
3657 if (phydev->duplex)
3658 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3659
3660 if (phydev->speed == SPEED_1000)
3661 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
3662 else if (phydev->speed == SPEED_100)
3663 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
3664
3665 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3666 }
3667
3668 pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE);
3669 netdev_info(pp->dev, "autoneg status set to %i\n",
3670 pp->use_inband_status);
3671
3672 if (netif_running(ndev)) {
3673 mvneta_port_down(pp);
3674 mvneta_port_up(pp);
3675 }
3676 }
3677
3678 return phy_ethtool_ksettings_set(ndev->phydev, cmd);
3679}
3680
3681
3682static int mvneta_ethtool_set_coalesce(struct net_device *dev,
3683 struct ethtool_coalesce *c)
3684{
3685 struct mvneta_port *pp = netdev_priv(dev);
3686 int queue;
3687
3688 for (queue = 0; queue < rxq_number; queue++) {
3689 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
3690 rxq->time_coal = c->rx_coalesce_usecs;
3691 rxq->pkts_coal = c->rx_max_coalesced_frames;
3692 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3693 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3694 }
3695
3696 for (queue = 0; queue < txq_number; queue++) {
3697 struct mvneta_tx_queue *txq = &pp->txqs[queue];
3698 txq->done_pkts_coal = c->tx_max_coalesced_frames;
3699 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3700 }
3701
3702 return 0;
3703}
3704
3705
3706static int mvneta_ethtool_get_coalesce(struct net_device *dev,
3707 struct ethtool_coalesce *c)
3708{
3709 struct mvneta_port *pp = netdev_priv(dev);
3710
3711 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
3712 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
3713
3714 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
3715 return 0;
3716}
3717
3718
3719static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
3720 struct ethtool_drvinfo *drvinfo)
3721{
3722 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
3723 sizeof(drvinfo->driver));
3724 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
3725 sizeof(drvinfo->version));
3726 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
3727 sizeof(drvinfo->bus_info));
3728}
3729
3730
3731static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
3732 struct ethtool_ringparam *ring)
3733{
3734 struct mvneta_port *pp = netdev_priv(netdev);
3735
3736 ring->rx_max_pending = MVNETA_MAX_RXD;
3737 ring->tx_max_pending = MVNETA_MAX_TXD;
3738 ring->rx_pending = pp->rx_ring_size;
3739 ring->tx_pending = pp->tx_ring_size;
3740}
3741
3742static int mvneta_ethtool_set_ringparam(struct net_device *dev,
3743 struct ethtool_ringparam *ring)
3744{
3745 struct mvneta_port *pp = netdev_priv(dev);
3746
3747 if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
3748 return -EINVAL;
3749 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
3750 ring->rx_pending : MVNETA_MAX_RXD;
3751
3752 pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
3753 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
3754 if (pp->tx_ring_size != ring->tx_pending)
3755 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
3756 pp->tx_ring_size, ring->tx_pending);
3757
3758 if (netif_running(dev)) {
3759 mvneta_stop(dev);
3760 if (mvneta_open(dev)) {
3761 netdev_err(dev,
3762 "error on opening device after ring param change\n");
3763 return -ENOMEM;
3764 }
3765 }
3766
3767 return 0;
3768}
3769
3770static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
3771 u8 *data)
3772{
3773 if (sset == ETH_SS_STATS) {
3774 int i;
3775
3776 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
3777 memcpy(data + i * ETH_GSTRING_LEN,
3778 mvneta_statistics[i].name, ETH_GSTRING_LEN);
3779 }
3780}
3781
3782static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
3783{
3784 const struct mvneta_statistic *s;
3785 void __iomem *base = pp->base;
3786 u32 high, low, val;
3787 u64 val64;
3788 int i;
3789
3790 for (i = 0, s = mvneta_statistics;
3791 s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
3792 s++, i++) {
3793 switch (s->type) {
3794 case T_REG_32:
3795 val = readl_relaxed(base + s->offset);
3796 pp->ethtool_stats[i] += val;
3797 break;
3798 case T_REG_64:
3799
3800 low = readl_relaxed(base + s->offset);
3801 high = readl_relaxed(base + s->offset + 4);
3802 val64 = (u64)high << 32 | low;
3803 pp->ethtool_stats[i] += val64;
3804 break;
3805 }
3806 }
3807}
3808
3809static void mvneta_ethtool_get_stats(struct net_device *dev,
3810 struct ethtool_stats *stats, u64 *data)
3811{
3812 struct mvneta_port *pp = netdev_priv(dev);
3813 int i;
3814
3815 mvneta_ethtool_update_stats(pp);
3816
3817 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
3818 *data++ = pp->ethtool_stats[i];
3819}
3820
3821static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
3822{
3823 if (sset == ETH_SS_STATS)
3824 return ARRAY_SIZE(mvneta_statistics);
3825 return -EOPNOTSUPP;
3826}
3827
3828static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
3829{
3830 return MVNETA_RSS_LU_TABLE_SIZE;
3831}
3832
3833static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
3834 struct ethtool_rxnfc *info,
3835 u32 *rules __always_unused)
3836{
3837 switch (info->cmd) {
3838 case ETHTOOL_GRXRINGS:
3839 info->data = rxq_number;
3840 return 0;
3841 case ETHTOOL_GRXFH:
3842 return -EOPNOTSUPP;
3843 default:
3844 return -EOPNOTSUPP;
3845 }
3846}
3847
3848static int mvneta_config_rss(struct mvneta_port *pp)
3849{
3850 int cpu;
3851 u32 val;
3852
3853 netif_tx_stop_all_queues(pp->dev);
3854
3855 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3856
3857
3858 for_each_online_cpu(cpu) {
3859 struct mvneta_pcpu_port *pcpu_port =
3860 per_cpu_ptr(pp->ports, cpu);
3861
3862 napi_synchronize(&pcpu_port->napi);
3863 napi_disable(&pcpu_port->napi);
3864 }
3865
3866 pp->rxq_def = pp->indir[0];
3867
3868
3869 mvneta_set_rx_mode(pp->dev);
3870
3871
3872 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
3873 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
3874
3875
3876 spin_lock(&pp->lock);
3877 mvneta_percpu_elect(pp);
3878 spin_unlock(&pp->lock);
3879
3880
3881 for_each_online_cpu(cpu) {
3882 struct mvneta_pcpu_port *pcpu_port =
3883 per_cpu_ptr(pp->ports, cpu);
3884
3885 napi_enable(&pcpu_port->napi);
3886 }
3887
3888 netif_tx_start_all_queues(pp->dev);
3889
3890 return 0;
3891}
3892
3893static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
3894 const u8 *key, const u8 hfunc)
3895{
3896 struct mvneta_port *pp = netdev_priv(dev);
3897
3898
3899 if (pp->neta_armada3700)
3900 return -EOPNOTSUPP;
3901
3902
3903
3904
3905 if (key ||
3906 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
3907 return -EOPNOTSUPP;
3908
3909 if (!indir)
3910 return 0;
3911
3912 memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
3913
3914 return mvneta_config_rss(pp);
3915}
3916
3917static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
3918 u8 *hfunc)
3919{
3920 struct mvneta_port *pp = netdev_priv(dev);
3921
3922
3923 if (pp->neta_armada3700)
3924 return -EOPNOTSUPP;
3925
3926 if (hfunc)
3927 *hfunc = ETH_RSS_HASH_TOP;
3928
3929 if (!indir)
3930 return 0;
3931
3932 memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
3933
3934 return 0;
3935}
3936
3937static void mvneta_ethtool_get_wol(struct net_device *dev,
3938 struct ethtool_wolinfo *wol)
3939{
3940 wol->supported = 0;
3941 wol->wolopts = 0;
3942
3943 if (dev->phydev)
3944 phy_ethtool_get_wol(dev->phydev, wol);
3945}
3946
3947static int mvneta_ethtool_set_wol(struct net_device *dev,
3948 struct ethtool_wolinfo *wol)
3949{
3950 int ret;
3951
3952 if (!dev->phydev)
3953 return -EOPNOTSUPP;
3954
3955 ret = phy_ethtool_set_wol(dev->phydev, wol);
3956 if (!ret)
3957 device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
3958
3959 return ret;
3960}
3961
3962static const struct net_device_ops mvneta_netdev_ops = {
3963 .ndo_open = mvneta_open,
3964 .ndo_stop = mvneta_stop,
3965 .ndo_start_xmit = mvneta_tx,
3966 .ndo_set_rx_mode = mvneta_set_rx_mode,
3967 .ndo_set_mac_address = mvneta_set_mac_addr,
3968 .ndo_change_mtu = mvneta_change_mtu,
3969 .ndo_fix_features = mvneta_fix_features,
3970 .ndo_get_stats64 = mvneta_get_stats64,
3971 .ndo_do_ioctl = mvneta_ioctl,
3972};
3973
3974static const struct ethtool_ops mvneta_eth_tool_ops = {
3975 .nway_reset = phy_ethtool_nway_reset,
3976 .get_link = ethtool_op_get_link,
3977 .set_coalesce = mvneta_ethtool_set_coalesce,
3978 .get_coalesce = mvneta_ethtool_get_coalesce,
3979 .get_drvinfo = mvneta_ethtool_get_drvinfo,
3980 .get_ringparam = mvneta_ethtool_get_ringparam,
3981 .set_ringparam = mvneta_ethtool_set_ringparam,
3982 .get_strings = mvneta_ethtool_get_strings,
3983 .get_ethtool_stats = mvneta_ethtool_get_stats,
3984 .get_sset_count = mvneta_ethtool_get_sset_count,
3985 .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
3986 .get_rxnfc = mvneta_ethtool_get_rxnfc,
3987 .get_rxfh = mvneta_ethtool_get_rxfh,
3988 .set_rxfh = mvneta_ethtool_set_rxfh,
3989 .get_link_ksettings = phy_ethtool_get_link_ksettings,
3990 .set_link_ksettings = mvneta_ethtool_set_link_ksettings,
3991 .get_wol = mvneta_ethtool_get_wol,
3992 .set_wol = mvneta_ethtool_set_wol,
3993};
3994
3995
3996static int mvneta_init(struct device *dev, struct mvneta_port *pp)
3997{
3998 int queue;
3999
4000
4001 mvneta_port_disable(pp);
4002
4003
4004 mvneta_defaults_set(pp);
4005
4006 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
4007 if (!pp->txqs)
4008 return -ENOMEM;
4009
4010
4011 for (queue = 0; queue < txq_number; queue++) {
4012 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4013 txq->id = queue;
4014 txq->size = pp->tx_ring_size;
4015 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
4016 }
4017
4018 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
4019 if (!pp->rxqs)
4020 return -ENOMEM;
4021
4022
4023 for (queue = 0; queue < rxq_number; queue++) {
4024 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4025 rxq->id = queue;
4026 rxq->size = pp->rx_ring_size;
4027 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
4028 rxq->time_coal = MVNETA_RX_COAL_USEC;
4029 rxq->buf_virt_addr
4030 = devm_kmalloc_array(pp->dev->dev.parent,
4031 rxq->size,
4032 sizeof(*rxq->buf_virt_addr),
4033 GFP_KERNEL);
4034 if (!rxq->buf_virt_addr)
4035 return -ENOMEM;
4036 }
4037
4038 return 0;
4039}
4040
4041
4042static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
4043 const struct mbus_dram_target_info *dram)
4044{
4045 u32 win_enable;
4046 u32 win_protect;
4047 int i;
4048
4049 for (i = 0; i < 6; i++) {
4050 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
4051 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
4052
4053 if (i < 4)
4054 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
4055 }
4056
4057 win_enable = 0x3f;
4058 win_protect = 0;
4059
4060 if (dram) {
4061 for (i = 0; i < dram->num_cs; i++) {
4062 const struct mbus_dram_window *cs = dram->cs + i;
4063
4064 mvreg_write(pp, MVNETA_WIN_BASE(i),
4065 (cs->base & 0xffff0000) |
4066 (cs->mbus_attr << 8) |
4067 dram->mbus_dram_target_id);
4068
4069 mvreg_write(pp, MVNETA_WIN_SIZE(i),
4070 (cs->size - 1) & 0xffff0000);
4071
4072 win_enable &= ~(1 << i);
4073 win_protect |= 3 << (2 * i);
4074 }
4075 } else {
4076
4077
4078
4079
4080 mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
4081 win_enable &= ~BIT(0);
4082 win_protect = 3;
4083 }
4084
4085 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
4086 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
4087}
4088
4089
4090static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
4091{
4092 u32 ctrl;
4093
4094
4095 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
4096
4097 ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
4098
4099
4100
4101
4102 switch(phy_mode) {
4103 case PHY_INTERFACE_MODE_QSGMII:
4104 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
4105 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
4106 break;
4107 case PHY_INTERFACE_MODE_SGMII:
4108 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
4109 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
4110 break;
4111 case PHY_INTERFACE_MODE_RGMII:
4112 case PHY_INTERFACE_MODE_RGMII_ID:
4113 case PHY_INTERFACE_MODE_RGMII_RXID:
4114 case PHY_INTERFACE_MODE_RGMII_TXID:
4115 ctrl |= MVNETA_GMAC2_PORT_RGMII;
4116 break;
4117 default:
4118 return -EINVAL;
4119 }
4120
4121
4122 ctrl &= ~MVNETA_GMAC2_PORT_RESET;
4123 mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
4124
4125 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
4126 MVNETA_GMAC2_PORT_RESET) != 0)
4127 continue;
4128
4129 return 0;
4130}
4131
4132
4133static int mvneta_probe(struct platform_device *pdev)
4134{
4135 struct resource *res;
4136 struct device_node *dn = pdev->dev.of_node;
4137 struct device_node *phy_node;
4138 struct device_node *bm_node;
4139 struct mvneta_port *pp;
4140 struct net_device *dev;
4141 const char *dt_mac_addr;
4142 char hw_mac_addr[ETH_ALEN];
4143 const char *mac_from;
4144 const char *managed;
4145 int tx_csum_limit;
4146 int phy_mode;
4147 int err;
4148 int cpu;
4149
	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port),
				 txq_number, rxq_number);
4151 if (!dev)
4152 return -ENOMEM;
4153
4154 dev->irq = irq_of_parse_and_map(dn, 0);
4155 if (dev->irq == 0) {
4156 err = -EINVAL;
4157 goto err_free_netdev;
4158 }
4159
4160 phy_node = of_parse_phandle(dn, "phy", 0);
4161 if (!phy_node) {
4162 if (!of_phy_is_fixed_link(dn)) {
4163 dev_err(&pdev->dev, "no PHY specified\n");
4164 err = -ENODEV;
4165 goto err_free_irq;
4166 }
4167
4168 err = of_phy_register_fixed_link(dn);
4169 if (err < 0) {
4170 dev_err(&pdev->dev, "cannot register fixed PHY\n");
4171 goto err_free_irq;
4172 }
4173
4174
4175
4176
4177 phy_node = of_node_get(dn);
4178 }
4179
4180 phy_mode = of_get_phy_mode(dn);
4181 if (phy_mode < 0) {
4182 dev_err(&pdev->dev, "incorrect phy-mode\n");
4183 err = -EINVAL;
4184 goto err_put_phy_node;
4185 }
4186
4187 dev->tx_queue_len = MVNETA_MAX_TXD;
4188 dev->watchdog_timeo = 5 * HZ;
4189 dev->netdev_ops = &mvneta_netdev_ops;
4190
4191 dev->ethtool_ops = &mvneta_eth_tool_ops;
4192
4193 pp = netdev_priv(dev);
4194 spin_lock_init(&pp->lock);
4195 pp->phy_node = phy_node;
4196 pp->phy_interface = phy_mode;
4197
4198 err = of_property_read_string(dn, "managed", &managed);
4199 pp->use_inband_status = (err == 0 &&
4200 strcmp(managed, "in-band-status") == 0);
4201
4202 pp->rxq_def = rxq_def;
4203
4204
4205
4206
4207
4208 pp->rx_offset_correction =
4209 max(0, NET_SKB_PAD - MVNETA_RX_PKT_OFFSET_CORRECTION);
4210
4211 pp->indir[0] = rxq_def;
4212
4213
4214 if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
4215 pp->neta_armada3700 = true;
4216
4217 pp->clk = devm_clk_get(&pdev->dev, "core");
4218 if (IS_ERR(pp->clk))
4219 pp->clk = devm_clk_get(&pdev->dev, NULL);
4220 if (IS_ERR(pp->clk)) {
4221 err = PTR_ERR(pp->clk);
4222 goto err_put_phy_node;
4223 }
4224
4225 clk_prepare_enable(pp->clk);
4226
4227 pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
4228 if (!IS_ERR(pp->clk_bus))
4229 clk_prepare_enable(pp->clk_bus);
4230
4231 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4232 pp->base = devm_ioremap_resource(&pdev->dev, res);
4233 if (IS_ERR(pp->base)) {
4234 err = PTR_ERR(pp->base);
4235 goto err_clk;
4236 }
4237
4238
4239 pp->ports = alloc_percpu(struct mvneta_pcpu_port);
4240 if (!pp->ports) {
4241 err = -ENOMEM;
4242 goto err_clk;
4243 }
4244
4245
4246 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
4247 if (!pp->stats) {
4248 err = -ENOMEM;
4249 goto err_free_ports;
4250 }
4251
4252 dt_mac_addr = of_get_mac_address(dn);
4253 if (dt_mac_addr) {
4254 mac_from = "device tree";
4255 memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
4256 } else {
4257 mvneta_get_mac_addr(pp, hw_mac_addr);
4258 if (is_valid_ether_addr(hw_mac_addr)) {
4259 mac_from = "hardware";
4260 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
4261 } else {
4262 mac_from = "random";
4263 eth_hw_addr_random(dev);
4264 }
4265 }
4266
4267 if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
4268 if (tx_csum_limit < 0 ||
4269 tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
4270 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
4271 dev_info(&pdev->dev,
4272 "Wrong TX csum limit in DT, set to %dB\n",
4273 MVNETA_TX_CSUM_DEF_SIZE);
4274 }
4275 } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
4276 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
4277 } else {
4278 tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
4279 }
4280
4281 pp->tx_csum_limit = tx_csum_limit;
4282
4283 pp->dram_target_info = mv_mbus_dram_info();
4284
4285
4286
4287
4288 if (pp->dram_target_info || pp->neta_armada3700)
4289 mvneta_conf_mbus_windows(pp, pp->dram_target_info);
4290
4291 pp->tx_ring_size = MVNETA_MAX_TXD;
4292 pp->rx_ring_size = MVNETA_MAX_RXD;
4293
4294 pp->dev = dev;
4295 SET_NETDEV_DEV(dev, &pdev->dev);
4296
4297 pp->id = global_port_id++;
4298
4299
4300 bm_node = of_parse_phandle(dn, "buffer-manager", 0);
4301 if (bm_node && bm_node->data) {
4302 pp->bm_priv = bm_node->data;
4303 err = mvneta_bm_port_init(pdev, pp);
4304 if (err < 0) {
4305 dev_info(&pdev->dev, "use SW buffer management\n");
4306 pp->bm_priv = NULL;
4307 }
4308 }
4309 of_node_put(bm_node);
4310
4311 err = mvneta_init(&pdev->dev, pp);
4312 if (err < 0)
4313 goto err_netdev;
4314
4315 err = mvneta_port_power_up(pp, phy_mode);
4316 if (err < 0) {
4317 dev_err(&pdev->dev, "can't power up port\n");
4318 goto err_netdev;
4319 }
4320
4321
4322
4323
4324 if (pp->neta_armada3700) {
4325 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
4326 } else {
4327 for_each_present_cpu(cpu) {
4328 struct mvneta_pcpu_port *port =
4329 per_cpu_ptr(pp->ports, cpu);
4330
4331 netif_napi_add(dev, &port->napi, mvneta_poll,
4332 NAPI_POLL_WEIGHT);
4333 port->pp = pp;
4334 }
4335 }
4336
4337 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
4338 dev->hw_features |= dev->features;
4339 dev->vlan_features |= dev->features;
4340 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
4341 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
4342
4343
4344 dev->min_mtu = ETH_MIN_MTU;
4345
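	/* 9676 == 9700 - 20 and rounding to 8 */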
4346 dev->max_mtu = 9676;
4347
4348 err = register_netdev(dev);
4349 if (err < 0) {
4350 dev_err(&pdev->dev, "failed to register\n");
4351 goto err_free_stats;
4352 }
4353
4354 netdev_info(dev, "Using %s mac address %pM\n", mac_from,
4355 dev->dev_addr);
4356
4357 platform_set_drvdata(pdev, pp->dev);
4358
4359 if (pp->use_inband_status) {
4360 struct phy_device *phy = of_phy_find_device(dn);
4361
4362 mvneta_fixed_link_update(pp, phy);
4363
4364 put_device(&phy->mdio.dev);
4365 }
4366
4367 return 0;
4368
4369err_netdev:
4371 if (pp->bm_priv) {
4372 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
4373 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
4374 1 << pp->id);
4375 }
4376err_free_stats:
4377 free_percpu(pp->stats);
4378err_free_ports:
4379 free_percpu(pp->ports);
4380err_clk:
4381 clk_disable_unprepare(pp->clk_bus);
4382 clk_disable_unprepare(pp->clk);
4383err_put_phy_node:
4384 of_node_put(phy_node);
4385 if (of_phy_is_fixed_link(dn))
4386 of_phy_deregister_fixed_link(dn);
4387err_free_irq:
4388 irq_dispose_mapping(dev->irq);
4389err_free_netdev:
4390 free_netdev(dev);
4391 return err;
4392}
4393
4394
4395static int mvneta_remove(struct platform_device *pdev)
4396{
4397 struct net_device *dev = platform_get_drvdata(pdev);
4398 struct device_node *dn = pdev->dev.of_node;
4399 struct mvneta_port *pp = netdev_priv(dev);
4400
4401 unregister_netdev(dev);
4402 clk_disable_unprepare(pp->clk_bus);
4403 clk_disable_unprepare(pp->clk);
4404 free_percpu(pp->ports);
4405 free_percpu(pp->stats);
4406 if (of_phy_is_fixed_link(dn))
4407 of_phy_deregister_fixed_link(dn);
4408 irq_dispose_mapping(dev->irq);
4409 of_node_put(pp->phy_node);
4410 free_netdev(dev);
4411
4412 if (pp->bm_priv) {
4413 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
4414 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
4415 1 << pp->id);
4416 }
4417
4418 return 0;
4419}
4420
4421#ifdef CONFIG_PM_SLEEP
4422static int mvneta_suspend(struct device *device)
4423{
4424 struct net_device *dev = dev_get_drvdata(device);
4425 struct mvneta_port *pp = netdev_priv(dev);
4426
4427 if (netif_running(dev))
4428 mvneta_stop(dev);
4429 netif_device_detach(dev);
4430 clk_disable_unprepare(pp->clk_bus);
4431 clk_disable_unprepare(pp->clk);
4432 return 0;
4433}
4434
4435static int mvneta_resume(struct device *device)
4436{
4437 struct platform_device *pdev = to_platform_device(device);
4438 struct net_device *dev = dev_get_drvdata(device);
4439 struct mvneta_port *pp = netdev_priv(dev);
4440 int err;
4441
4442 clk_prepare_enable(pp->clk);
4443 if (!IS_ERR(pp->clk_bus))
4444 clk_prepare_enable(pp->clk_bus);
4445 if (pp->dram_target_info || pp->neta_armada3700)
4446 mvneta_conf_mbus_windows(pp, pp->dram_target_info);
4447 if (pp->bm_priv) {
4448 err = mvneta_bm_port_init(pdev, pp);
4449 if (err < 0) {
4450 dev_info(&pdev->dev, "use SW buffer management\n");
4451 pp->bm_priv = NULL;
4452 }
4453 }
4454 mvneta_defaults_set(pp);
4455 err = mvneta_port_power_up(pp, pp->phy_interface);
4456 if (err < 0) {
4457 dev_err(device, "can't power up port\n");
4458 return err;
4459 }
4460
4461 if (pp->use_inband_status)
4462 mvneta_fixed_link_update(pp, dev->phydev);
4463
4464 netif_device_attach(dev);
4465 if (netif_running(dev)) {
4466 mvneta_open(dev);
4467 mvneta_set_rx_mode(dev);
4468 }
4469
4470 return 0;
4471}
4472#endif
4473
4474static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
4475
4476static const struct of_device_id mvneta_match[] = {
4477 { .compatible = "marvell,armada-370-neta" },
4478 { .compatible = "marvell,armada-xp-neta" },
4479 { .compatible = "marvell,armada-3700-neta" },
4480 { }
4481};
4482MODULE_DEVICE_TABLE(of, mvneta_match);
4483
4484static struct platform_driver mvneta_driver = {
4485 .probe = mvneta_probe,
4486 .remove = mvneta_remove,
4487 .driver = {
4488 .name = MVNETA_DRIVER_NAME,
4489 .of_match_table = mvneta_match,
4490 .pm = &mvneta_pm_ops,
4491 },
4492};
4493
4494static int __init mvneta_driver_init(void)
4495{
4496 int ret;
4497
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
4499 mvneta_cpu_online,
4500 mvneta_cpu_down_prepare);
4501 if (ret < 0)
4502 goto out;
4503 online_hpstate = ret;
4504 ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
4505 NULL, mvneta_cpu_dead);
4506 if (ret)
4507 goto err_dead;
4508
4509 ret = platform_driver_register(&mvneta_driver);
4510 if (ret)
4511 goto err;
4512 return 0;
4513
4514err:
4515 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4516err_dead:
4517 cpuhp_remove_multi_state(online_hpstate);
4518out:
4519 return ret;
4520}
4521module_init(mvneta_driver_init);
4522
4523static void __exit mvneta_driver_exit(void)
4524{
4525 platform_driver_unregister(&mvneta_driver);
4526 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4527 cpuhp_remove_multi_state(online_hpstate);
4528}
4529module_exit(mvneta_driver_exit);
4530
4531MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
4532MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
4533MODULE_LICENSE("GPL");
4534
4535module_param(rxq_number, int, S_IRUGO);
4536module_param(txq_number, int, S_IRUGO);
4537
4538module_param(rxq_def, int, S_IRUGO);
4539module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);
4540