#include <linux/module.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/notifier.h>

#include "xgbe.h"
#include "xgbe-common.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(XGBE_DRV_VERSION);
MODULE_DESCRIPTION(XGBE_DRV_DESC);

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Network interface message level setting");

static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
				      NETIF_MSG_IFUP);
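/*
 * "debug" is a NETIF_MSG_* bitmask. The default of -1 makes
 * netif_msg_init() fall back to default_msg_level below, so only link and
 * interface up/down events are logged unless the user overrides it, e.g.
 * (assuming the module is loaded under its usual name, amd-xgbe):
 *
 *   modprobe amd-xgbe debug=0x7fff
 */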

static void xgbe_default_config(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_default_config\n");

	pdata->blen = DMA_SBMR_BLEN_64;
	pdata->pbl = DMA_PBL_128;
	pdata->aal = 1;
	pdata->rd_osr_limit = 8;
	pdata->wr_osr_limit = 8;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_DISABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 1;
	pdata->rx_pause = 1;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;

	DBGPR("<--xgbe_default_config\n");
}

static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
{
	xgbe_init_function_ptrs_dev(&pdata->hw_if);
	xgbe_init_function_ptrs_phy(&pdata->phy_if);
	xgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	xgbe_init_function_ptrs_desc(&pdata->desc_if);

	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}
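/*
 * The driver is layered through these ops tables: the common device
 * (hw_if), PHY (phy_if), I2C (i2c_if) and descriptor (desc_if) operations
 * are installed first, then the per-hardware-version data (vdata) overrides
 * the PHY implementation hooks with version-specific routines.
 */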

struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
{
	struct xgbe_prv_data *pdata;
	struct net_device *netdev;

	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
				   XGBE_MAX_DMA_CHANNELS);
	if (!netdev) {
		dev_err(dev, "alloc_etherdev_mq failed\n");
		return ERR_PTR(-ENOMEM);
	}
	SET_NETDEV_DEV(netdev, dev);
	pdata = netdev_priv(netdev);
	pdata->netdev = netdev;
	pdata->dev = dev;

	spin_lock_init(&pdata->lock);
	spin_lock_init(&pdata->xpcs_lock);
	mutex_init(&pdata->rss_mutex);
	spin_lock_init(&pdata->tstamp_lock);
	mutex_init(&pdata->i2c_mutex);
	init_completion(&pdata->i2c_complete);
	init_completion(&pdata->mdio_complete);
	INIT_LIST_HEAD(&pdata->vxlan_ports);

	pdata->msg_enable = netif_msg_init(debug, default_msg_level);

	set_bit(XGBE_DOWN, &pdata->dev_state);
	set_bit(XGBE_STOPPED, &pdata->dev_state);

	return pdata;
}
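/*
 * alloc_etherdev_mq() allocates the net_device and its private area in a
 * single block, so netdev_priv() above returns the embedded xgbe_prv_data
 * and one free_netdev() call (see xgbe_free_pdata()) releases both.
 */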

void xgbe_free_pdata(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;

	free_netdev(netdev);
}

void xgbe_set_counts(struct xgbe_prv_data *pdata)
{
	/* Set all the function pointers */
	xgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	xgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.tx_ch_cnt);
	pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
				     pdata->tx_max_channel_count);
	pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
				     pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.rx_ch_cnt);
	pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
				     pdata->rx_max_channel_count);

	pdata->rx_q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt,
				  pdata->rx_max_q_count);

	if (netif_msg_probe(pdata)) {
		dev_dbg(pdata->dev, "TX/RX DMA channel count = %u/%u\n",
			pdata->tx_ring_count, pdata->rx_ring_count);
		dev_dbg(pdata->dev, "TX/RX hardware queue count = %u/%u\n",
			pdata->tx_q_count, pdata->rx_q_count);
	}
}
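/*
 * Example: on a 16-CPU system where the hardware reports 8 Tx channels and
 * 8 Tx queues, with no extra platform limits set, tx_ring_count becomes
 * min(16, 8) = 8 and tx_q_count follows it at 8. Rx is clamped the same
 * way, except rx_q_count tracks the hardware queue count rather than the
 * ring count, since Rx channels and queues do not map 1-to-1.
 */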

int xgbe_config_netdev(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct device *dev = pdata->dev;
	int ret;

	netdev->irq = pdata->dev_irq;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
	memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);

	/* Initialize ECC timestamps */
	pdata->tx_sec_period = jiffies;
	pdata->tx_ded_period = jiffies;
	pdata->rx_sec_period = jiffies;
	pdata->rx_ded_period = jiffies;
	pdata->desc_sec_period = jiffies;
	pdata->desc_ded_period = jiffies;

	/* Issue software reset to the device */
	ret = pdata->hw_if.exit(pdata);
	if (ret) {
		dev_err(dev, "software reset failed\n");
		return ret;
	}

	/* Set default configuration data */
	xgbe_default_config(pdata);

	/* Set the DMA mask from the hardware's supported address width */
	ret = dma_set_mask_and_coherent(dev,
					DMA_BIT_MASK(pdata->hw_feat.dma_width));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed\n");
		return ret;
	}

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	/* Set and validate the number of descriptors for a ring */
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
	pdata->tx_desc_count = XGBE_TX_DESC_CNT;

	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
	pdata->rx_desc_count = XGBE_RX_DESC_CNT;
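	/*
	 * The power-of-two build check matters because ring indices are
	 * presumably wrapped with a cheap "index & (count - 1)" mask rather
	 * than a modulo; a non-power-of-two count would silently break that.
	 */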

	/* Adjust the number of queues based on interrupts assigned */
	if (pdata->channel_irq_count) {
		pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
					     pdata->channel_irq_count);
		pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
					     pdata->channel_irq_count);

		if (netif_msg_probe(pdata))
			dev_dbg(pdata->dev,
				"adjusted TX/RX DMA channel count = %u/%u\n",
				pdata->tx_ring_count, pdata->rx_ring_count);
	}

	/* Initialize the RSS hash key and hashing options */
	netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));

	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
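	/*
	 * netdev_rss_key_fill() copies the kernel's boot-time random RSS
	 * key, and the MAC_RSSCR bits enable 2-tuple hashing on the IP
	 * header plus 4-tuple hashing for TCP and UDP, spreading flows
	 * across the Rx rings.
	 */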

	/* Call MDIO/PHY initialization routine */
	pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
	ret = pdata->phy_if.phy_init(pdata);
	if (ret)
		return ret;

	/* Set device operations */
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif

	/* Set device features */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GRO |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_FILTER;

	if (pdata->hw_feat.rss)
		netdev->hw_features |= NETIF_F_RXHASH;

	if (pdata->hw_feat.vxn) {
		netdev->hw_enc_features = NETIF_F_SG |
					  NETIF_F_IP_CSUM |
					  NETIF_F_IPV6_CSUM |
					  NETIF_F_RXCSUM |
					  NETIF_F_TSO |
					  NETIF_F_TSO6 |
					  NETIF_F_GRO |
					  NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM |
					  NETIF_F_RX_UDP_TUNNEL_PORT;

		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
				       NETIF_F_GSO_UDP_TUNNEL_CSUM |
				       NETIF_F_RX_UDP_TUNNEL_PORT;

		pdata->vxlan_offloads_set = 1;
		pdata->vxlan_features = NETIF_F_GSO_UDP_TUNNEL |
					NETIF_F_GSO_UDP_TUNNEL_CSUM |
					NETIF_F_RX_UDP_TUNNEL_PORT;
	}

	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

	netdev->features |= netdev->hw_features;
	pdata->netdev_features = netdev->features;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->min_mtu = 0;
	netdev->max_mtu = XGMAC_JUMBO_PACKET_MTU;
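	/*
	 * The networking core enforces min_mtu/max_mtu in dev_set_mtu(), so
	 * MTU requests beyond XGMAC_JUMBO_PACKET_MTU are rejected before
	 * they ever reach the driver.
	 */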

	/* Use default watchdog timeout */
	netdev->watchdog_timeo = 0;

	xgbe_init_rx_coalesce(pdata);
	xgbe_init_tx_coalesce(pdata);

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		return ret;
	}

	if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
		xgbe_ptp_register(pdata);

	xgbe_debugfs_init(pdata);

	netif_dbg(pdata, drv, pdata->netdev, "%u Tx software queues\n",
		  pdata->tx_ring_count);
	netif_dbg(pdata, drv, pdata->netdev, "%u Rx software queues\n",
		  pdata->rx_ring_count);

	return 0;
}

void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
{
	struct net_device *netdev = pdata->netdev;

	xgbe_debugfs_exit(pdata);

	if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
		xgbe_ptp_unregister(pdata);

	unregister_netdev(netdev);

	pdata->phy_if.phy_exit(pdata);
}

static int xgbe_netdev_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(data);
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	/* The notifier sees every net_device; only act on our own devices */
	if (netdev->netdev_ops != xgbe_get_netdev_ops())
		goto out;

	switch (event) {
	case NETDEV_CHANGENAME:
		xgbe_debugfs_rename(pdata);
		break;

	default:
		break;
	}

out:
	return NOTIFY_DONE;
}
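/*
 * Comparing netdev_ops against the driver's own ops table is the usual
 * idiom for picking out this driver's devices on a global notifier chain.
 * Note that netdev_priv() only computes a pointer into the net_device
 * allocation without dereferencing it, so taking it before the ownership
 * check is safe as long as it is used only for devices that pass.
 */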

static struct notifier_block xgbe_netdev_notifier = {
	.notifier_call = xgbe_netdev_event,
};

static int __init xgbe_mod_init(void)
{
	int ret;

	ret = register_netdevice_notifier(&xgbe_netdev_notifier);
	if (ret)
		return ret;

	ret = xgbe_platform_init();
	if (ret)
		goto err_platform_init;

	ret = xgbe_pci_init();
	if (ret)
		goto err_pci_init;

	return 0;

	/* Unwind in reverse order so a partial init does not leak */
err_pci_init:
	xgbe_platform_exit();
err_platform_init:
	unregister_netdevice_notifier(&xgbe_netdev_notifier);

	return ret;
}

static void __exit xgbe_mod_exit(void)
{
	xgbe_pci_exit();

	xgbe_platform_exit();

	unregister_netdevice_notifier(&xgbe_netdev_notifier);
}

module_init(xgbe_mod_init);
module_exit(xgbe_mod_exit);