1
2
3
4
5
6
7
8
9#include <linux/interrupt.h>
10#include <linux/module.h>
11
12#include "aq_main.h"
13#include "aq_nic.h"
14#include "aq_vec.h"
15#include "aq_hw.h"
16#include "aq_pci_func.h"
17#include "hw_atl/hw_atl_a0.h"
18#include "hw_atl/hw_atl_b0.h"
19#include "aq_filters.h"
20#include "aq_drvinfo.h"
21
/* PCI IDs this driver binds to; the (devid, revision) -> ops/caps mapping
 * for each entry lives in hw_atl_boards[] below.
 */
static const struct pci_device_id aq_pci_tbl[] = {
	/* D1xx/0001 IDs: matched further by PCI revision in hw_atl_boards[] */
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), },

	/* AQC1xx IDs: any hardware revision (AQ_HWREV_ANY below) */
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), },

	/* "S" variants */
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },

	{}	/* required zero terminator */
};
45
/* Board lookup table: maps (device id, PCI revision) to the hardware-ops
 * vtable and capability struct. Scanned linearly by
 * aq_pci_probe_get_hw_by_id(); the FIRST matching entry wins, so the
 * revision-specific A0/B0 rows must stay above the AQ_HWREV_ANY rows.
 */
static const struct aq_board_revision_s hw_atl_boards[] = {
	/* Revision 1 silicon -> A0 ops */
	{ AQ_DEVICE_ID_0001, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
	{ AQ_DEVICE_ID_D100, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, },
	{ AQ_DEVICE_ID_D107, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
	{ AQ_DEVICE_ID_D108, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, },
	{ AQ_DEVICE_ID_D109, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, },

	/* Revision 2 silicon -> B0 ops */
	{ AQ_DEVICE_ID_0001, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_D100, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, },
	{ AQ_DEVICE_ID_D107, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
	{ AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },

	/* AQC chips: B1 ops regardless of reported PCI revision */
	{ AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
	{ AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
	{ AQ_DEVICE_ID_AQC111, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, },
	{ AQ_DEVICE_ID_AQC112, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, },

	{ AQ_DEVICE_ID_AQC100S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, },
	{ AQ_DEVICE_ID_AQC107S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, },
	{ AQ_DEVICE_ID_AQC108S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, },
	{ AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
	{ AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
	{ AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },
};
73
74MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
75
76static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
77 const struct aq_hw_ops **ops,
78 const struct aq_hw_caps_s **caps)
79{
80 int i;
81
82 if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
83 return -EINVAL;
84
85 for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
86 if (hw_atl_boards[i].devid == pdev->device &&
87 (hw_atl_boards[i].revision == AQ_HWREV_ANY ||
88 hw_atl_boards[i].revision == pdev->revision)) {
89 *ops = hw_atl_boards[i].ops;
90 *caps = hw_atl_boards[i].caps;
91 break;
92 }
93 }
94
95 if (i == ARRAY_SIZE(hw_atl_boards))
96 return -EINVAL;
97
98 return 0;
99}
100
101int aq_pci_func_init(struct pci_dev *pdev)
102{
103 int err;
104
105 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
106 if (!err) {
107 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
108
109 }
110 if (err) {
111 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
112 if (!err)
113 err = pci_set_consistent_dma_mask(pdev,
114 DMA_BIT_MASK(32));
115 }
116 if (err != 0) {
117 err = -ENOSR;
118 goto err_exit;
119 }
120
121 err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
122 if (err < 0)
123 goto err_exit;
124
125 pci_set_master(pdev);
126
127 return 0;
128
129err_exit:
130 return err;
131}
132
133int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
134 char *name, irq_handler_t irq_handler,
135 void *irq_arg, cpumask_t *affinity_mask)
136{
137 struct pci_dev *pdev = self->pdev;
138 int err;
139
140 if (pdev->msix_enabled || pdev->msi_enabled)
141 err = request_irq(pci_irq_vector(pdev, i), irq_handler, 0,
142 name, irq_arg);
143 else
144 err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
145 IRQF_SHARED, name, irq_arg);
146
147 if (err >= 0) {
148 self->msix_entry_mask |= (1 << i);
149
150 if (pdev->msix_enabled && affinity_mask)
151 irq_set_affinity_hint(pci_irq_vector(pdev, i),
152 affinity_mask);
153 }
154
155 return err;
156}
157
158void aq_pci_func_free_irqs(struct aq_nic_s *self)
159{
160 struct pci_dev *pdev = self->pdev;
161 unsigned int i;
162 void *irq_data;
163
164 for (i = 32U; i--;) {
165 if (!((1U << i) & self->msix_entry_mask))
166 continue;
167 if (self->aq_nic_cfg.link_irq_vec &&
168 i == self->aq_nic_cfg.link_irq_vec)
169 irq_data = self;
170 else if (i < AQ_CFG_VECS_MAX)
171 irq_data = self->aq_vec[i];
172 else
173 continue;
174
175 if (pdev->msix_enabled)
176 irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
177 free_irq(pci_irq_vector(pdev, i), irq_data);
178 self->msix_entry_mask &= ~(1U << i);
179 }
180}
181
182unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
183{
184 if (self->pdev->msix_enabled)
185 return AQ_HW_IRQ_MSIX;
186 if (self->pdev->msi_enabled)
187 return AQ_HW_IRQ_MSI;
188
189 return AQ_HW_IRQ_LEGACY;
190}
191
/* Release the MSI-X/MSI/legacy vectors allocated in aq_pci_probe(). */
static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
{
	pci_free_irq_vectors(self->pdev);
}
196
/* Driver probe: enable the device, allocate the netdev/private nic,
 * resolve hw ops for this board, map the first memory BAR, allocate
 * interrupt vectors and register the net device. Error unwinding is
 * strictly the reverse of acquisition via the goto label chain below —
 * do not reorder labels or resource acquisition independently.
 */
static int aq_pci_probe(struct pci_dev *pdev,
			const struct pci_device_id *pci_id)
{
	struct net_device *ndev;
	resource_size_t mmio_pa;
	struct aq_nic_s *self;
	u32 numvecs;
	u32 bar;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = aq_pci_func_init(pdev);
	if (err)
		goto err_pci_func;

	ndev = aq_ndev_alloc();
	if (!ndev) {
		err = -ENOMEM;
		goto err_ndev;
	}

	self = netdev_priv(ndev);
	self->pdev = pdev;
	SET_NETDEV_DEV(ndev, &pdev->dev);
	pci_set_drvdata(pdev, self);

	mutex_init(&self->fwreq_mutex);

	/* Select ops/caps for this exact device id + silicon revision. */
	err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
					&aq_nic_get_cfg(self)->aq_hw_caps);
	if (err)
		goto err_ioremap;

	self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
	if (!self->aq_hw) {
		err = -ENOMEM;
		goto err_ioremap;
	}
	self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);

	/* Map the first memory BAR found; remaining BARs are ignored. */
	for (bar = 0; bar < 4; ++bar) {
		if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
			resource_size_t reg_sz;

			mmio_pa = pci_resource_start(pdev, bar);
			if (mmio_pa == 0U) {
				err = -EIO;
				goto err_free_aq_hw;
			}

			/* NOTE(review): 24 looks like a minimum register
			 * window size sanity check — confirm against the
			 * hw_atl register map.
			 */
			reg_sz = pci_resource_len(pdev, bar);
			if ((reg_sz <= 24 )) {
				err = -EIO;
				goto err_free_aq_hw;
			}

			self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
			if (!self->aq_hw->mmio) {
				err = -EIO;
				goto err_free_aq_hw;
			}
			break;
		}
	}

	/* Loop ran to completion: no memory BAR on this device. */
	if (bar == 4) {
		err = -EIO;
		goto err_free_aq_hw;
	}

	/* Data vectors: bounded by config default, hw caps and CPUs. */
	numvecs = min((u8)AQ_CFG_VECS_DEF,
		      aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
	numvecs = min(numvecs, num_online_cpus());

	/* One extra vector beyond the data vectors — presumably the
	 * link interrupt (cfg.link_irq_vec is freed separately in
	 * aq_pci_func_free_irqs()); confirm.
	 */
	numvecs += 1;

	numvecs += AQ_HW_SERVICE_IRQS;

#if !AQ_CFG_FORCE_LEGACY_INT
	/* May grant fewer vectors than requested (>= 1); on success the
	 * return value is the actual count.
	 */
	err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
				    PCI_IRQ_MSIX | PCI_IRQ_MSI |
				    PCI_IRQ_LEGACY);

	if (err < 0)
		goto err_hwinit;
	numvecs = err;
#endif
	self->irqvecs = numvecs;


	aq_nic_cfg_start(self);

	aq_nic_ndev_init(self);

	err = aq_nic_ndev_register(self);
	if (err < 0)
		goto err_register;

	aq_drvinfo_init(ndev);

	return 0;

err_register:
	aq_nic_free_vectors(self);
	aq_pci_free_irq_vectors(self);
err_hwinit:
	iounmap(self->aq_hw->mmio);
err_free_aq_hw:
	kfree(self->aq_hw);
err_ioremap:
	free_netdev(ndev);
err_ndev:
	pci_release_regions(pdev);
err_pci_func:
	pci_disable_device(pdev);

	return err;
}
318
/* Driver removal: tear down in the reverse order of aq_pci_probe().
 * The ndev NULL check guards against a probe that failed after
 * pci_set_drvdata() but left no netdev behind.
 */
static void aq_pci_remove(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	if (self->ndev) {
		aq_clear_rxnfc_all_rules(self);
		/* Only unregister if registration actually completed. */
		if (self->ndev->reg_state == NETREG_REGISTERED)
			unregister_netdev(self->ndev);
		aq_nic_free_vectors(self);
		aq_pci_free_irq_vectors(self);
		iounmap(self->aq_hw->mmio);
		kfree(self->aq_hw);
		pci_release_regions(pdev);
		free_netdev(self->ndev);
	}

	pci_disable_device(pdev);
}
337
/* System shutdown/reboot hook: quiesce the nic, then on power-off put
 * the function into D3hot with wake disabled.
 */
static void aq_pci_shutdown(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	aq_nic_shutdown(self);

	pci_disable_device(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
351
352static int aq_suspend_common(struct device *dev, bool deep)
353{
354 struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));
355
356 rtnl_lock();
357
358 nic->power_state = AQ_HW_POWER_STATE_D3;
359 netif_device_detach(nic->ndev);
360 netif_tx_stop_all_queues(nic->ndev);
361
362 aq_nic_stop(nic);
363
364 if (deep) {
365 aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
366 aq_nic_set_power(nic);
367 }
368
369 rtnl_unlock();
370
371 return 0;
372}
373
374static int atl_resume_common(struct device *dev, bool deep)
375{
376 struct pci_dev *pdev = to_pci_dev(dev);
377 struct aq_nic_s *nic;
378 int ret;
379
380 nic = pci_get_drvdata(pdev);
381
382 rtnl_lock();
383
384 pci_set_power_state(pdev, PCI_D0);
385 pci_restore_state(pdev);
386
387 if (deep) {
388 ret = aq_nic_init(nic);
389 if (ret)
390 goto err_exit;
391 }
392
393 ret = aq_nic_start(nic);
394 if (ret)
395 goto err_exit;
396
397 netif_device_attach(nic->ndev);
398 netif_tx_start_all_queues(nic->ndev);
399
400err_exit:
401 rtnl_unlock();
402
403 return ret;
404}
405
/* Hibernation "freeze": light suspend (no hw deinit/WoL programming). */
static int aq_pm_freeze(struct device *dev)
{
	return aq_suspend_common(dev, false);
}
410
/* Suspend/poweroff: deep suspend with hw deinit and power programming. */
static int aq_pm_suspend_poweroff(struct device *dev)
{
	return aq_suspend_common(dev, true);
}
415
/* Hibernation "thaw": light resume, counterpart of aq_pm_freeze(). */
static int aq_pm_thaw(struct device *dev)
{
	return atl_resume_common(dev, false);
}
420
/* Resume/restore: deep resume with full hw re-init. */
static int aq_pm_resume_restore(struct device *dev)
{
	return atl_resume_common(dev, true);
}
425
/* PM callbacks: suspend/poweroff/resume/restore take the deep path,
 * freeze/thaw (hibernation image creation) the light one.
 */
static const struct dev_pm_ops aq_pm_ops = {
	.suspend = aq_pm_suspend_poweroff,
	.poweroff = aq_pm_suspend_poweroff,
	.freeze = aq_pm_freeze,
	.resume = aq_pm_resume_restore,
	.restore = aq_pm_resume_restore,
	.thaw = aq_pm_thaw,
};
434
/* PCI driver descriptor; PM ops are only wired up when CONFIG_PM is set. */
static struct pci_driver aq_pci_ops = {
	.name = AQ_CFG_DRV_NAME,
	.id_table = aq_pci_tbl,
	.probe = aq_pci_probe,
	.remove = aq_pci_remove,
	.shutdown = aq_pci_shutdown,
#ifdef CONFIG_PM
	.driver.pm = &aq_pm_ops,
#endif
};
445
/* Register the PCI driver; called from module init. */
int aq_pci_func_register_driver(void)
{
	return pci_register_driver(&aq_pci_ops);
}
450
/* Unregister the PCI driver; called from module exit. */
void aq_pci_func_unregister_driver(void)
{
	pci_unregister_driver(&aq_pci_ops);
}
455
456