// SPDX-License-Identifier: GPL-2.0-only
/* aQuantia Corporation Network Driver
 * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
 */

/* File aq_pci_func.c: Definition of PCI functions. */

#include <linux/interrupt.h>
#include <linux/module.h>

#include "aq_main.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
#include "aq_filters.h"
#include "aq_drvinfo.h"

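/* PCI device IDs this driver binds to; exported to the PCI core (and to
 * userspace tooling) via MODULE_DEVICE_TABLE below.
 */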
static const struct pci_device_id aq_pci_tbl[] = {
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), },

	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), },

	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },

	{}
};

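/* Board table: maps a (device ID, hardware revision) pair onto the hw_ops
 * and capability structures for that silicon. AQ_HWREV_ANY matches any
 * revision.
 */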
static const struct aq_board_revision_s hw_atl_boards[] = {
	{ AQ_DEVICE_ID_0001,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
	{ AQ_DEVICE_ID_D100,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, },
	{ AQ_DEVICE_ID_D107,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
	{ AQ_DEVICE_ID_D108,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, },
	{ AQ_DEVICE_ID_D109,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, },

	{ AQ_DEVICE_ID_0001,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_D100,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, },
	{ AQ_DEVICE_ID_D107,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_D108,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
	{ AQ_DEVICE_ID_D109,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },

	{ AQ_DEVICE_ID_AQC100,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_AQC107,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_AQC108,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
	{ AQ_DEVICE_ID_AQC109,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
	{ AQ_DEVICE_ID_AQC111,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, },
	{ AQ_DEVICE_ID_AQC112,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, },

	{ AQ_DEVICE_ID_AQC100S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, },
	{ AQ_DEVICE_ID_AQC107S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, },
	{ AQ_DEVICE_ID_AQC108S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, },
	{ AQ_DEVICE_ID_AQC109S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
	{ AQ_DEVICE_ID_AQC111S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
	{ AQ_DEVICE_ID_AQC112S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },
};

MODULE_DEVICE_TABLE(pci, aq_pci_tbl);

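/* Select hw_ops/caps for the probed device by scanning hw_atl_boards;
 * fails with -EINVAL for foreign vendors or unknown device/revision pairs.
 */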
static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
				     const struct aq_hw_ops **ops,
				     const struct aq_hw_caps_s **caps)
{
	int i;

	if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
		if (hw_atl_boards[i].devid == pdev->device &&
		    (hw_atl_boards[i].revision == AQ_HWREV_ANY ||
		     hw_atl_boards[i].revision == pdev->revision)) {
			*ops = hw_atl_boards[i].ops;
			*caps = hw_atl_boards[i].caps;
			break;
		}
	}

	if (i == ARRAY_SIZE(hw_atl_boards))
		return -EINVAL;

	return 0;
}

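/* Basic PCI bring-up: prefer a 64-bit DMA mask with a 32-bit fallback,
 * claim the MMIO regions under the driver's name, and enable bus
 * mastering.
 */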
int aq_pci_func_init(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}
	if (err != 0) {
		err = -ENOSR;
		goto err_exit;
	}

	err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
	if (err < 0)
		goto err_exit;

	pci_set_master(pdev);

	return 0;

err_exit:
	return err;
}

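/* Request the IRQ for vector @i. Under MSI/MSI-X the caller's handler is
 * installed directly; under legacy INTx the shared legacy ISR is used
 * instead. On success the vector is recorded in msix_entry_mask and, for
 * MSI-X, an affinity hint is applied.
 */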
int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
			  char *name, irq_handler_t irq_handler,
			  void *irq_arg, cpumask_t *affinity_mask)
{
	struct pci_dev *pdev = self->pdev;
	int err;

	if (pdev->msix_enabled || pdev->msi_enabled)
		err = request_irq(pci_irq_vector(pdev, i), irq_handler, 0,
				  name, irq_arg);
	else
		err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
				  IRQF_SHARED, name, irq_arg);

	if (err >= 0) {
		self->msix_entry_mask |= (1 << i);

		if (pdev->msix_enabled && affinity_mask)
			irq_set_affinity_hint(pci_irq_vector(pdev, i),
					      affinity_mask);
	}

	return err;
}

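/* Free every IRQ recorded in msix_entry_mask, clearing the MSI-X affinity
 * hint first. The link/service vector was requested with @self as its
 * cookie; ring vectors were requested with their aq_vec.
 */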
void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
	struct pci_dev *pdev = self->pdev;
	unsigned int i;
	void *irq_data;

	for (i = 32U; i--;) {
		if (!((1U << i) & self->msix_entry_mask))
			continue;
		if (self->aq_nic_cfg.link_irq_vec &&
		    i == self->aq_nic_cfg.link_irq_vec)
			irq_data = self;
		else if (i < AQ_CFG_VECS_MAX)
			irq_data = self->aq_vec[i];
		else
			continue;

		if (pdev->msix_enabled)
			irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
		free_irq(pci_irq_vector(pdev, i), irq_data);
		self->msix_entry_mask &= ~(1U << i);
	}
}

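/* Report which interrupt mode the PCI core actually enabled for us. */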
unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
{
	if (self->pdev->msix_enabled)
		return AQ_HW_IRQ_MSIX;
	if (self->pdev->msi_enabled)
		return AQ_HW_IRQ_MSI;
	return AQ_HW_IRQ_LEGACY;
}

static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
{
	pci_free_irq_vectors(self->pdev);
}

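/* Probe one PCI function: enable the device, allocate the netdev and
 * private state, resolve hw_ops/caps from the board table, map the first
 * memory BAR, allocate interrupt vectors, and register the netdev. Error
 * paths unwind in reverse order of setup.
 */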
static int aq_pci_probe(struct pci_dev *pdev,
			const struct pci_device_id *pci_id)
{
	struct aq_nic_s *self;
	int err;
	struct net_device *ndev;
	resource_size_t mmio_pa;
	u32 bar;
	u32 numvecs;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = aq_pci_func_init(pdev);
	if (err)
		goto err_pci_func;

	ndev = aq_ndev_alloc();
	if (!ndev) {
		err = -ENOMEM;
		goto err_ndev;
	}

	self = netdev_priv(ndev);
	self->pdev = pdev;
	SET_NETDEV_DEV(ndev, &pdev->dev);
	pci_set_drvdata(pdev, self);

	mutex_init(&self->fwreq_mutex);

	err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
					&aq_nic_get_cfg(self)->aq_hw_caps);
	if (err)
		goto err_ioremap;

	self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
	if (!self->aq_hw) {
		err = -ENOMEM;
		goto err_ioremap;
	}
	self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);

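	/* Find the first memory BAR and map it; reject windows too small
	 * to hold the ATL register file.
	 */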
	for (bar = 0; bar < 4; ++bar) {
		if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
			resource_size_t reg_sz;

			mmio_pa = pci_resource_start(pdev, bar);
			if (mmio_pa == 0U) {
				err = -EIO;
				goto err_free_aq_hw;
			}

			reg_sz = pci_resource_len(pdev, bar);
			if (reg_sz <= 24) {
				err = -EIO;
				goto err_free_aq_hw;
			}

			self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
			if (!self->aq_hw->mmio) {
				err = -EIO;
				goto err_free_aq_hw;
			}
			break;
		}
	}

	if (bar == 4) {
		err = -EIO;
		goto err_free_aq_hw;
	}

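	/* Request up to AQ_CFG_VECS_DEF data vectors, capped by the
	 * hardware's MSI-X budget and the number of online CPUs, plus the
	 * service (link) interrupts.
	 */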
	numvecs = min((u8)AQ_CFG_VECS_DEF,
		      aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
	numvecs = min(numvecs, num_online_cpus());
	numvecs += AQ_HW_SERVICE_IRQS;

#if !AQ_CFG_FORCE_LEGACY_INT
	err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
				    PCI_IRQ_MSIX | PCI_IRQ_MSI |
				    PCI_IRQ_LEGACY);

	if (err < 0)
		goto err_hwinit;
	numvecs = err;
#endif
	self->irqvecs = numvecs;

	aq_nic_cfg_start(self);

	aq_nic_ndev_init(self);

	err = aq_nic_ndev_register(self);
	if (err < 0)
		goto err_register;

	aq_drvinfo_init(ndev);

	return 0;

err_register:
	aq_nic_free_vectors(self);
	aq_pci_free_irq_vectors(self);
err_hwinit:
	iounmap(self->aq_hw->mmio);
err_free_aq_hw:
	kfree(self->aq_hw);
err_ioremap:
	free_netdev(ndev);
err_ndev:
	pci_release_regions(pdev);
err_pci_func:
	pci_disable_device(pdev);
	return err;
}

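/* Device removal: tear down in reverse probe order, unregistering the
 * netdev first so no new I/O can arrive while resources are freed.
 */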
static void aq_pci_remove(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	if (self->ndev) {
		aq_clear_rxnfc_all_rules(self);
		if (self->ndev->reg_state == NETREG_REGISTERED)
			unregister_netdev(self->ndev);
		aq_nic_free_vectors(self);
		aq_pci_free_irq_vectors(self);
		iounmap(self->aq_hw->mmio);
		kfree(self->aq_hw);
		pci_release_regions(pdev);
		free_netdev(self->ndev);
	}

	pci_disable_device(pdev);
}

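/* Shutdown hook: quiesce the NIC, disable the device and, when the system
 * is powering off, disarm wake and put the function into D3hot.
 */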
static void aq_pci_shutdown(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	aq_nic_shutdown(self);

	pci_disable_device(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

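/* Legacy PM hooks: both simply forward the pm_message_t to
 * aq_nic_change_pm_state().
 */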
static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	return aq_nic_change_pm_state(self, &pm_msg);
}

static int aq_pci_resume(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);
	pm_message_t pm_msg = PMSG_RESTORE;

	return aq_nic_change_pm_state(self, &pm_msg);
}

static struct pci_driver aq_pci_ops = {
	.name = AQ_CFG_DRV_NAME,
	.id_table = aq_pci_tbl,
	.probe = aq_pci_probe,
	.remove = aq_pci_remove,
	.suspend = aq_pci_suspend,
	.resume = aq_pci_resume,
	.shutdown = aq_pci_shutdown,
};

int aq_pci_func_register_driver(void)
{
	return pci_register_driver(&aq_pci_ops);
}

void aq_pci_func_unregister_driver(void)
{
	pci_unregister_driver(&aq_pci_ops);
}