/* File aq_pci_func.c: Definition of PCI functions. */
12#include <linux/interrupt.h>
13#include <linux/module.h>
14
15#include "aq_main.h"
16#include "aq_nic.h"
17#include "aq_vec.h"
18#include "aq_hw.h"
19#include "aq_pci_func.h"
20#include "hw_atl/hw_atl_a0.h"
21#include "hw_atl/hw_atl_b0.h"
22
/* PCI IDs this driver binds to; all entries use the Aquantia vendor ID.
 * NOTE(review): the blank-line groups appear to correspond to board
 * generations (pre-production D-series, AQC1xx, AQC1xxS, AQC1xxE) —
 * confirm against hardware documentation.
 */
static const struct pci_device_id aq_pci_tbl[] = {
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), },

	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), },

	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },

	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111E), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112E), },

	{}	/* required zero terminator */
};
49
/* Lookup table mapping (device id, hardware revision) to the ops vtable
 * and capability descriptor used for that board.  Scanned linearly by
 * aq_pci_probe_get_hw_by_id(); the first matching entry wins, so the
 * revision-specific A0/B0 rows precede the AQ_HWREV_ANY wildcard rows.
 */
static const struct aq_board_revision_s hw_atl_boards[] = {
	{ AQ_DEVICE_ID_0001, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
	{ AQ_DEVICE_ID_D100, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, },
	{ AQ_DEVICE_ID_D107, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
	{ AQ_DEVICE_ID_D108, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, },
	{ AQ_DEVICE_ID_D109, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, },

	{ AQ_DEVICE_ID_0001, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_D100, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, },
	{ AQ_DEVICE_ID_D107, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
	{ AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },

	{ AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
	{ AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
	{ AQ_DEVICE_ID_AQC111, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, },
	{ AQ_DEVICE_ID_AQC112, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, },

	{ AQ_DEVICE_ID_AQC100S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, },
	{ AQ_DEVICE_ID_AQC107S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, },
	{ AQ_DEVICE_ID_AQC108S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, },
	{ AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
	{ AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
	{ AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },

	{ AQ_DEVICE_ID_AQC111E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111e, },
	{ AQ_DEVICE_ID_AQC112E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112e, },
};
80
/* Export the ID table so userspace module loaders can autoload us. */
MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
82
83static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
84 const struct aq_hw_ops **ops,
85 const struct aq_hw_caps_s **caps)
86{
87 int i = 0;
88
89 if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
90 return -EINVAL;
91
92 for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
93 if (hw_atl_boards[i].devid == pdev->device &&
94 (hw_atl_boards[i].revision == AQ_HWREV_ANY ||
95 hw_atl_boards[i].revision == pdev->revision)) {
96 *ops = hw_atl_boards[i].ops;
97 *caps = hw_atl_boards[i].caps;
98 break;
99 }
100 }
101
102 if (i == ARRAY_SIZE(hw_atl_boards))
103 return -EINVAL;
104
105 return 0;
106}
107
108int aq_pci_func_init(struct pci_dev *pdev)
109{
110 int err = 0;
111
112 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
113 if (!err) {
114 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
115
116 }
117 if (err) {
118 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
119 if (!err)
120 err = pci_set_consistent_dma_mask(pdev,
121 DMA_BIT_MASK(32));
122 }
123 if (err != 0) {
124 err = -ENOSR;
125 goto err_exit;
126 }
127
128 err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
129 if (err < 0)
130 goto err_exit;
131
132 pci_set_master(pdev);
133
134 return 0;
135
136err_exit:
137 return err;
138}
139
140int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
141 char *name, void *aq_vec, cpumask_t *affinity_mask)
142{
143 struct pci_dev *pdev = self->pdev;
144 int err = 0;
145
146 if (pdev->msix_enabled || pdev->msi_enabled)
147 err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0,
148 name, aq_vec);
149 else
150 err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
151 IRQF_SHARED, name, aq_vec);
152
153 if (err >= 0) {
154 self->msix_entry_mask |= (1 << i);
155 self->aq_vec[i] = aq_vec;
156
157 if (pdev->msix_enabled)
158 irq_set_affinity_hint(pci_irq_vector(pdev, i),
159 affinity_mask);
160 }
161 return err;
162}
163
164void aq_pci_func_free_irqs(struct aq_nic_s *self)
165{
166 struct pci_dev *pdev = self->pdev;
167 unsigned int i = 0U;
168
169 for (i = 32U; i--;) {
170 if (!((1U << i) & self->msix_entry_mask))
171 continue;
172
173 if (pdev->msix_enabled)
174 irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
175 free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
176 self->msix_entry_mask &= ~(1U << i);
177 }
178}
179
180unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
181{
182 if (self->pdev->msix_enabled)
183 return AQ_HW_IRQ_MSIX;
184 if (self->pdev->msi_enabled)
185 return AQ_HW_IRQ_MSIX;
186 return AQ_HW_IRQ_LEGACY;
187}
188
/* Return the MSI/MSI-X/INTx vectors allocated during probe to PCI core. */
static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
{
	pci_free_irq_vectors(self->pdev);
}
193
194static int aq_pci_probe(struct pci_dev *pdev,
195 const struct pci_device_id *pci_id)
196{
197 struct aq_nic_s *self = NULL;
198 int err = 0;
199 struct net_device *ndev;
200 resource_size_t mmio_pa;
201 u32 bar;
202 u32 numvecs;
203
204 err = pci_enable_device(pdev);
205 if (err)
206 return err;
207
208 err = aq_pci_func_init(pdev);
209 if (err)
210 goto err_pci_func;
211
212 ndev = aq_ndev_alloc();
213 if (!ndev) {
214 err = -ENOMEM;
215 goto err_ndev;
216 }
217
218 self = netdev_priv(ndev);
219 self->pdev = pdev;
220 SET_NETDEV_DEV(ndev, &pdev->dev);
221 pci_set_drvdata(pdev, self);
222
223 err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
224 &aq_nic_get_cfg(self)->aq_hw_caps);
225 if (err)
226 goto err_ioremap;
227
228 self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
229 if (!self->aq_hw) {
230 err = -ENOMEM;
231 goto err_ioremap;
232 }
233 self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
234
235 for (bar = 0; bar < 4; ++bar) {
236 if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
237 resource_size_t reg_sz;
238
239 mmio_pa = pci_resource_start(pdev, bar);
240 if (mmio_pa == 0U) {
241 err = -EIO;
242 goto err_free_aq_hw;
243 }
244
245 reg_sz = pci_resource_len(pdev, bar);
246 if ((reg_sz <= 24 )) {
247 err = -EIO;
248 goto err_free_aq_hw;
249 }
250
251 self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
252 if (!self->aq_hw->mmio) {
253 err = -EIO;
254 goto err_free_aq_hw;
255 }
256 break;
257 }
258 }
259
260 if (bar == 4) {
261 err = -EIO;
262 goto err_free_aq_hw;
263 }
264
265 numvecs = min((u8)AQ_CFG_VECS_DEF,
266 aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
267 numvecs = min(numvecs, num_online_cpus());
268
269#if !AQ_CFG_FORCE_LEGACY_INT
270 err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
271 PCI_IRQ_MSIX | PCI_IRQ_MSI |
272 PCI_IRQ_LEGACY);
273
274 if (err < 0)
275 goto err_hwinit;
276 numvecs = err;
277#endif
278 self->irqvecs = numvecs;
279
280
281 aq_nic_cfg_start(self);
282
283 aq_nic_ndev_init(self);
284
285 err = aq_nic_ndev_register(self);
286 if (err < 0)
287 goto err_register;
288
289 return 0;
290
291err_register:
292 aq_nic_free_vectors(self);
293 aq_pci_free_irq_vectors(self);
294err_hwinit:
295 iounmap(self->aq_hw->mmio);
296err_free_aq_hw:
297 kfree(self->aq_hw);
298err_ioremap:
299 free_netdev(ndev);
300err_ndev:
301 pci_release_regions(pdev);
302err_pci_func:
303 pci_disable_device(pdev);
304 return err;
305}
306
/* PCI remove callback: unwind everything aq_pci_probe() set up. */
static void aq_pci_remove(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	if (self->ndev) {
		/* Only unregister if registration actually completed. */
		if (self->ndev->reg_state == NETREG_REGISTERED)
			unregister_netdev(self->ndev);
		aq_nic_free_vectors(self);
		aq_pci_free_irq_vectors(self);
		iounmap(self->aq_hw->mmio);
		kfree(self->aq_hw);
		pci_release_regions(pdev);
		free_netdev(self->ndev);
	}

	pci_disable_device(pdev);
}
324
/* PCI shutdown callback: quiesce the nic for reboot or power-off. */
static void aq_pci_shutdown(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	aq_nic_shutdown(self);

	pci_disable_device(pdev);

	/* On power-off, park the device in D3hot with wake disabled. */
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
338
/* Legacy PM suspend hook: delegate the state change to the nic layer. */
static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	return aq_nic_change_pm_state(self, &pm_msg);
}
345
/* Legacy PM resume hook: restore the nic via a PMSG_RESTORE transition. */
static int aq_pci_resume(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);
	pm_message_t pm_msg = PMSG_RESTORE;

	return aq_nic_change_pm_state(self, &pm_msg);
}
353
/* PCI driver descriptor tying the ID table to the callbacks above. */
static struct pci_driver aq_pci_ops = {
	.name = AQ_CFG_DRV_NAME,
	.id_table = aq_pci_tbl,
	.probe = aq_pci_probe,
	.remove = aq_pci_remove,
	.suspend = aq_pci_suspend,
	.resume = aq_pci_resume,
	.shutdown = aq_pci_shutdown,
};
363
/* Generate module init/exit that register/unregister aq_pci_ops. */
module_pci_driver(aq_pci_ops);
365