1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41#include <linux/module.h>
42#include <linux/kernel.h>
43#include <linux/init.h>
44#include <linux/etherdevice.h>
45
46#include "nfp_net_ctrl.h"
47#include "nfp_net.h"
48#include "nfp_main.h"
49
50
51
52
53
54
55
56
/**
 * struct nfp_net_vf - per-PCI-function state for one NFP VF netdev
 * @nn:		the vNIC (&struct nfp_net) instantiated for this VF
 * @irq_entries: pre-allocated MSI-X entries - one per non-queue vector
 *		plus one per possible TX ring
 * @q_bar:	single mapping covering both TX and RX queue registers when
 *		they share one PCI BAR; NULL when TX/RX are mapped separately
 *		(also used by the error/remove paths to pick the right unmap)
 * @ddir:	per-device debugfs directory
 */
struct nfp_net_vf {
	struct nfp_net *nn;

	struct msix_entry irq_entries[NFP_NET_NON_Q_VECTORS +
				      NFP_NET_MAX_TX_RINGS];
	u8 __iomem *q_bar;

	struct dentry *ddir;
};
66
static const char nfp_net_driver_name[] = "nfp_netvf";

#define PCI_DEVICE_NFP6000VF 0x6003
/* Bind to any NFP6000 virtual function with a Netronome subsystem vendor;
 * subdevice/class are wildcarded.
 */
static const struct pci_device_id nfp_netvf_pci_device_ids[] = {
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000VF,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
	  PCI_ANY_ID, 0,
	},
	{ 0, } /* Required last entry. */
};
MODULE_DEVICE_TABLE(pci, nfp_netvf_pci_device_ids);
78
79static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
80{
81 u8 mac_addr[ETH_ALEN];
82
83 put_unaligned_be32(nn_readl(nn, NFP_NET_CFG_MACADDR + 0), &mac_addr[0]);
84 put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]);
85
86 if (!is_valid_ether_addr(mac_addr)) {
87 eth_hw_addr_random(nn->dp.netdev);
88 return;
89 }
90
91 ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
92 ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
93}
94
/**
 * nfp_netvf_pci_probe() - Probe and bring up one NFP VF PCI function
 * @pdev:   PCI device to probe
 * @pci_id: matched entry of nfp_netvf_pci_device_ids
 *
 * Enables the device, maps the control BAR to validate the firmware ABI,
 * maps the TX/RX queue BARs (shared or separate depending on ABI version),
 * then allocates the vNIC, its MSI-X vectors, and registers the netdev.
 * Unwinds with the goto-cleanup chain at the bottom on any failure.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int nfp_netvf_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *pci_id)
{
	struct nfp_net_fw_version fw_ver;
	int max_tx_rings, max_rx_rings;
	u32 tx_bar_off, rx_bar_off;
	u32 tx_bar_sz, rx_bar_sz;
	int tx_bar_no, rx_bar_no;
	struct nfp_net_vf *vf;
	unsigned int num_irqs;
	u8 __iomem *ctrl_bar;
	struct nfp_net *nn;
	u32 startq;
	int stride;
	int err;

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;
	pci_set_drvdata(pdev, vf);

	err = pci_enable_device_mem(pdev);
	if (err)
		goto err_free_vf;

	err = pci_request_regions(pdev, nfp_net_driver_name);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate device memory.\n");
		goto err_pci_disable;
	}

	pci_set_master(pdev);

	err = dma_set_mask_and_coherent(&pdev->dev,
					DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS));
	if (err)
		goto err_pci_regions;

	/* Map the control BAR first - the firmware ABI version and the
	 * device configuration (ring counts, queue base) live there.
	 */
	ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
				   NFP_NET_CFG_BAR_SZ);
	if (!ctrl_bar) {
		dev_err(&pdev->dev,
			"Failed to map resource %d\n", NFP_NET_CTRL_BAR);
		err = -EIO;
		goto err_pci_regions;
	}

	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}

	/* The ABI version dictates the queue-pointer stride and which PCI
	 * BAR(s) carry the TX/RX queue controllers.
	 */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		tx_bar_no = NFP_NET_Q0_BAR;
		rx_bar_no = NFP_NET_Q1_BAR;
		dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 5:
			stride = 4;
			tx_bar_no = NFP_NET_Q0_BAR;
			rx_bar_no = tx_bar_no; /* TX and RX share one BAR */
			break;
		default:
			dev_err(&pdev->dev, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.resv, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_ctrl_unmap;
		}
	}

	/* Find out how many rings are supported */
	max_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	max_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	tx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_tx_rings * stride;
	rx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_rx_rings * stride;

	/* Sanity-check the advertised ring counts against the actual BAR
	 * sizes; clamp rather than fail if firmware over-reports.
	 */
	if (tx_bar_sz > pci_resource_len(pdev, tx_bar_no)) {
		dev_err(&pdev->dev,
			"TX BAR too small for number of TX rings. Adjusting\n");
		tx_bar_sz = pci_resource_len(pdev, tx_bar_no);
		max_tx_rings = (tx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2;
	}
	if (rx_bar_sz > pci_resource_len(pdev, rx_bar_no)) {
		dev_err(&pdev->dev,
			"RX BAR too small for number of RX rings. Adjusting\n");
		rx_bar_sz = pci_resource_len(pdev, rx_bar_no);
		max_rx_rings = (rx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2;
	}

	startq = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	tx_bar_off = NFP_PCIE_QUEUE(startq);
	startq = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	rx_bar_off = NFP_PCIE_QUEUE(startq);

	/* Allocate and initialise the vNIC */
	nn = nfp_net_alloc(pdev, true, max_tx_rings, max_rx_rings);
	if (IS_ERR(nn)) {
		err = PTR_ERR(nn);
		goto err_ctrl_unmap;
	}
	vf->nn = nn;

	nn->fw_ver = fw_ver;
	nn->dp.ctrl_bar = ctrl_bar;
	nn->dp.is_vf = 1;
	nn->stride_tx = stride;
	nn->stride_rx = stride;

	if (rx_bar_no == tx_bar_no) {
		u32 bar_off, bar_sz;
		resource_size_t map_addr;

		/* Single BAR for both TX and RX queues: map one window
		 * spanning the union of the two ranges.
		 */
		if (tx_bar_off < rx_bar_off)
			bar_off = tx_bar_off;
		else
			bar_off = rx_bar_off;

		if ((tx_bar_off + tx_bar_sz) > (rx_bar_off + rx_bar_sz))
			bar_sz = (tx_bar_off + tx_bar_sz) - bar_off;
		else
			bar_sz = (rx_bar_off + rx_bar_sz) - bar_off;

		map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off;
		vf->q_bar = ioremap_nocache(map_addr, bar_sz);
		if (!vf->q_bar) {
			nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
			err = -EIO;
			goto err_netdev_free;
		}

		/* TX queues */
		nn->tx_bar = vf->q_bar + (tx_bar_off - bar_off);
		/* RX queues */
		nn->rx_bar = vf->q_bar + (rx_bar_off - bar_off);
	} else {
		resource_size_t map_addr;

		/* TX queues */
		map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off;
		nn->tx_bar = ioremap_nocache(map_addr, tx_bar_sz);
		if (!nn->tx_bar) {
			nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
			err = -EIO;
			goto err_netdev_free;
		}

		/* RX queues */
		map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off;
		nn->rx_bar = ioremap_nocache(map_addr, rx_bar_sz);
		if (!nn->rx_bar) {
			nn_err(nn, "Failed to map resource %d\n", rx_bar_no);
			err = -EIO;
			goto err_unmap_tx;
		}
	}

	nfp_netvf_get_mac_addr(nn);

	num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
				      NFP_NET_MIN_VNIC_IRQS,
				      NFP_NET_NON_Q_VECTORS +
				      nn->dp.num_r_vecs);
	if (!num_irqs) {
		nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
		err = -EIO;
		goto err_unmap_rx;
	}
	nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs);

	/* NOTE(review): fixed fallback ME clock frequency - the VF has no
	 * way to query the real value from here; confirm against PF driver.
	 */
	nn->me_freq_mhz = 1200;

	err = nfp_net_init(nn);
	if (err)
		goto err_irqs_disable;

	nfp_net_info(nn);
	vf->ddir = nfp_net_debugfs_device_add(pdev);
	nfp_net_debugfs_vnic_add(nn, vf->ddir, 0);

	return 0;

err_irqs_disable:
	nfp_net_irqs_disable(pdev);
err_unmap_rx:
	if (!vf->q_bar)
		iounmap(nn->rx_bar);
err_unmap_tx:
	if (!vf->q_bar)
		iounmap(nn->tx_bar);
	else
		iounmap(vf->q_bar);
err_netdev_free:
	nfp_net_free(nn);
err_ctrl_unmap:
	iounmap(ctrl_bar);
err_pci_regions:
	pci_release_regions(pdev);
err_pci_disable:
	pci_disable_device(pdev);
err_free_vf:
	pci_set_drvdata(pdev, NULL);
	kfree(vf);
	return err;
}
319
/**
 * nfp_netvf_pci_remove() - Tear down the VF vNIC on device removal
 * @pdev: PCI device being removed
 *
 * Unwinds probe: debugfs, vNIC clean-up, IRQs, BAR mappings, then frees
 * the vNIC and releases the PCI device.
 */
static void nfp_netvf_pci_remove(struct pci_dev *pdev)
{
	struct nfp_net_vf *vf = pci_get_drvdata(pdev);
	struct nfp_net *nn = vf->nn;

	/* Note, the order is slightly different from probe's error path
	 * as we need to keep the nn pointer around till we have freed
	 * everything that references it.
	 */
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_debugfs_dir_clean(&vf->ddir);

	nfp_net_clean(nn);

	nfp_net_irqs_disable(pdev);

	/* q_bar != NULL means TX and RX shared one mapping (see probe) */
	if (!vf->q_bar) {
		iounmap(nn->rx_bar);
		iounmap(nn->tx_bar);
	} else {
		iounmap(vf->q_bar);
	}
	iounmap(nn->dp.ctrl_bar);

	nfp_net_free(nn);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	pci_set_drvdata(pdev, NULL);
	kfree(vf);
}
351
/* PCI driver hooks for the NFP netdev VF driver; registered elsewhere
 * (non-static, so presumably by the module init code in another file).
 */
struct pci_driver nfp_netvf_pci_driver = {
	.name = nfp_net_driver_name,
	.id_table = nfp_netvf_pci_device_ids,
	.probe = nfp_netvf_pci_probe,
	.remove = nfp_netvf_pci_remove,
};
358