#include "fm10k.h"
#include "fm10k_vf.h"
#include "fm10k_pf.h"

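/* Mailbox handler for messages with an unrecognized ID: log the offending
 * message ID and VF index, then fall through to the generic TLV error
 * handler.
 */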
static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results,
			       struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;
	struct pci_dev *pdev = interface->pdev;

	dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n",
		**results & FM10K_TLV_ID_MASK, vf_info->vf_idx);

	return fm10k_tlv_msg_error(hw, results, mbx);
}

static const struct fm10k_msg_data iov_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};

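/* Handle VF LR (function-level reset) events. Any VF with its bit set in
 * the PFVFLRE registers has its resources reset and its mailbox
 * reconnected.
 */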
s32 fm10k_iov_event(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	s64 vflre;
	int i;

	/* if there is no iov_data then there is nothing to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
		goto read_unlock;

	/* read VFLRE to determine if any VFs have been reset */
	do {
		vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(0));
		vflre <<= 32;
		vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(1));
		vflre = (vflre << 32) | (vflre >> 32);
		vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));

		i = iov_data->num_vfs;

		for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
			struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

			if (vflre >= 0)
				continue;

			hw->iov.ops.reset_resources(hw, vf_info);
			vf_info->mbx.ops.connect(hw, &vf_info->mbx);
		}
	} while (i != iov_data->num_vfs);

read_unlock:
	rcu_read_unlock();

	return 0;
}

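/* Service the per-VF mailboxes, picking up where the previous pass left off
 * (next_vf_mbx) so that no VF is starved when the switch manager mailbox
 * fills up.
 */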
s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	/* Most VF messages sent to the PF cause the PF to respond by
	 * requesting from the SM mailbox. This means that too many VF
	 * messages processed at once could cause the SM mailbox to fill up.
	 * To prevent this, store a pointer to the next VF mbx to process. Use
	 * that as the start of the loop so that we don't starve whichever VF
	 * got ignored on the previous run.
	 */
process_mbx:
	for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		struct fm10k_mbx_info *mbx = &vf_info->mbx;
		u16 glort = vf_info->glort;

		/* verify port mapping is valid, if not reset port */
		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort))
			hw->iov.ops.reset_lport(hw, vf_info);

		/* reset VFs that have mailbox timed out */
		if (!mbx->timeout) {
			hw->iov.ops.reset_resources(hw, vf_info);
			mbx->ops.connect(hw, mbx);
		}

		/* guarantee we have free space in the SM mailbox */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
			/* keep track of how many times this occurs */
			interface->hw_sm_mbx_full++;
			break;
		}

		/* cleanup mailbox and process received messages */
		mbx->ops.process(hw, mbx);
	}

	/* if we stopped processing mailboxes early, update next_vf_mbx.
	 * Otherwise, reset next_vf_mbx, and restart loop so that we process
	 * the remaining mailboxes we skipped at the start.
	 */
	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}

	/* free the lock */
	fm10k_mbx_unlock(interface);

read_unlock:
	rcu_read_unlock();

	return 0;
}

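/* Quiesce SR-IOV ahead of a suspend or reset: disable the VF RSS DGLORT
 * mapping and release each VF's queue resources and logical port.
 */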
void fm10k_iov_suspend(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs to ensure stability */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* shut down queue mapping for VFs */
	fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss),
			FM10K_DGLORTMAP_NONE);

	/* Stop any active VFs and reset their resources */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		hw->iov.ops.reset_resources(hw, vf_info);
		hw->iov.ops.reset_lport(hw, vf_info);
	}
}

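/* Restore SR-IOV state after a suspend or reset: reassign hardware
 * resources, rebuild the VF RSS DGLORT mapping, and bring each VF's logical
 * port and mailbox back up.
 */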
int fm10k_iov_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs to ensure stability */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* return error if iov_data is not already populated */
	if (!iov_data)
		return -ENOMEM;

	/* allocate hardware resources for the VFs */
	hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);

	/* configure DGLORT mapping for RSS */
	dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	dglort.idx = fm10k_dglort_vf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
	dglort.queue_b = fm10k_vf_queue_index(hw, 0);
	dglort.vsi_l = fls(hw->iov.total_vfs - 1);
	dglort.vsi_b = 1;

	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign resources to the device */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* allocate all but the last GLORT to the VFs */
		if (i == ((~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT))
			break;

		/* assign GLORT to VF, and restrict it to multicast */
		hw->iov.ops.set_lport(hw, vf_info, i,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* mailbox is disconnected, so don't send a message */
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

		/* now we are ready so we can connect */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

	return 0;
}

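/* Record a new switch-assigned default VID for the VF that owns @glort and,
 * if it changed, push the updated MAC/VLAN configuration to that VF.
 */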
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
{
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;
	u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE;

	/* no IOV support, not our message to process */
	if (!iov_data)
		return FM10K_ERR_PARAM;

	/* glort outside our range, not our message to process */
	if (vf_idx >= iov_data->num_vfs)
		return FM10K_ERR_PARAM;

	/* determine if an update has occurred, and if so notify the VF */
	vf_info = &iov_data->vf_info[vf_idx];
	if (vf_info->sw_vid != pvid) {
		vf_info->sw_vid = pvid;
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
	}

	return 0;
}

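/* Tear down the driver's per-VF state: suspend SR-IOV in hardware, then
 * free iov_data once any RCU readers in the mailbox/event paths are done.
 */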
static void fm10k_iov_free_data(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);

	if (!interface->iov_data)
		return;

	/* reclaim hardware resources */
	fm10k_iov_suspend(pdev);

	/* drop the reference, freeing iov_data after an RCU grace period */
	kfree_rcu(interface->iov_data, rcu);
	interface->iov_data = NULL;
}

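/* Allocate and initialize per-VF tracking structures and mailboxes for
 * num_vfs VFs, then bring SR-IOV up via fm10k_iov_resume().
 */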
static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	size_t size;
	int i, err;

	/* return error if iov_data is already populated */
	if (iov_data)
		return -EBUSY;

	/* The PF should always be able to assign resources */
	if (!hw->iov.ops.assign_resources)
		return -ENODEV;

	/* nothing to do if no VFs are requested */
	if (!num_vfs)
		return 0;

	/* allocate memory for VF storage */
	size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]);
	iov_data = kzalloc(size, GFP_KERNEL);
	if (!iov_data)
		return -ENOMEM;

	/* record number of VFs */
	iov_data->num_vfs = num_vfs;

	/* loop through vf_info structures, initializing each entry */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* record VF VSI value */
		vf_info->vsi = i + 1;
		vf_info->vf_idx = i;

		/* initialize mailbox memory */
		err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to initialize SR-IOV mailbox\n");
			kfree(iov_data);
			return err;
		}
	}

	/* assign iov_data to interface */
	interface->iov_data = iov_data;

	/* allocate hardware resources for the VFs */
	fm10k_iov_resume(pdev);

	return 0;
}

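/* Disable SR-IOV on the device, unless VFs are still assigned to guests,
 * and release the driver's per-VF state.
 */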
void fm10k_iov_disable(struct pci_dev *pdev)
{
	if (pci_num_vf(pdev) && pci_vfs_assigned(pdev))
		dev_err(&pdev->dev,
			"Cannot disable SR-IOV while VFs are assigned\n");
	else
		pci_disable_sriov(pdev);

	fm10k_iov_free_data(pdev);
}

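/* Clear the Completer Abort bit in the AER Uncorrectable Error Severity
 * register so that completer aborts are reported as non-fatal errors.
 */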
static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev)
{
	u32 err_sev;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev);
	err_sev &= ~PCI_ERR_UNC_COMP_ABORT;
	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev);
}

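/* PCI sriov_configure callback: enable the requested number of VFs, or tear
 * SR-IOV down when num_vfs is zero. Returns the number of VFs enabled on
 * success or a negative errno.
 */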
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	int current_vfs = pci_num_vf(pdev);
	int err = 0;

	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
	} else {
		pci_disable_sriov(pdev);
		fm10k_iov_free_data(pdev);
	}

	/* allocate resources for the VFs */
	err = fm10k_iov_alloc_data(pdev, num_vfs);
	if (err)
		return err;

	/* allocate VFs if not already allocated */
	if (num_vfs && (num_vfs != current_vfs)) {
		/* Disable completer abort error reporting as
		 * the VFs can trigger this any time they read a queue
		 * that they don't own.
		 */
		fm10k_disable_aer_comp_abort(pdev);

		err = pci_enable_sriov(pdev, num_vfs);
		if (err) {
			dev_err(&pdev->dev,
				"Enable PCI SR-IOV failed: %d\n", err);
			return err;
		}
	}

	return num_vfs;
}

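/* Re-apply the administratively configured MAC/VLAN settings to a VF and
 * cycle its logical port so that the new configuration takes effect.
 */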
static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
				       struct fm10k_vf_info *vf_info)
{
	struct fm10k_hw *hw = &interface->hw;

	/* updating the VF configuration generates mailbox messages */
	fm10k_mbx_lock(interface);

	/* disable the LPORT for this VF, which clears its switch rules */
	hw->iov.ops.reset_lport(hw, vf_info);

	/* assign the new default MAC/VLAN for this VF */
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	/* re-enable the LPORT for this VF */
	hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx,
			      FM10K_VF_FLAG_MULTI_CAPABLE);

	fm10k_mbx_unlock(interface);
}

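/* ndo handler for "ip link set ... vf N mac": validate and record an
 * administratively set MAC address for a VF, then reset the VF so the new
 * address is applied.
 */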
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* verify MAC addr is valid */
	if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
		return -EINVAL;

	/* record new MAC address */
	vf_info = &iov_data->vf_info[vf_idx];
	ether_addr_copy(vf_info->mac, mac);

	fm10k_reset_vf_info(interface, vf_info);

	return 0;
}

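/* ndo handler for setting a VF's port VLAN: only 802.1Q without a QoS
 * priority is supported.
 */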
int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
			  u8 qos, __be16 vlan_proto)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* QOS is unsupported and VLAN IDs accepted range 0-4094 */
	if (qos || (vid > (VLAN_VID_MASK - 1)))
		return -EINVAL;

	/* only the 802.1Q VLAN protocol is supported */
	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	vf_info = &iov_data->vf_info[vf_idx];

	/* exit if there is nothing to do */
	if (vf_info->pf_vid == vid)
		return 0;

	/* record default VLAN ID for VF */
	vf_info->pf_vid = vid;

	/* Clear the VLAN table for the VF */
	hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);

	fm10k_reset_vf_info(interface, vf_info);

	return 0;
}

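/* ndo handler for setting a VF's maximum Tx rate; the minimum rate argument
 * is not supported and is ignored.
 */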
int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
			int __always_unused unused, int rate)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* reject rates outside the supported traffic class range */
	if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX))
		return -EINVAL;

	/* record the requested rate for this VF */
	iov_data->vf_info[vf_idx].rate = rate;

	/* update hardware configuration */
	hw->iov.ops.configure_tc(hw, vf_idx, rate);

	return 0;
}

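/* ndo handler that reports a VF's current configuration (MAC address,
 * default VLAN, and Tx rate) back to the networking stack.
 */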
int fm10k_ndo_get_vf_config(struct net_device *netdev,
			    int vf_idx, struct ifla_vf_info *ivi)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	vf_info = &iov_data->vf_info[vf_idx];

	ivi->vf = vf_idx;
	ivi->max_tx_rate = vf_info->rate;
	ivi->min_tx_rate = 0;
	ether_addr_copy(ivi->mac, vf_info->mac);
	ivi->vlan = vf_info->pf_vid;
	ivi->qos = 0;

	return 0;
}