/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
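/* Forward an async event completion to a VF through firmware.  A NULL
 * @vf broadcasts the event to all VFs (target id 0xffff).
 */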
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					  struct bnxt_vf_info *vf, u16 event_id)
{
	struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_fwd_async_event_cmpl_input req = {0};
	struct hwrm_async_event_cmpl *async_cmpl;
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
	if (vf)
		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req.encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
		goto fwd_async_event_cmpl_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_async_event_cmpl_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

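/* Common validation for the VF ndo entry points: the PF must be up,
 * SR-IOV must be active, and vf_id must be in range.
 */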
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		netdev_err(bp->dev, "vf ndo called even though PF is down\n");
		return -EINVAL;
	}
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called even though sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.max_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	bool old_setting = false;
	u32 func_flags;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	func_flags = vf->func_flags;
	if (setting)
		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
	else
		func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
	/* TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->func_flags = func_flags;
		if (setting)
			vf->flags |= BNXT_VF_SPOOFCHK;
		else
			vf->flags &= ~BNXT_VF_SPOOFCHK;
	}
	return rc;
}

int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	ivi->qos = vf->flags & BNXT_VF_QOS;
	ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}

int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* reject bc or mc mac addr, zero mac addr means allow
	 * VF to use its own mac addr
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	if (bp->hwrm_spec_code < 0x10201)
		return -ENOTSUPP;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: needed to implement proper handling of user priority,
	 * currently fail the command if there is valid priority
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = vlan_tag;
	return rc;
}

int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
	req.min_bw = cpu_to_le32(min_tx_rate);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->min_tx_rate = min_tx_rate;
		vf->max_tx_rate = max_tx_rate;
	}
	return rc;
}

int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
		vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
	}
	return 0;
}

static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	int i, rc = 0;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_vf_resc_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req.vf_id = cpu_to_le16(i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}

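/* Allocate the per-VF state array and the DMA-coherent pages used to
 * buffer forwarded VF HWRM commands.  Each VF gets one
 * BNXT_HWRM_REQ_MAX_SIZE slice of a page; the pages themselves are
 * registered with firmware in bnxt_hwrm_func_buf_rgtr().
 */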
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VFs */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}

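/* Register the VF command request pages with firmware.  VF HWRM
 * requests are placed in these buffers for the PF driver to inspect
 * and forward.
 */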
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

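/* Carve up the PF's leftover rings, stat contexts and VNICs evenly
 * among the VFs and push the per-VF configuration to firmware with
 * one HWRM_FUNC_CFG per VF.
 */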
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	u16 vf_ring_grps;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;
	u32 mtu, i;
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	/* Remaining rings are distributed equally amongst VFs for now */
	vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs;
	vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
	vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);
	vf_vnics = 1;

	req.num_vnics = cpu_to_le16(vf_vnics);

	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (!rc) {
		pf->max_tx_rings -= vf_tx_rings * num_vfs;
		pf->max_rx_rings -= vf_rx_rings * num_vfs;
		pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		pf->max_cp_rings -= vf_cp_rings * num_vfs;
		pf->max_rsscos_ctxs -= num_vfs;
		pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		pf->max_vnics -= vf_vnics * num_vfs;
	}
	return rc;
}

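/* Enable SR-IOV: shrink the requested VF count to what the spare PF
 * resources can support, allocate and register the VF command buffers,
 * configure each VF in firmware, then enable the VFs in PCIe config
 * space.
 */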
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;

	/* Check if we can enable the requested number of VFs.  At a
	 * minimum, each VF needs one RX ring, one TX ring and one RSS
	 * context out of what the PF has left over; keep reducing the
	 * count until everything fits.
	 */
	vfs_supported = *num_vfs;

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}

		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
			tx_ok = 1;

		if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	/* Reserve resources for VFs */
	rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
	if (rc)
		goto err_out2;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for the VFs */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);

		/* Free the HW resources reserved for the VFs */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}

	bnxt_free_vf_resources(bp);

	bp->pf.active_vfs = 0;

	rtnl_lock();
	bnxt_restore_pf_fw_resources(bp);
	rtnl_unlock();
}

int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "Not allowing SRIOV because the irq mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Rejecting SRIOV config request because the interface is down\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if the enabled VF count already matches the request */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* If there are previously enabled VFs, clean them up first */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}

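/* Forward a response that the PF has prepared on behalf of a VF back
 * to that VF through the HWRM_FWD_RESP firmware channel.
 */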
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};
	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
		goto fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);

	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
		goto fwd_err_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_err_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);

	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_exec_fwd_resp failed. rc:%d\n", rc);
		goto exec_fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_exec_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

exec_fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

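/* A VF is asking to set up an L2 filter.  Allow it only if the VF has
 * no PF-administered MAC or the requested address matches it;
 * otherwise reject the forwarded command.
 */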
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;

	if (!is_valid_ether_addr(vf->mac_addr) ||
	    ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	else
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

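/* Answer a VF's PORT_PHY_QCFG query.  If the VF link state is not
 * forced, forward the query to firmware; otherwise synthesize a
 * response reflecting the forced up/down state.
 */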
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
			(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on the VF */
			if (phy_qcfg_resp.link !=
			    PORT_PHY_QCFG_RESP_LINK_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex =
					PORT_PHY_QCFG_RESP_DUPLEX_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO Validate if VF is allowed to change mac address,
		 * mtu, num of rings etc
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}

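/* Service pending VF commands: walk vf_event_bmap and validate and
 * forward each VF's buffered HWRM request.
 */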
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VFs and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

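/* VF only.  Query firmware for the MAC address administered by the PF
 * and adopt it as the netdev address if one has been assigned.
 */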
void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	/* Store MAC address from the firmware.  There are 2 cases:
	 * 1. MAC address is valid.  It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. MAC address is zero.  The VF will use a random MAC address by
	 *    default.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}

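/* VF only.  Ask the PF (via HWRM_FUNC_VF_CFG) to approve the MAC
 * address the VF wants to use.  Firmware older than spec 0x10202
 * cannot do this, so in that case just fail if the PF has already
 * assigned a MAC.
 */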
int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	if (bp->hwrm_spec_code < 0x10202) {
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
	if (rc) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
	}
	return rc;
}
#else

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
{
	return 0;
}
#endif