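/* Broadcom NetXtreme-C/E network driver: PF-side SR-IOV support (VF ndo
 * callbacks, VF resource provisioning, and forwarding of VF HWRM requests).
 */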
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
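/* Forward an encapsulated async event completion to one VF (or to all VFs
 * when @vf is NULL) via the HWRM_FWD_ASYNC_EVENT_CMPL firmware channel.
 */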
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					  struct bnxt_vf_info *vf, u16 event_id)
{
	struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_fwd_async_event_cmpl_input req = {0};
	struct hwrm_async_event_cmpl *async_cmpl;
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
	if (vf)
		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req.encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
		goto fwd_async_event_cmpl_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_async_event_cmpl_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

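/* Common sanity checks for the VF ndo callbacks below: the PF must be up,
 * SR-IOV must be enabled, and vf_id must be within the active VF range.
 */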
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		netdev_err(bp->dev, "vf ndo called while PF is down\n");
		return -EINVAL;
	}
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called while sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.active_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

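/* .ndo_set_vf_spoofchk handler: toggle firmware source-MAC anti-spoofing
 * for a VF via HWRM_FUNC_CFG.
 */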
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	bool old_setting = false;
	u32 func_flags;
	int rc;

	if (bp->hwrm_spec_code < 0x10701)
		return -ENOTSUPP;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	func_flags = vf->func_flags;
	if (setting)
		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
	else
		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
	/* TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->func_flags = func_flags;
		if (setting)
			vf->flags |= BNXT_VF_SPOOFCHK;
		else
			vf->flags &= ~BNXT_VF_SPOOFCHK;
	}
	return rc;
}

static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return -EIO;
	}
	vf->func_qcfg_flags = le16_to_cpu(resp->flags);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}

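/* A VF is trusted if the firmware reports it as trusted (when the firmware
 * supports trusted VFs); otherwise fall back to the driver's BNXT_VF_TRUST
 * flag.
 */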
static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return !!(vf->flags & BNXT_VF_TRUST);

	bnxt_hwrm_func_qcfg_flags(bp, vf);
	return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
}

static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	if (vf->flags & BNXT_VF_TRUST)
		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
	else
		req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return -EIO;
	return 0;
}

int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;

	if (bnxt_vf_ndo_prep(bp, vf_id))
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	if (trusted)
		vf->flags |= BNXT_VF_TRUST;
	else
		vf->flags &= ~BNXT_VF_TRUST;

	bnxt_hwrm_set_trusted_vf(bp, vf);
	return 0;
}

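/* .ndo_get_vf_config handler: report the VF's MAC, VLAN, rates, spoofchk,
 * trust and link state settings cached by the PF.
 */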
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	if (is_valid_ether_addr(vf->mac_addr))
		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	else
		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	if (vf->flags & BNXT_VF_QOS)
		ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
	else
		ivi->qos = 0;
	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
	ivi->trusted = bnxt_is_trusted_vf(bp, vf);
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}

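/* .ndo_set_vf_mac handler: record the PF-assigned MAC for the VF and push
 * it to the firmware as the VF's default MAC address.
 */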
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* reject BC or MC mac addr, zero mac addr means allow
	 * VF to use its own mac addr
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

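/* .ndo_set_vf_vlan handler: program a default VLAN for the VF. Only 802.1Q
 * tags are supported and a nonzero priority (qos) is rejected.
 */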
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	if (bp->hwrm_spec_code < 0x10201)
		return -ENOTSUPP;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: needed to implement proper handling of user priority,
	 * currently fail the command if there is valid priority
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = vlan_tag;
	return rc;
}

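/* .ndo_set_vf_rate handler: validate the requested min/max TX rates against
 * the PF link speed and program them via HWRM_FUNC_CFG.
 */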
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
	req.min_bw = cpu_to_le32(min_tx_rate);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->min_tx_rate = min_tx_rate;
		vf->max_tx_rate = max_tx_rate;
	}
	return rc;
}

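/* .ndo_set_vf_link_state handler: record the forced/auto link state in the
 * VF flags and forward a link status change async event to the VF.
 */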
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
	}
	return 0;
}

static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	int i, rc = 0;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_vf_resc_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req.vf_id = cpu_to_le16(i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}

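/* Allocate the per-VF state array, the DMA-coherent pages used to stage
 * HWRM requests forwarded from the VFs, and the VF event bitmap.
 */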
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* 16 bytes = 128 bits, enough for the maximum of 128 VFs */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}

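/* Register the per-VF HWRM request buffer pages with the firmware
 * (HWRM_FUNC_BUF_RGTR) so that HWRM commands issued by the VFs can be
 * placed in the PF's buffers for inspection and forwarding.
 */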
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by the PF to reserve resources for VFs; returns the actual
 * number of VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_vf_resource_cfg_input req = {0};
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
	struct bnxt_pf_info *pf = &bp->pf;
	int i, rc = 0, min = 1;
	u16 vf_msix = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
		vf_ring_grps = 0;
	} else {
		vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
	}
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
	else
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
	req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		min = 0;
		req.min_rsscos_ctx = cpu_to_le16(min);
	}
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
	    pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		req.min_cmpl_rings = cpu_to_le16(min);
		req.min_tx_rings = cpu_to_le16(min);
		req.min_rx_rings = cpu_to_le16(min);
		req.min_l2_ctxs = cpu_to_le16(min);
		req.min_vnics = cpu_to_le16(min);
		req.min_stat_ctx = cpu_to_le16(min);
		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			req.min_hw_ring_grps = cpu_to_le16(min);
	} else {
		vf_cp_rings /= num_vfs;
		vf_tx_rings /= num_vfs;
		vf_rx_rings /= num_vfs;
		vf_vnics /= num_vfs;
		vf_stat_ctx /= num_vfs;
		vf_ring_grps /= num_vfs;

		req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
		req.min_tx_rings = cpu_to_le16(vf_tx_rings);
		req.min_rx_rings = cpu_to_le16(vf_rx_rings);
		req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
		req.min_vnics = cpu_to_le16(vf_vnics);
		req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
		req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	}
	req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.max_tx_rings = cpu_to_le16(vf_tx_rings);
	req.max_rx_rings = cpu_to_le16(vf_rx_rings);
	req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
	req.max_vnics = cpu_to_le16(vf_vnics);
	req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
	req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		req.max_msix = cpu_to_le16(vf_msix / num_vfs);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc) {
			rc = -ENOMEM;
			break;
		}
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = pf->first_vf_id + i;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (pf->active_vfs) {
		u16 n = pf->active_vfs;

		hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
		hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
		hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
					     n;
		hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
		hw_resc->max_rsscos_ctxs -= pf->active_vfs;
		hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
		hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			hw_resc->max_irqs -= vf_msix * n;

		rc = pf->active_vfs;
	}
	return rc;
}

/* Only called by the PF to reserve resources for VFs; returns the actual
 * number of VFs configured, or < 0 on error. This is the legacy path used
 * when the firmware resource manager (BNXT_NEW_RM) is not available.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u32 rc = 0, mtu, i;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;
	int total_vf_tx_rings = 0;
	u16 vf_ring_grps;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	/* Remaining rings are distributed equally amongst VFs for now */
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
			      num_vfs;
	vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);

	req.num_vnics = cpu_to_le16(vf_vnics);

	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		int vf_tx_rsvd = vf_tx_rings;

		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
					      &vf_tx_rsvd);
		if (rc)
			break;
		total_vf_tx_rings += vf_tx_rsvd;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (rc)
		rc = -ENOMEM;
	if (pf->active_vfs) {
		hw_resc->max_tx_rings -= total_vf_tx_rings;
		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
		hw_resc->max_rsscos_ctxs -= num_vfs;
		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		hw_resc->max_vnics -= vf_vnics * num_vfs;
		rc = pf->active_vfs;
	}
	return rc;
}

static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
{
	if (BNXT_NEW_RM(bp))
		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
	else
		return bnxt_hwrm_func_cfg(bp, num_vfs);
}

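/* Enable SR-IOV: trim *num_vfs down to what the remaining PF resources can
 * support, allocate and reserve VF resources, then call pci_enable_sriov().
 */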
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
	int avail_cp, avail_stat;

	/* Check if we can enable the requested number of VFs. At a minimum,
	 * we require one RX ring, one TX ring and one RSS context for each
	 * VF, plus an available completion ring and stat context per VF.
	 */
	vfs_supported = *num_vfs;

	avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
	avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
	avail_cp = min_t(int, avail_cp, avail_stat);

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}
		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
		    avail_cp < min_rx_rings)
			rx_ok = 0;

		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
		    avail_cp >= min_tx_rings)
			tx_ok = 1;

		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
		    min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	/* Reserve resources for VFs */
	rc = bnxt_func_cfg(bp, *num_vfs);
	if (rc != *num_vfs) {
		if (rc <= 0) {
			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
			*num_vfs = 0;
			goto err_out2;
		}
		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
		*num_vfs = rc;
	}

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto err_out2;

	bnxt_ulp_sriov_cfg(bp, *num_vfs);

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for the VFs */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

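/* Disable SR-IOV: destroy VF representors, disable the PCI VFs (unless they
 * are still assigned to VMs), free VF resources and restore PF resources.
 */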
void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	/* synchronize VF and VF-rep create and destroy */
	mutex_lock(&bp->sriov_lock);
	bnxt_vf_reps_destroy(bp);

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for the VFs */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}
	mutex_unlock(&bp->sriov_lock);

	bnxt_free_vf_resources(bp);

	bp->pf.active_vfs = 0;

	rtnl_lock();
	bnxt_restore_pf_fw_resources(bp);
	rtnl_unlock();

	bnxt_ulp_sriov_cfg(bp, 0);
}

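/* PCI sriov_configure hook: num_vfs == 0 disables SR-IOV, a nonzero value
 * (re)enables it. Returns the number of VFs actually enabled.
 */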
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "Cannot allow SRIOV if the irq mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Reject SRIOV config request since interface is down\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if the enabled VF count is the same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previously existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}

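/* Send a canned HWRM response back to a VF through the firmware using
 * HWRM_FWD_RESP. The encapsulated response is copied into the request and
 * delivered to the VF at the given response address and completion ring.
 */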
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};
	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
		goto fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);

	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
		goto fwd_err_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_err_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);

	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
		goto exec_fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

exec_fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

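/* Validate a forwarded HWRM_FUNC_VF_CFG request. A VF may set its own MAC
 * only if it is trusted, the PF has not assigned one, or the new MAC matches
 * the PF-assigned one; otherwise the request is rejected.
 */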
static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
	struct hwrm_func_vf_cfg_input *req =
		(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

	/* Allow VF to set a valid MAC address, if trust is set to on or
	 * if the PF assigned MAC address is zero
	 */
	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
		bool trust = bnxt_is_trusted_vf(bp, vf);

		if (is_valid_ether_addr(req->dflt_mac_addr) &&
		    (trust || !is_valid_ether_addr(vf->mac_addr) ||
		     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
		}
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
	}
	return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}

static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
	bool mac_ok = false;

	if (!is_valid_ether_addr((const u8 *)req->l2_addr))
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

	/* Allow VF to set a valid MAC address, if trust is set to on.
	 * Otherwise, the requested MAC must first match the MAC address in
	 * vf->mac_addr, or the VF MAC address if firmware spec >= 1.2.2.
	 */
	if (bnxt_is_trusted_vf(bp, vf)) {
		mac_ok = true;
	} else if (is_valid_ether_addr(vf->mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
			mac_ok = true;
	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
			mac_ok = true;
	} else {
		/* There are two cases:
		 * 1. If firmware spec < 0x10202, the VF MAC address is not
		 *    forwarded to the PF and so it cannot be validated.
		 * 2. Allow the VF to modify its own MAC when the PF has not
		 *    assigned a valid MAC address and firmware spec >= 0x10202.
		 */
		mac_ok = true;
	}
	if (mac_ok)
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

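/* Handle a forwarded HWRM_PORT_PHY_QCFG request. If the VF link state is
 * forced, synthesize a PHY query response reflecting the forced state
 * instead of executing the query against the real PHY.
 */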
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
		(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
		phy_qcfg_resp.valid = 1;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if link is forced up, report it as up at 10G full
			 * duplex even when the physical link is down
			 */
			if (phy_qcfg_resp.link !=
			    PORT_PHY_QCFG_RESP_LINK_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex_cfg =
					PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
				phy_qcfg_resp.duplex_state =
					PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex_state =
				PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

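/* Dispatch a forwarded HWRM request from a VF to the appropriate validation
 * handler based on its request type.
 */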
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_FUNC_VF_CFG:
		rc = bnxt_vf_configure_mac(bp, vf);
		break;
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO: validate whether the VF is allowed to change the MAC
		 * address, MTU, number of rings, etc.
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}

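/* Process pending forwarded VF commands: walk the VF event bitmap and
 * validate/forward the HWRM request staged by each flagged VF.
 */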
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VFs and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

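/* VF-side helper: query the firmware for this function's MAC address and
 * adopt it as the netdev address if the PF has assigned a valid one.
 */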
void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	/* Store the MAC address from the firmware. There are two cases:
	 * 1. The MAC address is valid. It is assigned by the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. The MAC address is zero. The VF will use a random MAC address
	 *    by default, but the stored zero MAC allows the VF user to
	 *    change the random MAC later using ndo_set_mac_address().
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}

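/* VF-side helper: ask the PF/firmware to approve a MAC address change. In
 * strict mode a rejection fails with -EADDRNOTAVAIL; otherwise the
 * rejection is ignored.
 */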
int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	if (bp->hwrm_spec_code < 0x10202) {
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
	if (rc && strict) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
		return rc;
	}
	return 0;
}
#else

/* Stubs used when CONFIG_BNXT_SRIOV is not set */
void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
	return 0;
}
#endif