/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
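/* Forward an async event completion to a VF so that its driver can react
 * to PF-side events such as link changes.  When @vf is NULL the event is
 * broadcast to all VFs via the 0xffff target id.
 */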
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					  struct bnxt_vf_info *vf, u16 event_id)
{
	struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_fwd_async_event_cmpl_input req = {0};
	struct hwrm_async_event_cmpl *async_cmpl;
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
	if (vf)
		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req.encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
		goto fwd_async_event_cmpl_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_async_event_cmpl_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

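/* Common validation for the VF ndo handlers below: the PF must be up,
 * SR-IOV must be enabled, and vf_id must be within the active range.
 */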
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		netdev_err(bp->dev, "vf ndo called even though PF is down\n");
		return -EINVAL;
	}
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called even though sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.active_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

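/* .ndo_set_vf_spoofchk handler (e.g. "ip link set <pf> vf <n> spoofchk on"):
 * toggles firmware source MAC anti-spoof checking for a VF.  Needs HWRM
 * spec 1.7.1 or newer.
 */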
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	bool old_setting = false;
	u32 func_flags;
	int rc;

	if (bp->hwrm_spec_code < 0x10701)
		return -ENOTSUPP;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	func_flags = vf->func_flags;
	if (setting)
		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
	else
		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
	/* TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->func_flags = func_flags;
		if (setting)
			vf->flags |= BNXT_VF_SPOOFCHK;
		else
			vf->flags &= ~BNXT_VF_SPOOFCHK;
	}
	return rc;
}

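/* .ndo_set_vf_trust handler: a trusted VF is allowed to change its own
 * MAC address even after the PF has administratively assigned one.
 */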
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;

	if (bnxt_vf_ndo_prep(bp, vf_id))
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	if (trusted)
		vf->flags |= BNXT_VF_TRUST;
	else
		vf->flags &= ~BNXT_VF_TRUST;

	return 0;
}

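/* .ndo_get_vf_config handler: report the VF's MAC, VLAN, rate limits,
 * spoofchk, trust and link state settings back to the rtnetlink caller.
 */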
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	if (is_valid_ether_addr(vf->mac_addr))
		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	else
		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	if (vf->flags & BNXT_VF_QOS)
		ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
	else
		ivi->qos = 0;
	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
	ivi->trusted = !!(vf->flags & BNXT_VF_TRUST);
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}

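/* .ndo_set_vf_mac handler: administratively assign a MAC address to a VF
 * and program it as the VF's default MAC in the firmware.
 */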
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* reject BC or MC mac addr, zero mac addr means allow
	 * VF to use its own mac addr
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

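/* .ndo_set_vf_vlan handler: set the default VLAN for a VF.  Only 802.1Q
 * tags are accepted and a non-zero QoS priority is currently rejected.
 * Needs HWRM spec 1.2.1 or newer.
 */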
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	if (bp->hwrm_spec_code < 0x10201)
		return -ENOTSUPP;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: needed to implement proper handling of user priority,
	 * currently fail the command if there is valid priority
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = vlan_tag;
	return rc;
}

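/* .ndo_set_vf_rate handler: program min/max TX bandwidth for a VF after
 * validating both rates against the current PF link speed.
 */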
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
	req.min_bw = cpu_to_le32(min_tx_rate);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->min_tx_rate = min_tx_rate;
		vf->max_tx_rate = max_tx_rate;
	}
	return rc;
}

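/* .ndo_set_vf_link_state handler: "auto" tracks the physical link, while
 * enable/disable force the VF link up or down.  A forwarded async
 * completion notifies the VF driver of the change.
 */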
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	return rc;
}

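/* Reset the per-VF state structures before (re)configuring SR-IOV. */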
static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
	}
	return 0;
}

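/* Ask the firmware to free the resources of every VF in the range
 * [first_vf_id, first_vf_id + num_vfs).
 */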
static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	int i, rc = 0;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_vf_resc_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req.vf_id = cpu_to_le16(i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

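/* Free the VF event bitmap, the DMA pages used for forwarded VF HWRM
 * commands, and the per-VF state array.
 */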
static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}

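/* Allocate the per-VF state array and the DMA-coherent pages that receive
 * each VF's forwarded HWRM requests, then carve a request slot out of
 * those pages for every VF.
 */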
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* 16 bytes hold one bit per VF, enough for up to 128 VFs */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}

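/* Register the VF request pages with the firmware so that it can DMA
 * each VF's HWRM commands to the PF driver for validation.
 */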
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by the PF to reserve firmware resources for VFs.  Returns
 * the number of VFs actually configured, or a negative error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_vf_resource_cfg_input req = {0};
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
	struct bnxt_pf_info *pf = &bp->pf;
	int i, rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);

	vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
	vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
	else
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
	vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req.min_rsscos_ctx = cpu_to_le16(1);
	req.max_rsscos_ctx = cpu_to_le16(1);
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
		req.min_cmpl_rings = cpu_to_le16(1);
		req.min_tx_rings = cpu_to_le16(1);
		req.min_rx_rings = cpu_to_le16(1);
		req.min_l2_ctxs = cpu_to_le16(1);
		req.min_vnics = cpu_to_le16(1);
		req.min_stat_ctx = cpu_to_le16(1);
		req.min_hw_ring_grps = cpu_to_le16(1);
	} else {
		vf_cp_rings /= num_vfs;
		vf_tx_rings /= num_vfs;
		vf_rx_rings /= num_vfs;
		vf_vnics /= num_vfs;
		vf_stat_ctx /= num_vfs;
		vf_ring_grps /= num_vfs;

		req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
		req.min_tx_rings = cpu_to_le16(vf_tx_rings);
		req.min_rx_rings = cpu_to_le16(vf_rx_rings);
		req.min_l2_ctxs = cpu_to_le16(4);
		req.min_vnics = cpu_to_le16(vf_vnics);
		req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
		req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	}
	req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.max_tx_rings = cpu_to_le16(vf_tx_rings);
	req.max_rx_rings = cpu_to_le16(vf_rx_rings);
	req.max_l2_ctxs = cpu_to_le16(4);
	req.max_vnics = cpu_to_le16(vf_vnics);
	req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
	req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc) {
			rc = -ENOMEM;
			break;
		}
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = pf->first_vf_id + i;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (pf->active_vfs) {
		u16 n = pf->active_vfs;

		hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
		hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
		hw_resc->max_hw_ring_grps -=
			le16_to_cpu(req.min_hw_ring_grps) * n;
		hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
		hw_resc->max_rsscos_ctxs -= pf->active_vfs;
		hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
		hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;

		rc = pf->active_vfs;
	}
	return rc;
}

/* Only called by the PF to reserve firmware resources for VFs via the
 * legacy HWRM_FUNC_CFG interface.  Returns the number of VFs actually
 * configured, or a negative error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u16 vf_ring_grps, max_stat_ctxs;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;
	int total_vf_tx_rings = 0;
	int rc = 0;
	u32 mtu, i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	max_stat_ctxs = hw_resc->max_stat_ctxs;

	/* Remaining rings are distributed equally amongst VFs for now */
	vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
	vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
			      num_vfs;
	vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);

	req.num_vnics = cpu_to_le16(vf_vnics);

	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		int vf_tx_rsvd = vf_tx_rings;

		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
					      &vf_tx_rsvd);
		if (rc)
			break;
		total_vf_tx_rings += vf_tx_rsvd;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (rc)
		rc = -ENOMEM;
	if (pf->active_vfs) {
		hw_resc->max_tx_rings -= total_vf_tx_rings;
		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
		hw_resc->max_rsscos_ctxs -= num_vfs;
		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		hw_resc->max_vnics -= vf_vnics * num_vfs;
		rc = pf->active_vfs;
	}
	return rc;
}

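/* Reserve resources for the requested number of VFs, using the newer
 * resource manager interface when BNXT_FLAG_NEW_RM is set and the
 * legacy HWRM_FUNC_CFG path otherwise.
 */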
static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
{
	if (bp->flags & BNXT_FLAG_NEW_RM)
		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
	else
		return bnxt_hwrm_func_cfg(bp, num_vfs);
}

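/* Enable SR-IOV: size the request against available hardware resources,
 * allocate VF state, reserve firmware resources, register the request
 * buffers and finally call pci_enable_sriov().
 */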
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
	int avail_cp, avail_stat;

	/* Check if we can enable the requested number of VFs.  At a
	 * minimum each VF needs one RSS context, one VNIC, one TX ring,
	 * one RX ring, and one completion ring (bounded by the number
	 * of available stat contexts).
	 */
	vfs_supported = *num_vfs;

	avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
	avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
	avail_cp = min_t(int, avail_cp, avail_stat);

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}
		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
		    avail_cp < min_rx_rings)
			rx_ok = 0;

		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
		    avail_cp >= min_tx_rings)
			tx_ok = 1;

		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
		    min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by the PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	/* Reserve resources for VFs */
	rc = bnxt_func_cfg(bp, *num_vfs);
	if (rc != *num_vfs) {
		if (rc <= 0) {
			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
			*num_vfs = 0;
			goto err_out2;
		}
		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
		*num_vfs = rc;
	}

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto err_out2;

	bnxt_ulp_sriov_cfg(bp, *num_vfs);

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for various VFs */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	/* synchronize VF and VF-rep create and destroy */
	mutex_lock(&bp->sriov_lock);
	bnxt_vf_reps_destroy(bp);

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);

		/* Free the HW resources reserved for various VFs */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}
	mutex_unlock(&bp->sriov_lock);

	bnxt_free_vf_resources(bp);

	bp->pf.active_vfs = 0;

	/* Reclaim all resources for the PF */
	rtnl_lock();
	bnxt_restore_pf_fw_resources(bp);
	rtnl_unlock();

	bnxt_ulp_sriov_cfg(bp, 0);
}

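/* PCI .sriov_configure hook, invoked when a VF count is written to the
 * device's sriov_numvfs sysfs attribute.  Returns the number of VFs
 * actually enabled.
 */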
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "Cannot enable SRIOV when the irq mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Reject SRIOV config request since the interface is down\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if the enabled VF count already matches the request */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previously existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}

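/* Forward a response that the PF has built on behalf of a VF back to
 * that VF through the firmware.
 */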
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};
	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
		goto fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

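/* Reject a VF request: bounce the encapsulated request through
 * HWRM_REJECT_FWD_RESP so that the VF sees an error completion.
 */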
static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);

	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
		goto fwd_err_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_err_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

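/* Approve a VF request: hand the encapsulated request to the firmware
 * for execution via HWRM_EXEC_FWD_RESP.
 */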
static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);

	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_exec_fwd_resp failed. rc:%d\n", rc);
		goto exec_fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_exec_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

exec_fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

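/* Validate a VF's HWRM_FUNC_VF_CFG request before deciding whether to
 * forward or reject it.
 */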
static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
	struct hwrm_func_vf_cfg_input *req =
		(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

	/* Allow VF to set a valid MAC address if trust is set to on or
	 * if the PF-assigned MAC address is zero
	 */
	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
		if (is_valid_ether_addr(req->dflt_mac_addr) &&
		    ((vf->flags & BNXT_VF_TRUST) ||
		     (!is_valid_ether_addr(vf->mac_addr)))) {
			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
		}
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
	}
	return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}

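/* Validate a VF's HWRM_CFA_L2_FILTER_ALLOC request: the VF may only add
 * an L2 filter for a MAC address it is allowed to use.
 */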
static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
	bool mac_ok = false;

	if (!is_valid_ether_addr((const u8 *)req->l2_addr))
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

	/* Allow VF to set a valid MAC address if trust is set to on.
	 * Otherwise the L2 address must match the MAC address in the
	 * PF's context, or the VF's own MAC address.
	 */
	if (vf->flags & BNXT_VF_TRUST) {
		mac_ok = true;
	} else if (is_valid_ether_addr(vf->mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
			mac_ok = true;
	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
			mac_ok = true;
	} else {
		/* Two remaining cases, both allowed:
		 * 1. Firmware spec < 0x10202: the VF MAC address is not
		 *    forwarded to the PF, so it does not have to match.
		 * 2. Firmware spec >= 0x10202 and the PF has not assigned
		 *    a valid MAC address: the VF may set its own MAC.
		 */
		mac_ok = true;
	}
	if (mac_ok)
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

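/* Answer a VF's HWRM_PORT_PHY_QCFG query.  If the VF link state is
 * forced, synthesize a response reflecting the forced state instead of
 * forwarding the query to the firmware.
 */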
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
			(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
		phy_qcfg_resp.valid = 1;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link !=
			    PORT_PHY_QCFG_RESP_LINK_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex_cfg =
					PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
				phy_qcfg_resp.duplex_state =
					PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex_state =
				PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

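/* Dispatch a forwarded VF request to the matching validation handler
 * based on the HWRM request type.
 */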
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_FUNC_VF_CFG:
		rc = bnxt_vf_configure_mac(bp, vf);
		break;
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO Validate if VF is allowed to change mac address,
		 * mtu, num of rings etc
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}

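/* Called by the PF when one or more VFs (marked in vf_event_bmap) have
 * posted HWRM requests that need validation and forwarding.
 */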
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VFs and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

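/* VF-side helper: query the firmware for the PF-assigned MAC address
 * and adopt it as the netdev address when it is valid.
 */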
void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	/* Store the MAC address from the firmware.  There are 2 cases:
	 * 1. The MAC address is valid.  It was assigned by the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. The MAC address is zero.  The VF will use a random MAC
	 *    address by default, which the VF user can later change via
	 *    ndo_set_mac_address.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}

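/* VF-side helper: ask the PF (via HWRM_FUNC_VF_CFG) to approve a MAC
 * address that the VF user wants to set.
 */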
int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	if (bp->hwrm_spec_code < 0x10202) {
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
	if (rc) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
	}
	return rc;
}
#else

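/* Stubs used when the driver is built without CONFIG_BNXT_SRIOV. */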
void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
{
	return 0;
}
#endif