1
2
3
4
5
6
7#include "bcm_osal.h"
8#include "ecore.h"
9#include "reg_addr.h"
10#include "ecore_sriov.h"
11#include "ecore_status.h"
12#include "ecore_hw.h"
13#include "ecore_hw_defs.h"
14#include "ecore_int.h"
15#include "ecore_hsi_eth.h"
16#include "ecore_l2.h"
17#include "ecore_vfpf_if.h"
18#include "ecore_rt_defs.h"
19#include "ecore_init_ops.h"
20#include "ecore_gtt_reg_addr.h"
21#include "ecore_iro.h"
22#include "ecore_mcp.h"
23#include "ecore_cxt.h"
24#include "ecore_vf.h"
25#include "ecore_init_fw_funcs.h"
26#include "ecore_sp_commands.h"
27
28static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
29 u8 opcode,
30 __le16 echo,
31 union event_ring_data *data,
32 u8 fw_return_code);
33
34const char *qede_ecore_channel_tlvs_string[] = {
35 "CHANNEL_TLV_NONE",
36 "CHANNEL_TLV_ACQUIRE",
37 "CHANNEL_TLV_VPORT_START",
38 "CHANNEL_TLV_VPORT_UPDATE",
39 "CHANNEL_TLV_VPORT_TEARDOWN",
40 "CHANNEL_TLV_START_RXQ",
41 "CHANNEL_TLV_START_TXQ",
42 "CHANNEL_TLV_STOP_RXQ",
43 "CHANNEL_TLV_STOP_TXQ",
44 "CHANNEL_TLV_UPDATE_RXQ",
45 "CHANNEL_TLV_INT_CLEANUP",
46 "CHANNEL_TLV_CLOSE",
47 "CHANNEL_TLV_RELEASE",
48 "CHANNEL_TLV_LIST_END",
49 "CHANNEL_TLV_UCAST_FILTER",
50 "CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
51 "CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
52 "CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
53 "CHANNEL_TLV_VPORT_UPDATE_MCAST",
54 "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
55 "CHANNEL_TLV_VPORT_UPDATE_RSS",
56 "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
57 "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
58 "CHANNEL_TLV_UPDATE_TUNN_PARAM",
59 "CHANNEL_TLV_COALESCE_UPDATE",
60 "CHANNEL_TLV_QID",
61 "CHANNEL_TLV_COALESCE_READ",
62 "CHANNEL_TLV_BULLETIN_UPDATE_MAC",
63 "CHANNEL_TLV_UPDATE_MTU",
64 "CHANNEL_TLV_RDMA_ACQUIRE",
65 "CHANNEL_TLV_RDMA_START",
66 "CHANNEL_TLV_RDMA_STOP",
67 "CHANNEL_TLV_RDMA_ADD_USER",
68 "CHANNEL_TLV_RDMA_REMOVE_USER",
69 "CHANNEL_TLV_RDMA_QUERY_COUNTERS",
70 "CHANNEL_TLV_RDMA_ALLOC_TID",
71 "CHANNEL_TLV_RDMA_REGISTER_TID",
72 "CHANNEL_TLV_RDMA_DEREGISTER_TID",
73 "CHANNEL_TLV_RDMA_FREE_TID",
74 "CHANNEL_TLV_RDMA_CREATE_CQ",
75 "CHANNEL_TLV_RDMA_RESIZE_CQ",
76 "CHANNEL_TLV_RDMA_DESTROY_CQ",
77 "CHANNEL_TLV_RDMA_CREATE_QP",
78 "CHANNEL_TLV_RDMA_MODIFY_QP",
79 "CHANNEL_TLV_RDMA_QUERY_QP",
80 "CHANNEL_TLV_RDMA_DESTROY_QP",
81 "CHANNEL_TLV_RDMA_CREATE_SRQ",
82 "CHANNEL_TLV_RDMA_MODIFY_SRQ",
83 "CHANNEL_TLV_RDMA_DESTROY_SRQ",
84 "CHANNEL_TLV_RDMA_QUERY_PORT",
85 "CHANNEL_TLV_RDMA_QUERY_DEVICE",
86 "CHANNEL_TLV_RDMA_IWARP_CONNECT",
87 "CHANNEL_TLV_RDMA_IWARP_ACCEPT",
88 "CHANNEL_TLV_RDMA_IWARP_CREATE_LISTEN",
89 "CHANNEL_TLV_RDMA_IWARP_DESTROY_LISTEN",
90 "CHANNEL_TLV_RDMA_IWARP_PAUSE_LISTEN",
91 "CHANNEL_TLV_RDMA_IWARP_REJECT",
92 "CHANNEL_TLV_RDMA_IWARP_SEND_RTR",
93 "CHANNEL_TLV_ESTABLISH_LL2_CONN",
94 "CHANNEL_TLV_TERMINATE_LL2_CONN",
95 "CHANNEL_TLV_ASYNC_EVENT",
96 "CHANNEL_TLV_SOFT_FLR",
97 "CHANNEL_TLV_MAX"
98};
99
100static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
101{
102 u8 legacy = 0;
103
104 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
105 ETH_HSI_VER_NO_PKT_LEN_TUNN)
106 legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;
107
108 if (!(p_vf->acquire.vfdev_info.capabilities &
109 VFPF_ACQUIRE_CAP_QUEUE_QIDS))
110 legacy |= ECORE_QCID_LEGACY_VF_CID;
111
112 return legacy;
113}
114
115
116static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
117 struct ecore_vf_info *p_vf)
118{
119 struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
120 struct ecore_spq_entry *p_ent = OSAL_NULL;
121 struct ecore_sp_init_data init_data;
122 enum _ecore_status_t rc = ECORE_NOTIMPL;
123 u8 fp_minor;
124
125
126 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
127 init_data.cid = ecore_spq_get_cid(p_hwfn);
128 init_data.opaque_fid = p_vf->opaque_fid;
129 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
130
131 rc = ecore_sp_init_request(p_hwfn, &p_ent,
132 COMMON_RAMROD_VF_START,
133 PROTOCOLID_COMMON, &init_data);
134 if (rc != ECORE_SUCCESS)
135 return rc;
136
137 p_ramrod = &p_ent->ramrod.vf_start;
138
139 p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
140 p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);
141
142 switch (p_hwfn->hw_info.personality) {
143 case ECORE_PCI_ETH:
144 p_ramrod->personality = PERSONALITY_ETH;
145 break;
146 case ECORE_PCI_ETH_ROCE:
147 case ECORE_PCI_ETH_IWARP:
148 p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
149 break;
150 default:
151 DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
152 p_hwfn->hw_info.personality);
153 return ECORE_INVAL;
154 }
155
156 fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
157 if (fp_minor > ETH_HSI_VER_MINOR &&
158 fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
159 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
160 "VF [%d] - Requested fp hsi %02x.%02x which is"
161 " slightly newer than PF's %02x.%02x; Configuring"
162 " PFs version\n",
163 p_vf->abs_vf_id,
164 ETH_HSI_VER_MAJOR, fp_minor,
165 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
166 fp_minor = ETH_HSI_VER_MINOR;
167 }
168
169 p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
170 p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
171
172 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
173 "VF[%d] - Starting using HSI %02x.%02x\n",
174 p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
175
176 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
177}
178
179static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
180 u32 concrete_vfid,
181 u16 opaque_vfid)
182{
183 struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
184 struct ecore_spq_entry *p_ent = OSAL_NULL;
185 struct ecore_sp_init_data init_data;
186 enum _ecore_status_t rc = ECORE_NOTIMPL;
187
188
189 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
190 init_data.cid = ecore_spq_get_cid(p_hwfn);
191 init_data.opaque_fid = opaque_vfid;
192 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
193
194 rc = ecore_sp_init_request(p_hwfn, &p_ent,
195 COMMON_RAMROD_VF_STOP,
196 PROTOCOLID_COMMON, &init_data);
197 if (rc != ECORE_SUCCESS)
198 return rc;
199
200 p_ramrod = &p_ent->ramrod.vf_stop;
201
202 p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
203
204 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
205}
206
207bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
208 bool b_enabled_only, bool b_non_malicious)
209{
210 if (!p_hwfn->pf_iov_info) {
211 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
212 return false;
213 }
214
215 if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
216 (rel_vf_id < 0))
217 return false;
218
219 if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
220 b_enabled_only)
221 return false;
222
223 if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
224 b_non_malicious)
225 return false;
226
227 return true;
228}
229
230struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
231 u16 relative_vf_id,
232 bool b_enabled_only)
233{
234 struct ecore_vf_info *vf = OSAL_NULL;
235
236 if (!p_hwfn->pf_iov_info) {
237 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
238 return OSAL_NULL;
239 }
240
241 if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
242 b_enabled_only, false))
243 vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
244 else
245 DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
246 relative_vf_id);
247
248 return vf;
249}
250
251static struct ecore_queue_cid *
252ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
253{
254 u32 i;
255
256 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
257 if (p_queue->cids[i].p_cid &&
258 !p_queue->cids[i].b_is_tx)
259 return p_queue->cids[i].p_cid;
260 }
261
262 return OSAL_NULL;
263}
264
265enum ecore_iov_validate_q_mode {
266 ECORE_IOV_VALIDATE_Q_NA,
267 ECORE_IOV_VALIDATE_Q_ENABLE,
268 ECORE_IOV_VALIDATE_Q_DISABLE,
269};
270
271static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
272 u16 qid,
273 enum ecore_iov_validate_q_mode mode,
274 bool b_is_tx)
275{
276 u32 i;
277
278 if (mode == ECORE_IOV_VALIDATE_Q_NA)
279 return true;
280
281 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
282 struct ecore_vf_queue_cid *p_qcid;
283
284 p_qcid = &p_vf->vf_queues[qid].cids[i];
285
286 if (p_qcid->p_cid == OSAL_NULL)
287 continue;
288
289 if (p_qcid->b_is_tx != b_is_tx)
290 continue;
291
292
293 return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
294 }
295
296
297 return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
298}
299
300static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
301 struct ecore_vf_info *p_vf,
302 u16 rx_qid,
303 enum ecore_iov_validate_q_mode mode)
304{
305 if (rx_qid >= p_vf->num_rxqs) {
306 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
307 "VF[0x%02x] - can't touch Rx queue[%04x];"
308 " Only 0x%04x are allocated\n",
309 p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
310 return false;
311 }
312
313 return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
314}
315
316static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
317 struct ecore_vf_info *p_vf,
318 u16 tx_qid,
319 enum ecore_iov_validate_q_mode mode)
320{
321 if (tx_qid >= p_vf->num_txqs) {
322 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
323 "VF[0x%02x] - can't touch Tx queue[%04x];"
324 " Only 0x%04x are allocated\n",
325 p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
326 return false;
327 }
328
329 return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
330}
331
332static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
333 struct ecore_vf_info *p_vf,
334 u16 sb_idx)
335{
336 int i;
337
338 for (i = 0; i < p_vf->num_sbs; i++)
339 if (p_vf->igu_sbs[i] == sb_idx)
340 return true;
341
342 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
343 "VF[0%02x] - tried using sb_idx %04x which doesn't exist as"
344 " one of its 0x%02x SBs\n",
345 p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
346
347 return false;
348}
349
350
351static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
352{
353 u8 i;
354
355 for (i = 0; i < p_vf->num_rxqs; i++)
356 if (ecore_iov_validate_queue_mode(p_vf, i,
357 ECORE_IOV_VALIDATE_Q_ENABLE,
358 false))
359 return true;
360
361 return false;
362}
363
364static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
365{
366 u8 i;
367
368 for (i = 0; i < p_vf->num_txqs; i++)
369 if (ecore_iov_validate_queue_mode(p_vf, i,
370 ECORE_IOV_VALIDATE_Q_ENABLE,
371 true))
372 return true;
373
374 return false;
375}
376
377enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
378 int vfid,
379 struct ecore_ptt *p_ptt)
380{
381 struct ecore_bulletin_content *p_bulletin;
382 int crc_size = sizeof(p_bulletin->crc);
383 struct dmae_params params;
384 struct ecore_vf_info *p_vf;
385
386 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
387 if (!p_vf)
388 return ECORE_INVAL;
389
390
391 if (!p_vf->vf_bulletin)
392 return ECORE_INVAL;
393
394 p_bulletin = p_vf->bulletin.p_virt;
395
396
397 p_bulletin->version++;
398 p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
399 p_vf->bulletin.size - crc_size);
400
401 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
402 "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
403 p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
404
405
406 OSAL_MEMSET(¶ms, 0, sizeof(params));
407 SET_FIELD(params.flags, DMAE_PARAMS_DST_VF_VALID, 0x1);
408 params.dst_vf_id = p_vf->abs_vf_id;
409 return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
410 p_vf->vf_bulletin, p_vf->bulletin.size / 4,
411 ¶ms);
412}
413
414static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
415{
416 struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
417 int pos = iov->pos;
418
419 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
420 OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + RTE_PCI_SRIOV_CTRL, &iov->ctrl);
421
422 OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + RTE_PCI_SRIOV_TOTAL_VF,
423 &iov->total_vfs);
424 OSAL_PCI_READ_CONFIG_WORD(p_dev,
425 pos + RTE_PCI_SRIOV_INITIAL_VF,
426 &iov->initial_vfs);
427
428 OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + RTE_PCI_SRIOV_NUM_VF,
429 &iov->num_vfs);
430 if (iov->num_vfs) {
431
432
433
434 DP_VERBOSE(p_dev, ECORE_MSG_IOV,
435 "Number of VFs are already set to non-zero value."
436 " Ignoring PCI configuration value\n");
437 iov->num_vfs = 0;
438 }
439
440 OSAL_PCI_READ_CONFIG_WORD(p_dev,
441 pos + RTE_PCI_SRIOV_VF_OFFSET, &iov->offset);
442
443 OSAL_PCI_READ_CONFIG_WORD(p_dev,
444 pos + RTE_PCI_SRIOV_VF_STRIDE, &iov->stride);
445
446 OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + RTE_PCI_SRIOV_VF_DID,
447 &iov->vf_device_id);
448
449 OSAL_PCI_READ_CONFIG_DWORD(p_dev,
450 pos + RTE_PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
451
452 OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + RTE_PCI_SRIOV_CAP, &iov->cap);
453
454 OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + RTE_PCI_SRIOV_FUNC_LINK,
455 &iov->link);
456
457 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
458 "ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
459 " stride %d, page size 0x%x\n",
460 iov->nres, iov->cap, iov->ctrl,
461 iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
462 iov->offset, iov->stride, iov->pgsz);
463
464
465 if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
466 iov->total_vfs > NUM_OF_VFS(p_dev)) {
467
468
469
470
471 DP_NOTICE(p_dev, false,
472 "IOV: Unexpected number of vfs set: %d"
473 " setting num_vf to zero\n",
474 iov->num_vfs);
475
476 iov->num_vfs = 0;
477 iov->total_vfs = 0;
478 }
479
480 return ECORE_SUCCESS;
481}
482
483static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
484{
485 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
486 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
487 struct ecore_bulletin_content *p_bulletin_virt;
488 dma_addr_t req_p, rply_p, bulletin_p;
489 union pfvf_tlvs *p_reply_virt_addr;
490 union vfpf_tlvs *p_req_virt_addr;
491 u8 idx = 0;
492
493 OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
494
495 p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
496 req_p = p_iov_info->mbx_msg_phys_addr;
497 p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
498 rply_p = p_iov_info->mbx_reply_phys_addr;
499 p_bulletin_virt = p_iov_info->p_bulletins;
500 bulletin_p = p_iov_info->bulletins_phys;
501 if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
502 DP_ERR(p_hwfn,
503 "ecore_iov_setup_vfdb called without alloc mem first\n");
504 return;
505 }
506
507 for (idx = 0; idx < p_iov->total_vfs; idx++) {
508 struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
509 u32 concrete;
510
511 vf->vf_mbx.req_virt = p_req_virt_addr + idx;
512 vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
513 vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
514 vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
515
516#ifdef CONFIG_ECORE_SW_CHANNEL
517 vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
518 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
519#endif
520 vf->state = VF_STOPPED;
521 vf->b_init = false;
522
523 vf->bulletin.phys = idx *
524 sizeof(struct ecore_bulletin_content) + bulletin_p;
525 vf->bulletin.p_virt = p_bulletin_virt + idx;
526 vf->bulletin.size = sizeof(struct ecore_bulletin_content);
527
528 vf->relative_vf_id = idx;
529 vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
530 concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
531 vf->concrete_fid = concrete;
532
533 vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
534 (vf->abs_vf_id << 8);
535
536 vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
537 vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
538 }
539}
540
541static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
542{
543 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
544 void **p_v_addr;
545 u16 num_vfs = 0;
546
547 num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
548
549 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
550 "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);
551
552
553 p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
554 p_v_addr = &p_iov_info->mbx_msg_virt_addr;
555 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
556 &p_iov_info->mbx_msg_phys_addr,
557 p_iov_info->mbx_msg_size);
558 if (!*p_v_addr)
559 return ECORE_NOMEM;
560
561
562 p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
563 p_v_addr = &p_iov_info->mbx_reply_virt_addr;
564 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
565 &p_iov_info->mbx_reply_phys_addr,
566 p_iov_info->mbx_reply_size);
567 if (!*p_v_addr)
568 return ECORE_NOMEM;
569
570 p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
571 num_vfs;
572 p_v_addr = &p_iov_info->p_bulletins;
573 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
574 &p_iov_info->bulletins_phys,
575 p_iov_info->bulletins_size);
576 if (!*p_v_addr)
577 return ECORE_NOMEM;
578
579 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
580 "PF's Requests mailbox [%p virt 0x%lx phys], "
581 "Response mailbox [%p virt 0x%lx phys] Bulletinsi"
582 " [%p virt 0x%lx phys]\n",
583 p_iov_info->mbx_msg_virt_addr,
584 (unsigned long)p_iov_info->mbx_msg_phys_addr,
585 p_iov_info->mbx_reply_virt_addr,
586 (unsigned long)p_iov_info->mbx_reply_phys_addr,
587 p_iov_info->p_bulletins,
588 (unsigned long)p_iov_info->bulletins_phys);
589
590 return ECORE_SUCCESS;
591}
592
593static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
594{
595 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
596
597 if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
598 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
599 p_iov_info->mbx_msg_virt_addr,
600 p_iov_info->mbx_msg_phys_addr,
601 p_iov_info->mbx_msg_size);
602
603 if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
604 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
605 p_iov_info->mbx_reply_virt_addr,
606 p_iov_info->mbx_reply_phys_addr,
607 p_iov_info->mbx_reply_size);
608
609 if (p_iov_info->p_bulletins)
610 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
611 p_iov_info->p_bulletins,
612 p_iov_info->bulletins_phys,
613 p_iov_info->bulletins_size);
614}
615
616enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
617{
618 struct ecore_pf_iov *p_sriov;
619
620 if (!IS_PF_SRIOV(p_hwfn)) {
621 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
622 "No SR-IOV - no need for IOV db\n");
623 return ECORE_SUCCESS;
624 }
625
626 p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
627 if (!p_sriov) {
628 DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n");
629 return ECORE_NOMEM;
630 }
631
632 p_hwfn->pf_iov_info = p_sriov;
633
634 ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
635 ecore_sriov_eqe_event);
636
637 return ecore_iov_allocate_vfdb(p_hwfn);
638}
639
640void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
641{
642 if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
643 return;
644
645 ecore_iov_setup_vfdb(p_hwfn);
646}
647
648void ecore_iov_free(struct ecore_hwfn *p_hwfn)
649{
650 ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
651
652 if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
653 ecore_iov_free_vfdb(p_hwfn);
654 OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
655 }
656}
657
658void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
659{
660 OSAL_FREE(p_dev, p_dev->p_iov_info);
661}
662
663enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
664{
665 struct ecore_dev *p_dev = p_hwfn->p_dev;
666 int pos;
667 enum _ecore_status_t rc;
668
669 if (IS_VF(p_hwfn->p_dev))
670 return ECORE_SUCCESS;
671
672
673 pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
674 RTE_PCI_EXT_CAP_ID_SRIOV);
675 if (!pos) {
676 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
677 return ECORE_SUCCESS;
678 }
679
680
681
682 p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
683 sizeof(*p_dev->p_iov_info));
684 if (!p_dev->p_iov_info) {
685 DP_NOTICE(p_hwfn, false,
686 "Can't support IOV due to lack of memory\n");
687 return ECORE_NOMEM;
688 }
689 p_dev->p_iov_info->pos = pos;
690
691 rc = ecore_iov_pci_cfg_info(p_dev);
692 if (rc)
693 return rc;
694
695
696
697
698
699 if (!p_dev->p_iov_info->total_vfs) {
700 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
701 "IOV capabilities, but no VFs are published\n");
702 OSAL_FREE(p_dev, p_dev->p_iov_info);
703 return ECORE_SUCCESS;
704 }
705
706
707
708
709
710
711
712
713
714
715
716 if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
717 u32 first = p_hwfn->p_dev->p_iov_info->offset +
718 p_hwfn->abs_pf_id - 16;
719
720 p_dev->p_iov_info->first_vf_in_pf = first;
721
722 if (ECORE_PATH_ID(p_hwfn))
723 p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
724 } else {
725 u32 first = p_hwfn->p_dev->p_iov_info->offset +
726 p_hwfn->abs_pf_id - 256;
727
728 p_dev->p_iov_info->first_vf_in_pf = first;
729 }
730
731 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
732 "First VF in hwfn 0x%08x\n",
733 p_dev->p_iov_info->first_vf_in_pf);
734
735 return ECORE_SUCCESS;
736}
737
738static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
739 bool b_fail_malicious)
740{
741
742 if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
743 !IS_PF_SRIOV_ALLOC(p_hwfn))
744 return false;
745
746
747 if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
748 return false;
749
750 return true;
751}
752
753bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
754{
755 return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
756}
757
758void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
759 u16 rel_vf_id, u8 to_disable)
760{
761 struct ecore_vf_info *vf;
762 int i;
763
764 for_each_hwfn(p_dev, i) {
765 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
766
767 vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
768 if (!vf)
769 continue;
770
771 vf->to_disable = to_disable;
772 }
773}
774
775void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
776 u8 to_disable)
777{
778 u16 i;
779
780 if (!IS_ECORE_SRIOV(p_dev))
781 return;
782
783 for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
784 ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
785}
786
787#ifndef LINUX_REMOVE
788
789enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
790 u16 vf_id,
791 void *ctx)
792{
793 enum _ecore_status_t rc = ECORE_SUCCESS;
794 struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);
795
796 if (vf != OSAL_NULL) {
797 vf->ctx = ctx;
798#ifdef CONFIG_ECORE_SW_CHANNEL
799 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
800#endif
801 } else {
802 rc = ECORE_UNKNOWN_ERROR;
803 }
804 return rc;
805}
806#endif
807
808static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
809 struct ecore_ptt *p_ptt,
810 u8 abs_vfid)
811{
812 ecore_wr(p_hwfn, p_ptt,
813 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
814 1 << (abs_vfid & 0x1f));
815}
816
817static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
818 struct ecore_ptt *p_ptt,
819 struct ecore_vf_info *vf)
820{
821 int i;
822
823
824 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
825
826 ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
827
828
829 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
830
831
832 for (i = 0; i < vf->num_sbs; i++)
833 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
834 vf->igu_sbs[i],
835 vf->opaque_fid, true);
836}
837
838static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
839 struct ecore_ptt *p_ptt,
840 struct ecore_vf_info *vf, bool enable)
841{
842 u32 igu_vf_conf;
843
844 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
845
846 igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
847
848 if (enable)
849 igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
850 else
851 igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
852
853 ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
854
855
856 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
857}
858
859static enum _ecore_status_t
860ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
861 struct ecore_ptt *p_ptt,
862 u8 abs_vf_id,
863 u8 num_sbs)
864{
865 u8 current_max = 0;
866 int i;
867
868
869 if (p_hwfn->p_dev->b_dont_override_vf_msix)
870 return ECORE_SUCCESS;
871
872
873
874
875 if (!ECORE_IS_BB(p_hwfn->p_dev)) {
876 ecore_for_each_vf(p_hwfn, i) {
877 struct ecore_vf_info *p_vf;
878
879 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);
880 if (!p_vf)
881 continue;
882
883 current_max = OSAL_MAX_T(u8, current_max,
884 p_vf->num_sbs);
885 }
886 }
887
888 if (num_sbs > current_max)
889 return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
890 abs_vf_id, num_sbs);
891
892 return ECORE_SUCCESS;
893}
894
895static enum _ecore_status_t
896ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
897 struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
898{
899 u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
900 enum _ecore_status_t rc = ECORE_SUCCESS;
901
902
903
904
905 vf->b_malicious = false;
906
907 if (vf->to_disable)
908 return ECORE_SUCCESS;
909
910 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
911 "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
912 ECORE_VF_ABS_ID(p_hwfn, vf));
913
914 ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
915 ECORE_VF_ABS_ID(p_hwfn, vf));
916
917 ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
918
919 rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
920 vf->abs_vf_id, vf->num_sbs);
921 if (rc != ECORE_SUCCESS)
922 return rc;
923
924 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
925
926 SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
927 STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
928
929 ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
930 p_hwfn->hw_info.hw_mode);
931
932
933 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
934
935 vf->state = VF_FREE;
936
937 return rc;
938}
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
954 struct ecore_ptt *p_ptt,
955 struct ecore_vf_info *vf, u8 enable)
956{
957 u32 reg_addr, val;
958 u16 qzone_id = 0;
959 int qid;
960
961 for (qid = 0; qid < vf->num_rxqs; qid++) {
962 ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
963 &qzone_id);
964
965 reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
966 val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
967 ecore_wr(p_hwfn, p_ptt, reg_addr, val);
968 }
969}
970
971static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
972 struct ecore_ptt *p_ptt,
973 struct ecore_vf_info *vf)
974{
975
976 ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
977
978 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
979
980
981 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
982}
983
984static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
985 struct ecore_ptt *p_ptt,
986 struct ecore_vf_info *vf,
987 u16 num_rx_queues)
988{
989 struct ecore_igu_block *p_block;
990 struct cau_sb_entry sb_entry;
991 int qid = 0;
992 u32 val = 0;
993
994 if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
995 num_rx_queues =
996 (u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
997 p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
998
999 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
1000 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
1001 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
1002
1003 for (qid = 0; qid < num_rx_queues; qid++) {
1004 p_block = ecore_get_igu_free_sb(p_hwfn, false);
1005 if (!p_block)
1006 continue;
1007
1008 vf->igu_sbs[qid] = p_block->igu_sb_id;
1009 p_block->status &= ~ECORE_IGU_STATUS_FREE;
1010 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
1011
1012 ecore_wr(p_hwfn, p_ptt,
1013 IGU_REG_MAPPING_MEMORY +
1014 sizeof(u32) * p_block->igu_sb_id, val);
1015
1016
1017 ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
1018 p_hwfn->rel_pf_id,
1019 vf->abs_vf_id, 1);
1020
1021 ecore_dmae_host2grc(p_hwfn, p_ptt,
1022 (u64)(osal_uintptr_t)&sb_entry,
1023 CAU_REG_SB_VAR_MEMORY +
1024 p_block->igu_sb_id * sizeof(u64), 2,
1025 OSAL_NULL );
1026 }
1027
1028 vf->num_sbs = (u8)num_rx_queues;
1029
1030 return vf->num_sbs;
1031}
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
1045 struct ecore_ptt *p_ptt,
1046 struct ecore_vf_info *vf)
1047{
1048 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
1049 int idx, igu_id;
1050 u32 addr, val;
1051
1052
1053 for (idx = 0; idx < vf->num_sbs; idx++) {
1054 igu_id = vf->igu_sbs[idx];
1055 addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
1056
1057 val = ecore_rd(p_hwfn, p_ptt, addr);
1058 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
1059 ecore_wr(p_hwfn, p_ptt, addr, val);
1060
1061 p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
1062 p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
1063 }
1064
1065 vf->num_sbs = 0;
1066}
1067
1068void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
1069 u16 vfid,
1070 struct ecore_mcp_link_params *params,
1071 struct ecore_mcp_link_state *link,
1072 struct ecore_mcp_link_capabilities *p_caps)
1073{
1074 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
1075 struct ecore_bulletin_content *p_bulletin;
1076
1077 if (!p_vf)
1078 return;
1079
1080 p_bulletin = p_vf->bulletin.p_virt;
1081 p_bulletin->req_autoneg = params->speed.autoneg;
1082 p_bulletin->req_adv_speed = params->speed.advertised_speeds;
1083 p_bulletin->req_forced_speed = params->speed.forced_speed;
1084 p_bulletin->req_autoneg_pause = params->pause.autoneg;
1085 p_bulletin->req_forced_rx = params->pause.forced_rx;
1086 p_bulletin->req_forced_tx = params->pause.forced_tx;
1087 p_bulletin->req_loopback = params->loopback_mode;
1088
1089 p_bulletin->link_up = link->link_up;
1090 p_bulletin->speed = link->speed;
1091 p_bulletin->full_duplex = link->full_duplex;
1092 p_bulletin->autoneg = link->an;
1093 p_bulletin->autoneg_complete = link->an_complete;
1094 p_bulletin->parallel_detection = link->parallel_detection;
1095 p_bulletin->pfc_enabled = link->pfc_enabled;
1096 p_bulletin->partner_adv_speed = link->partner_adv_speed;
1097 p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
1098 p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
1099 p_bulletin->partner_adv_pause = link->partner_adv_pause;
1100 p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
1101
1102 p_bulletin->capability_speed = p_caps->speed_capabilities;
1103}
1104
1105#ifndef ASIC_ONLY
1106static void ecore_emul_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
1107 struct ecore_ptt *p_ptt)
1108{
1109
1110 ecore_wr(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT_LIM, 0x3ec);
1111}
1112#endif
1113
1114enum _ecore_status_t
1115ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
1116 struct ecore_ptt *p_ptt,
1117 struct ecore_iov_vf_init_params *p_params)
1118{
1119 struct ecore_mcp_link_capabilities link_caps;
1120 struct ecore_mcp_link_params link_params;
1121 struct ecore_mcp_link_state link_state;
1122 u8 num_of_vf_available_chains = 0;
1123 struct ecore_vf_info *vf = OSAL_NULL;
1124 u16 qid, num_irqs;
1125 enum _ecore_status_t rc = ECORE_SUCCESS;
1126 u32 cids;
1127 u8 i;
1128
1129 vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
1130 if (!vf) {
1131 DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
1132 return ECORE_UNKNOWN_ERROR;
1133 }
1134
1135 if (vf->b_init) {
1136 DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
1137 p_params->rel_vf_id);
1138 return ECORE_INVAL;
1139 }
1140
1141
1142 if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
1143 DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
1144 p_params->rel_vf_id, p_params->vport_id);
1145 return ECORE_INVAL;
1146 }
1147
1148 if ((p_params->num_queues > 1) &&
1149 (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
1150 DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
1151 p_params->rel_vf_id, p_params->rss_eng_id);
1152 return ECORE_INVAL;
1153 }
1154
1155
1156 if (!p_params->vport_id) {
1157 DP_NOTICE(p_hwfn, false,
1158 "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
1159 p_params->rel_vf_id);
1160 }
1161 if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
1162 DP_NOTICE(p_hwfn, false,
1163 "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
1164 p_params->rel_vf_id);
1165 }
1166 vf->vport_id = p_params->vport_id;
1167 vf->rss_eng_id = p_params->rss_eng_id;
1168
1169
1170
1171
1172
1173 for (i = 0; i < p_params->num_queues; i++) {
1174 qid = p_params->req_rx_queue[i];
1175 if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
1176 DP_NOTICE(p_hwfn, true,
1177 "Can't enable Rx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n",
1178 qid, p_params->rel_vf_id,
1179 (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
1180 return ECORE_INVAL;
1181 }
1182
1183 qid = p_params->req_tx_queue[i];
1184 if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
1185 DP_NOTICE(p_hwfn, true,
1186 "Can't enable Tx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n",
1187 qid, p_params->rel_vf_id,
1188 (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
1189 return ECORE_INVAL;
1190 }
1191 }
1192
1193
1194 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
1195 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1196 "VF[%d] - requesting to initialize for 0x%04x queues"
1197 " [0x%04x CIDs available]\n",
1198 vf->relative_vf_id, p_params->num_queues, (u16)cids);
1199 num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
1200
1201 num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
1202 p_ptt,
1203 vf,
1204 num_irqs);
1205 if (num_of_vf_available_chains == 0) {
1206 DP_ERR(p_hwfn, "no available igu sbs\n");
1207 return ECORE_NOMEM;
1208 }
1209
1210
1211 vf->num_rxqs = num_of_vf_available_chains;
1212 vf->num_txqs = num_of_vf_available_chains;
1213
1214 for (i = 0; i < vf->num_rxqs; i++) {
1215 struct ecore_vf_queue *p_queue = &vf->vf_queues[i];
1216
1217 p_queue->fw_rx_qid = p_params->req_rx_queue[i];
1218 p_queue->fw_tx_qid = p_params->req_tx_queue[i];
1219
1220 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1221 "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
1222 vf->relative_vf_id, i, vf->igu_sbs[i],
1223 p_queue->fw_rx_qid, p_queue->fw_tx_qid);
1224 }
1225
1226
1227
1228 OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
1229 sizeof(link_params));
1230 OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
1231 sizeof(link_state));
1232 OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
1233 sizeof(link_caps));
1234 ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
1235 &link_params, &link_state, &link_caps);
1236
1237 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
1238 if (rc != ECORE_SUCCESS)
1239 return rc;
1240
1241 vf->b_init = true;
1242#ifndef REMOVE_DBG
1243 p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
1244 (1ULL << (vf->relative_vf_id % 64));
1245#endif
1246
1247 if (IS_LEAD_HWFN(p_hwfn))
1248 p_hwfn->p_dev->p_iov_info->num_vfs++;
1249
1250#ifndef ASIC_ONLY
1251 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1252 ecore_emul_iov_init_hw_for_vf(p_hwfn, p_ptt);
1253#endif
1254
1255 return ECORE_SUCCESS;
1256 }
1257
1258#ifndef ASIC_ONLY
1259static void ecore_emul_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
1260 struct ecore_ptt *p_ptt)
1261{
1262 if (!ecore_mcp_is_init(p_hwfn)) {
1263 u32 sriov_dis = ecore_rd(p_hwfn, p_ptt,
1264 PGLUE_B_REG_SR_IOV_DISABLED_REQUEST);
1265
1266 ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_SR_IOV_DISABLED_REQUEST_CLR,
1267 sriov_dis);
1268}
1269}
1270#endif
1271
1272enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
1273 struct ecore_ptt *p_ptt,
1274 u16 rel_vf_id)
1275{
1276 struct ecore_mcp_link_capabilities caps;
1277 struct ecore_mcp_link_params params;
1278 struct ecore_mcp_link_state link;
1279 struct ecore_vf_info *vf = OSAL_NULL;
1280
1281 vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
1282 if (!vf) {
1283 DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
1284 return ECORE_UNKNOWN_ERROR;
1285 }
1286
1287 if (vf->bulletin.p_virt)
1288 OSAL_MEMSET(vf->bulletin.p_virt, 0,
1289 sizeof(*vf->bulletin.p_virt));
1290
1291 OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
1292
1293
1294
1295
1296
1297 OSAL_MEMCPY(¶ms, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
1298 OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
1299 OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
1300 sizeof(caps));
1301 ecore_iov_set_link(p_hwfn, rel_vf_id, ¶ms, &link, &caps);
1302
1303
1304 OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));
1305
1306
1307
1308
1309
1310 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
1311
1312
1313 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
1314
1315 vf->num_rxqs = 0;
1316 vf->num_txqs = 0;
1317 ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
1318
1319 if (vf->b_init) {
1320 vf->b_init = false;
1321 p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
1322 ~(1ULL << (vf->relative_vf_id / 64));
1323
1324 if (IS_LEAD_HWFN(p_hwfn))
1325 p_hwfn->p_dev->p_iov_info->num_vfs--;
1326 }
1327
1328#ifndef ASIC_ONLY
1329 if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1330 ecore_emul_iov_release_hw_for_vf(p_hwfn, p_ptt);
1331#endif
1332
1333 return ECORE_SUCCESS;
1334}
1335
1336static bool ecore_iov_tlv_supported(u16 tlvtype)
1337{
1338 return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
1339}
1340
1341static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
1342 struct ecore_vf_info *vf, u16 tlv)
1343{
1344
1345
1346
1347
1348
1349
1350
1351 if (ecore_iov_tlv_supported(tlv))
1352 DP_VERBOSE(p_hwfn,
1353 ECORE_MSG_IOV,
1354 "VF[%d]: vf pf channel locked by %s\n",
1355 vf->abs_vf_id,
1356 qede_ecore_channel_tlvs_string[tlv]);
1357 else
1358 DP_VERBOSE(p_hwfn,
1359 ECORE_MSG_IOV,
1360 "VF[%d]: vf pf channel locked by %04x\n",
1361 vf->abs_vf_id, tlv);
1362}
1363
1364static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
1365 struct ecore_vf_info *vf,
1366 u16 expected_tlv)
1367{
1368
1369 if (ecore_iov_tlv_supported(expected_tlv))
1370 DP_VERBOSE(p_hwfn,
1371 ECORE_MSG_IOV,
1372 "VF[%d]: vf pf channel unlocked by %s\n",
1373 vf->abs_vf_id,
1374 qede_ecore_channel_tlvs_string[expected_tlv]);
1375 else
1376 DP_VERBOSE(p_hwfn,
1377 ECORE_MSG_IOV,
1378 "VF[%d]: vf pf channel unlocked by %04x\n",
1379 vf->abs_vf_id, expected_tlv);
1380
1381
1382
1383}
1384
1385
1386void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
1387{
1388 struct channel_tlv *tl = (struct channel_tlv *)*offset;
1389
1390 tl->type = type;
1391 tl->length = length;
1392
1393
1394 *offset += length;
1395
1396
1397 return *offset - length;
1398}
1399
1400
1401void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
1402{
1403 u16 i = 1, total_length = 0;
1404 struct channel_tlv *tlv;
1405
1406 do {
1407
1408 tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
1409
1410
1411 if (ecore_iov_tlv_supported(tlv->type))
1412 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1413 "TLV number %d: type %s, length %d\n",
1414 i, qede_ecore_channel_tlvs_string[tlv->type],
1415 tlv->length);
1416 else
1417 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1418 "TLV number %d: type %d, length %d\n",
1419 i, tlv->type, tlv->length);
1420
1421 if (tlv->type == CHANNEL_TLV_LIST_END)
1422 return;
1423
1424
1425 if (!tlv->length) {
1426 DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
1427 return;
1428 }
1429 total_length += tlv->length;
1430 if (total_length >= sizeof(struct tlv_buffer_size)) {
1431 DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
1432 return;
1433 }
1434
1435 i++;
1436 } while (1);
1437}
1438
1439static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
1440 struct ecore_ptt *p_ptt,
1441 struct ecore_vf_info *p_vf,
1442#ifdef CONFIG_ECORE_SW_CHANNEL
1443 u16 length,
1444#else
1445 u16 OSAL_UNUSED length,
1446#endif
1447 u8 status)
1448{
1449 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1450 struct dmae_params params;
1451 u8 eng_vf_id;
1452
1453 mbx->reply_virt->default_resp.hdr.status = status;
1454
1455 ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
1456
1457#ifdef CONFIG_ECORE_SW_CHANNEL
1458 mbx->sw_mbx.response_size =
1459 length + sizeof(struct channel_list_end_tlv);
1460
1461 if (!p_vf->b_hw_channel)
1462 return;
1463#endif
1464
1465 eng_vf_id = p_vf->abs_vf_id;
1466
1467 OSAL_MEMSET(¶ms, 0, sizeof(params));
1468 SET_FIELD(params.flags, DMAE_PARAMS_DST_VF_VALID, 0x1);
1469 params.dst_vf_id = eng_vf_id;
1470
1471 ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
1472 mbx->req_virt->first_tlv.reply_address +
1473 sizeof(u64),
1474 (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
1475 ¶ms);
1476
1477
1478
1479
1480
1481 REG_WR(p_hwfn,
1482 GTT_BAR0_MAP_REG_USDM_RAM +
1483 USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
1484
1485 ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
1486 mbx->req_virt->first_tlv.reply_address,
1487 sizeof(u64) / 4, ¶ms);
1488
1489 OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
1490}
1491
1492static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
1493{
1494 switch (flag) {
1495 case ECORE_IOV_VP_UPDATE_ACTIVATE:
1496 return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
1497 case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
1498 return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
1499 case ECORE_IOV_VP_UPDATE_TX_SWITCH:
1500 return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
1501 case ECORE_IOV_VP_UPDATE_MCAST:
1502 return CHANNEL_TLV_VPORT_UPDATE_MCAST;
1503 case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
1504 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1505 case ECORE_IOV_VP_UPDATE_RSS:
1506 return CHANNEL_TLV_VPORT_UPDATE_RSS;
1507 case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
1508 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
1509 case ECORE_IOV_VP_UPDATE_SGE_TPA:
1510 return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
1511 default:
1512 return 0;
1513 }
1514}
1515
1516static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
1517 struct ecore_vf_info *p_vf,
1518 struct ecore_iov_vf_mbx *p_mbx,
1519 u8 status, u16 tlvs_mask,
1520 u16 tlvs_accepted)
1521{
1522 struct pfvf_def_resp_tlv *resp;
1523 u16 size, total_len, i;
1524
1525 OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
1526 p_mbx->offset = (u8 *)p_mbx->reply_virt;
1527 size = sizeof(struct pfvf_def_resp_tlv);
1528 total_len = size;
1529
1530 ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
1531
1532
1533 for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
1534 if (!(tlvs_mask & (1 << i)))
1535 continue;
1536
1537 resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
1538 size);
1539
1540 if (tlvs_accepted & (1 << i))
1541 resp->hdr.status = status;
1542 else
1543 resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
1544
1545 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1546 "VF[%d] - vport_update resp: TLV %d, status %02x\n",
1547 p_vf->relative_vf_id,
1548 ecore_iov_vport_to_tlv(i),
1549 resp->hdr.status);
1550
1551 total_len += size;
1552 }
1553
1554 ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
1555 sizeof(struct channel_list_end_tlv));
1556
1557 return total_len;
1558}
1559
1560static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
1561 struct ecore_ptt *p_ptt,
1562 struct ecore_vf_info *vf_info,
1563 u16 type, u16 length, u8 status)
1564{
1565 struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;
1566
1567 mbx->offset = (u8 *)mbx->reply_virt;
1568
1569 ecore_add_tlv(&mbx->offset, type, length);
1570 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
1571 sizeof(struct channel_list_end_tlv));
1572
1573 ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
1574}
1575
1576struct ecore_public_vf_info
1577*ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
1578 u16 relative_vf_id,
1579 bool b_enabled_only)
1580{
1581 struct ecore_vf_info *vf = OSAL_NULL;
1582
1583 vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
1584 if (!vf)
1585 return OSAL_NULL;
1586
1587 return &vf->p_vf_info;
1588}
1589
1590static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
1591 struct ecore_vf_info *p_vf)
1592{
1593 u32 i, j;
1594 p_vf->vf_bulletin = 0;
1595 p_vf->vport_instance = 0;
1596 p_vf->configured_features = 0;
1597
1598
1599 p_vf->num_rxqs = p_vf->num_sbs;
1600 p_vf->num_txqs = p_vf->num_sbs;
1601
1602 p_vf->num_active_rxqs = 0;
1603
1604 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
1605 struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
1606
1607 for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
1608 if (!p_queue->cids[j].p_cid)
1609 continue;
1610
1611 ecore_eth_queue_cid_release(p_hwfn,
1612 p_queue->cids[j].p_cid);
1613 p_queue->cids[j].p_cid = OSAL_NULL;
1614 }
1615 }
1616
1617 OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
1618 OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
1619 OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
1620}
1621
1622
1623static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
1624 struct ecore_ptt *p_ptt)
1625{
1626 u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
1627
1628 if (val)
1629 return val + 11;
1630 return 0;
1631}
1632
1633static void
1634ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
1635 struct ecore_ptt *p_ptt,
1636 struct ecore_vf_info *p_vf,
1637 struct vf_pf_resc_request *p_req,
1638 struct pf_vf_resc *p_resp)
1639{
1640 u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
1641 u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
1642 DB_ADDR_VF(0, DQ_DEMS_LEGACY);
1643 u32 bar_size;
1644
1645 p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);
1646
1647
1648
1649
1650
1651 if (!(p_vf->acquire.vfdev_info.capabilities &
1652 VFPF_ACQUIRE_CAP_QUEUE_QIDS))
1653 return;
1654
1655
1656
1657
1658
1659 if (p_vf->acquire.vfdev_info.capabilities &
1660 VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
1661 bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
1662 if (bar_size)
1663 bar_size = 1 << bar_size;
1664
1665 if (ECORE_IS_CMT(p_hwfn->p_dev))
1666 bar_size /= 2;
1667 } else {
1668 bar_size = PXP_VF_BAR0_DQ_LENGTH;
1669 }
1670
1671 if (bar_size / db_size < 256)
1672 p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
1673 (u8)(bar_size / db_size));
1674}
1675
1676static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
1677 struct ecore_ptt *p_ptt,
1678 struct ecore_vf_info *p_vf,
1679 struct vf_pf_resc_request *p_req,
1680 struct pf_vf_resc *p_resp)
1681{
1682 u8 i;
1683
1684
1685 p_resp->num_rxqs = p_vf->num_rxqs;
1686 p_resp->num_txqs = p_vf->num_txqs;
1687 p_resp->num_sbs = p_vf->num_sbs;
1688
1689 for (i = 0; i < p_resp->num_sbs; i++) {
1690 p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
1691
1692
1693
1694 p_resp->hw_sbs[i].sb_qid = 0;
1695 }
1696
1697
1698
1699
1700 for (i = 0; i < p_resp->num_rxqs; i++) {
1701 ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
1702 (u16 *)&p_resp->hw_qid[i]);
1703 p_resp->cid[i] = i;
1704 }
1705
1706
1707 p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
1708 p_req->num_mac_filters);
1709 p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
1710 p_req->num_vlan_filters);
1711
1712 ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
1713
1714
1715
1716
1717 p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;
1718
1719
1720 if (p_resp->num_rxqs < p_req->num_rxqs ||
1721 p_resp->num_txqs < p_req->num_txqs ||
1722 p_resp->num_sbs < p_req->num_sbs ||
1723 p_resp->num_mac_filters < p_req->num_mac_filters ||
1724 p_resp->num_vlan_filters < p_req->num_vlan_filters ||
1725 p_resp->num_mc_filters < p_req->num_mc_filters ||
1726 p_resp->num_cids < p_req->num_cids) {
1727 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1728 "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
1729 p_vf->abs_vf_id,
1730 p_req->num_rxqs, p_resp->num_rxqs,
1731 p_req->num_rxqs, p_resp->num_txqs,
1732 p_req->num_sbs, p_resp->num_sbs,
1733 p_req->num_mac_filters, p_resp->num_mac_filters,
1734 p_req->num_vlan_filters, p_resp->num_vlan_filters,
1735 p_req->num_mc_filters, p_resp->num_mc_filters,
1736 p_req->num_cids, p_resp->num_cids);
1737
1738
1739
1740
1741 if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1742 ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
1743 (p_vf->acquire.vfdev_info.os_type ==
1744 VFPF_ACQUIRE_OS_WINDOWS))
1745 return PFVF_STATUS_SUCCESS;
1746
1747 return PFVF_STATUS_NO_RESOURCE;
1748 }
1749
1750 return PFVF_STATUS_SUCCESS;
1751}
1752
1753static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
1754{
1755 p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
1756 OFFSETOF(struct mstorm_vf_zone,
1757 non_trigger.eth_queue_stat);
1758 p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
1759 p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
1760 OFFSETOF(struct ustorm_vf_zone,
1761 non_trigger.eth_queue_stat);
1762 p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
1763 p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
1764 OFFSETOF(struct pstorm_vf_zone,
1765 non_trigger.eth_queue_stat);
1766 p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
1767 p_stats->tstats.address = 0;
1768 p_stats->tstats.len = 0;
1769}
1770
1771static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
1772 struct ecore_ptt *p_ptt,
1773 struct ecore_vf_info *vf)
1774{
1775 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
1776 struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1777 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1778 struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1779 u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1780 struct pf_vf_resc *resc = &resp->resc;
1781 enum _ecore_status_t rc;
1782
1783 OSAL_MEMSET(resp, 0, sizeof(*resp));
1784
1785
1786
1787
1788
1789 pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1790 pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1791
1792
1793
1794
1795
1796 if (vf->state != VF_FREE &&
1797 vf->state != VF_STOPPED) {
1798 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1799 "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
1800 vf->abs_vf_id, vf->state);
1801 goto out;
1802 }
1803
1804
1805 if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1806 if (req->vfdev_info.capabilities &
1807 VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
1808 struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
1809
1810
1811
1812
1813 OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
1814
1815 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1816 "VF[%d] is pre-fastpath HSI\n",
1817 vf->abs_vf_id);
1818 p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
1819 p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1820 } else {
1821 DP_INFO(p_hwfn,
1822 "VF[%d] needs fastpath HSI %02x.%02x, which is"
1823 " incompatible with loaded FW's faspath"
1824 " HSI %02x.%02x\n",
1825 vf->abs_vf_id,
1826 req->vfdev_info.eth_fp_hsi_major,
1827 req->vfdev_info.eth_fp_hsi_minor,
1828 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1829
1830 goto out;
1831 }
1832 }
1833
1834
1835 if (ECORE_IS_CMT(p_hwfn->p_dev) &&
1836 !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1837 DP_INFO(p_hwfn,
1838 "VF[%d] is running an old driver that doesn't support"
1839 " 100g\n",
1840 vf->abs_vf_id);
1841 goto out;
1842 }
1843
1844#ifndef __EXTRACT__LINUX__
1845 if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
1846 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1847 goto out;
1848 }
1849#endif
1850
1851
1852 OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));
1853
1854 vf->opaque_fid = req->vfdev_info.opaque_fid;
1855
1856 vf->vf_bulletin = req->bulletin_addr;
1857 vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1858 vf->bulletin.size : req->bulletin_size;
1859
1860
1861 pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
1862 pfdev_info->db_size = 0;
1863 pfdev_info->indices_per_sb = PIS_PER_SB;
1864
1865 pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1866 PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1867 if (ECORE_IS_CMT(p_hwfn->p_dev))
1868 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1869
1870
1871
1872
1873 if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
1874 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
1875
1876
1877 resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
1878 p_ptt);
1879
1880 ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);
1881
1882 OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
1883 ETH_ALEN);
1884
1885 pfdev_info->fw_major = FW_MAJOR_VERSION;
1886 pfdev_info->fw_minor = FW_MINOR_VERSION;
1887 pfdev_info->fw_rev = FW_REVISION_VERSION;
1888 pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1889
1890
1891
1892
1893 pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
1894 req->vfdev_info.eth_fp_hsi_minor);
1895 pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
1896 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
1897 OSAL_NULL);
1898
1899 pfdev_info->dev_type = p_hwfn->p_dev->type;
1900 pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;
1901
1902
1903
1904
1905 vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1906 &req->resc_request, resc);
1907 if (vfpf_status != PFVF_STATUS_SUCCESS)
1908 goto out;
1909
1910
1911 rc = ecore_sp_vf_start(p_hwfn, vf);
1912 if (rc != ECORE_SUCCESS) {
1913 DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
1914 vf->abs_vf_id);
1915 vfpf_status = PFVF_STATUS_FAILURE;
1916 goto out;
1917 }
1918
1919
1920
1921
1922 resp->bulletin_size = vf->bulletin.size;
1923 ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
1924
1925 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1926 "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
1927 " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
1928 "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
1929 " n_vlans-%d\n",
1930 vf->abs_vf_id, resp->pfdev_info.chip_num,
1931 resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
1932 (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
1933 resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
1934 resc->num_vlan_filters);
1935
1936 vf->state = VF_ACQUIRED;
1937
1938out:
1939
1940 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1941 sizeof(struct pfvf_acquire_resp_tlv),
1942 vfpf_status);
1943}
1944
1945static enum _ecore_status_t
1946__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
1947 struct ecore_vf_info *p_vf, bool val)
1948{
1949 struct ecore_sp_vport_update_params params;
1950 enum _ecore_status_t rc;
1951
1952 if (val == p_vf->spoof_chk) {
1953 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1954 "Spoofchk value[%d] is already configured\n", val);
1955 return ECORE_SUCCESS;
1956 }
1957
1958 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_sp_vport_update_params));
1959 params.opaque_fid = p_vf->opaque_fid;
1960 params.vport_id = p_vf->vport_id;
1961 params.update_anti_spoofing_en_flg = 1;
1962 params.anti_spoofing_en = val;
1963
1964 rc = ecore_sp_vport_update(p_hwfn, ¶ms, ECORE_SPQ_MODE_EBLOCK,
1965 OSAL_NULL);
1966 if (rc == ECORE_SUCCESS) {
1967 p_vf->spoof_chk = val;
1968 p_vf->req_spoofchk_val = p_vf->spoof_chk;
1969 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1970 "Spoofchk val[%d] configured\n", val);
1971 } else {
1972 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1973 "Spoofchk configuration[val:%d] failed for VF[%d]\n",
1974 val, p_vf->relative_vf_id);
1975 }
1976
1977 return rc;
1978}
1979
1980static enum _ecore_status_t
1981ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
1982 struct ecore_vf_info *p_vf)
1983{
1984 struct ecore_filter_ucast filter;
1985 enum _ecore_status_t rc = ECORE_SUCCESS;
1986 int i;
1987
1988 OSAL_MEMSET(&filter, 0, sizeof(filter));
1989 filter.is_rx_filter = 1;
1990 filter.is_tx_filter = 1;
1991 filter.vport_to_add_to = p_vf->vport_id;
1992 filter.opcode = ECORE_FILTER_ADD;
1993
1994
1995 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
1996 if (!p_vf->shadow_config.vlans[i].used)
1997 continue;
1998
1999 filter.type = ECORE_FILTER_VLAN;
2000 filter.vlan = p_vf->shadow_config.vlans[i].vid;
2001 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2002 "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
2003 filter.vlan, p_vf->relative_vf_id);
2004 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
2005 &filter, ECORE_SPQ_MODE_CB,
2006 OSAL_NULL);
2007 if (rc) {
2008 DP_NOTICE(p_hwfn, true,
2009 "Failed to configure VLAN [%04x]"
2010 " to VF [%04x]\n",
2011 filter.vlan, p_vf->relative_vf_id);
2012 break;
2013 }
2014 }
2015
2016 return rc;
2017}
2018
2019static enum _ecore_status_t
2020ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
2021 struct ecore_vf_info *p_vf, u64 events)
2022{
2023 enum _ecore_status_t rc = ECORE_SUCCESS;
2024
2025
2026
2027 if ((events & (1 << VLAN_ADDR_FORCED)) &&
2028 !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
2029 rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
2030
2031 return rc;
2032}
2033
2034static enum _ecore_status_t
2035ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
2036 struct ecore_vf_info *p_vf,
2037 u64 events)
2038{
2039 enum _ecore_status_t rc = ECORE_SUCCESS;
2040 struct ecore_filter_ucast filter;
2041
2042 if (!p_vf->vport_instance)
2043 return ECORE_INVAL;
2044
2045 if ((events & (1 << MAC_ADDR_FORCED)) ||
2046 p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
2047 p_vf->p_vf_info.is_trusted_configured) {
2048
2049
2050
2051 OSAL_MEMSET(&filter, 0, sizeof(filter));
2052 filter.type = ECORE_FILTER_MAC;
2053 filter.opcode = ECORE_FILTER_REPLACE;
2054 filter.is_rx_filter = 1;
2055 filter.is_tx_filter = 1;
2056 filter.vport_to_add_to = p_vf->vport_id;
2057 OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);
2058
2059 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
2060 &filter,
2061 ECORE_SPQ_MODE_CB, OSAL_NULL);
2062 if (rc) {
2063 DP_NOTICE(p_hwfn, true,
2064 "PF failed to configure MAC for VF\n");
2065 return rc;
2066 }
2067
2068 if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
2069 p_vf->p_vf_info.is_trusted_configured)
2070 p_vf->configured_features |=
2071 1 << VFPF_BULLETIN_MAC_ADDR;
2072 else
2073 p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
2074 }
2075
2076 if (events & (1 << VLAN_ADDR_FORCED)) {
2077 struct ecore_sp_vport_update_params vport_update;
2078 u8 removal;
2079 int i;
2080
2081 OSAL_MEMSET(&filter, 0, sizeof(filter));
2082 filter.type = ECORE_FILTER_VLAN;
2083 filter.is_rx_filter = 1;
2084 filter.is_tx_filter = 1;
2085 filter.vport_to_add_to = p_vf->vport_id;
2086 filter.vlan = p_vf->bulletin.p_virt->pvid;
2087 filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
2088 ECORE_FILTER_FLUSH;
2089
2090
2091 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
2092 &filter,
2093 ECORE_SPQ_MODE_CB, OSAL_NULL);
2094 if (rc) {
2095 DP_NOTICE(p_hwfn, true,
2096 "PF failed to configure VLAN for VF\n");
2097 return rc;
2098 }
2099
2100
2101 OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
2102 vport_update.opaque_fid = p_vf->opaque_fid;
2103 vport_update.vport_id = p_vf->vport_id;
2104 vport_update.update_default_vlan_enable_flg = 1;
2105 vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
2106 vport_update.update_default_vlan_flg = 1;
2107 vport_update.default_vlan = filter.vlan;
2108
2109 vport_update.update_inner_vlan_removal_flg = 1;
2110 removal = filter.vlan ?
2111 1 : p_vf->shadow_config.inner_vlan_removal;
2112 vport_update.inner_vlan_removal_flg = removal;
2113 vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
2114 rc = ecore_sp_vport_update(p_hwfn, &vport_update,
2115 ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
2116 if (rc) {
2117 DP_NOTICE(p_hwfn, true,
2118 "PF failed to configure VF vport for vlan\n");
2119 return rc;
2120 }
2121
2122
2123 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
2124 struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
2125 struct ecore_queue_cid *p_cid = OSAL_NULL;
2126
2127
2128 p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
2129 if (p_cid == OSAL_NULL)
2130 continue;
2131
2132 rc = ecore_sp_eth_rx_queues_update(p_hwfn,
2133 (void **)&p_cid,
2134 1, 0, 1,
2135 ECORE_SPQ_MODE_EBLOCK,
2136 OSAL_NULL);
2137 if (rc) {
2138 DP_NOTICE(p_hwfn, true,
2139 "Failed to send Rx update"
2140 " fo queue[0x%04x]\n",
2141 p_cid->rel.queue_id);
2142 return rc;
2143 }
2144 }
2145
2146 if (filter.vlan)
2147 p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
2148 else
2149 p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
2150 }
2151
2152
2153
2154
2155 if (events)
2156 ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
2157
2158 return rc;
2159}
2160
2161static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
2162 struct ecore_ptt *p_ptt,
2163 struct ecore_vf_info *vf)
2164{
2165 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2166 struct ecore_sp_vport_start_params params;
2167 struct vfpf_vport_start_tlv *start;
2168 u8 status = PFVF_STATUS_SUCCESS;
2169 struct ecore_vf_info *vf_info;
2170 u64 *p_bitmap;
2171 int sb_id;
2172 enum _ecore_status_t rc;
2173
2174 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
2175 if (!vf_info) {
2176 DP_NOTICE(p_hwfn->p_dev, true,
2177 "Failed to get VF info, invalid vfid [%d]\n",
2178 vf->relative_vf_id);
2179 return;
2180 }
2181
2182 vf->state = VF_ENABLED;
2183 start = &mbx->req_virt->start_vport;
2184
2185 ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
2186
2187
2188 for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
2189 if (!start->sb_addr[sb_id]) {
2190 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2191 "VF[%d] did not fill the address of SB %d\n",
2192 vf->relative_vf_id, sb_id);
2193 break;
2194 }
2195
2196 ecore_int_cau_conf_sb(p_hwfn, p_ptt,
2197 start->sb_addr[sb_id],
2198 vf->igu_sbs[sb_id],
2199 vf->abs_vf_id, 1);
2200 }
2201
2202 vf->mtu = start->mtu;
2203 vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
2204
2205
2206
2207
2208
2209 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
2210 if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
2211 u8 vf_req = start->only_untagged;
2212
2213 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
2214 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
2215 }
2216
2217 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_sp_vport_start_params));
2218 params.tpa_mode = start->tpa_mode;
2219 params.remove_inner_vlan = start->inner_vlan_removal;
2220 params.tx_switching = true;
2221
2222#ifndef ASIC_ONLY
2223 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2224 DP_NOTICE(p_hwfn, false,
2225 "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
2226 params.tx_switching = false;
2227 }
2228#endif
2229
2230 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
2231 params.drop_ttl0 = false;
2232 params.concrete_fid = vf->concrete_fid;
2233 params.opaque_fid = vf->opaque_fid;
2234 params.vport_id = vf->vport_id;
2235 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
2236 params.mtu = vf->mtu;
2237
2238
2239 params.check_mac = !vf->p_vf_info.is_trusted_configured;
2240
2241 rc = ecore_sp_eth_vport_start(p_hwfn, ¶ms);
2242 if (rc != ECORE_SUCCESS) {
2243 DP_ERR(p_hwfn,
2244 "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
2245 status = PFVF_STATUS_FAILURE;
2246 } else {
2247 vf->vport_instance++;
2248
2249
2250 ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
2251 OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
2252 vf->vport_id, vf->opaque_fid);
2253 __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
2254 }
2255
2256 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
2257 sizeof(struct pfvf_def_resp_tlv), status);
2258}
2259
2260static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
2261 struct ecore_ptt *p_ptt,
2262 struct ecore_vf_info *vf)
2263{
2264 u8 status = PFVF_STATUS_SUCCESS;
2265 enum _ecore_status_t rc;
2266
2267 OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
2268 vf->vport_instance--;
2269 vf->spoof_chk = false;
2270
2271 if ((ecore_iov_validate_active_rxq(vf)) ||
2272 (ecore_iov_validate_active_txq(vf))) {
2273 vf->b_malicious = true;
2274 DP_NOTICE(p_hwfn, false,
2275 "VF [%02x] - considered malicious;"
2276 " Unable to stop RX/TX queuess\n",
2277 vf->abs_vf_id);
2278 status = PFVF_STATUS_MALICIOUS;
2279 goto out;
2280 }
2281
2282 rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
2283 if (rc != ECORE_SUCCESS) {
2284 DP_ERR(p_hwfn,
2285 "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
2286 status = PFVF_STATUS_FAILURE;
2287 }
2288
2289
2290 vf->configured_features = 0;
2291 OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2292
2293out:
2294 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2295 sizeof(struct pfvf_def_resp_tlv), status);
2296}
2297
2298static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
2299 struct ecore_ptt *p_ptt,
2300 struct ecore_vf_info *vf,
2301 u8 status, bool b_legacy)
2302{
2303 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2304 struct pfvf_start_queue_resp_tlv *p_tlv;
2305 struct vfpf_start_rxq_tlv *req;
2306 u16 length;
2307
2308 mbx->offset = (u8 *)mbx->reply_virt;
2309
2310
2311
2312
2313
2314 if (!b_legacy)
2315 length = sizeof(*p_tlv);
2316 else
2317 length = sizeof(struct pfvf_def_resp_tlv);
2318
2319 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
2320 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2321 sizeof(struct channel_list_end_tlv));
2322
2323
2324
2325
2326 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2327 req = &mbx->req_virt->start_rxq;
2328
2329 p_tlv->offset =
2330 PXP_VF_BAR0_START_MSDM_ZONE_B +
2331 OFFSETOF(struct mstorm_vf_zone,
2332 non_trigger.eth_rx_queue_producers) +
2333 sizeof(struct eth_rx_prod_data) * req->rx_qid;
2334 }
2335
2336 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2337}
2338
2339static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
2340 struct ecore_vf_info *p_vf, bool b_is_tx)
2341{
2342 struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
2343 struct vfpf_qid_tlv *p_qid_tlv;
2344
2345
2346 if (!(p_vf->acquire.vfdev_info.capabilities &
2347 VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
2348 if (b_is_tx)
2349 return ECORE_IOV_LEGACY_QID_TX;
2350 else
2351 return ECORE_IOV_LEGACY_QID_RX;
2352 }
2353
2354 p_qid_tlv = (struct vfpf_qid_tlv *)
2355 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2356 CHANNEL_TLV_QID);
2357 if (p_qid_tlv == OSAL_NULL) {
2358 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2359 "VF[%2x]: Failed to provide qid\n",
2360 p_vf->relative_vf_id);
2361
2362 return ECORE_IOV_QID_INVALID;
2363 }
2364
2365 if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
2366 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2367 "VF[%02x]: Provided qid out-of-bounds %02x\n",
2368 p_vf->relative_vf_id, p_qid_tlv->qid);
2369 return ECORE_IOV_QID_INVALID;
2370 }
2371
2372 return p_qid_tlv->qid;
2373}
2374
2375static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
2376 struct ecore_ptt *p_ptt,
2377 struct ecore_vf_info *vf)
2378{
2379 struct ecore_queue_start_common_params params;
2380 struct ecore_queue_cid_vf_params vf_params;
2381 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2382 u8 status = PFVF_STATUS_NO_RESOURCE;
2383 u8 qid_usage_idx, vf_legacy = 0;
2384 struct ecore_vf_queue *p_queue;
2385 struct vfpf_start_rxq_tlv *req;
2386 struct ecore_queue_cid *p_cid;
2387 struct ecore_sb_info sb_dummy;
2388 enum _ecore_status_t rc;
2389
2390 req = &mbx->req_virt->start_rxq;
2391
2392 if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2393 ECORE_IOV_VALIDATE_Q_DISABLE) ||
2394 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2395 goto out;
2396
2397 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2398 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2399 goto out;
2400
2401 p_queue = &vf->vf_queues[req->rx_qid];
2402 if (p_queue->cids[qid_usage_idx].p_cid)
2403 goto out;
2404
2405 vf_legacy = ecore_vf_calculate_legacy(vf);
2406
2407
2408 OSAL_MEMSET(¶ms, 0, sizeof(params));
2409 params.queue_id = (u8)p_queue->fw_rx_qid;
2410 params.vport_id = vf->vport_id;
2411 params.stats_id = vf->abs_vf_id + 0x10;
2412
2413
2414 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2415 sb_dummy.igu_sb_id = req->hw_sb;
2416 params.p_sb = &sb_dummy;
2417 params.sb_idx = req->sb_index;
2418
2419 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2420 vf_params.vfid = vf->relative_vf_id;
2421 vf_params.vf_qid = (u8)req->rx_qid;
2422 vf_params.vf_legacy = vf_legacy;
2423 vf_params.qid_usage_idx = qid_usage_idx;
2424
2425 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2426 ¶ms, true, &vf_params);
2427 if (p_cid == OSAL_NULL)
2428 goto out;
2429
2430
2431
2432
2433
2434 if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
2435 REG_WR(p_hwfn,
2436 GTT_BAR0_MAP_REG_MSDM_RAM +
2437 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id,
2438 req->rx_qid),
2439 0);
2440
2441 rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
2442 req->bd_max_bytes,
2443 req->rxq_addr,
2444 req->cqe_pbl_addr,
2445 req->cqe_pbl_size);
2446 if (rc != ECORE_SUCCESS) {
2447 status = PFVF_STATUS_FAILURE;
2448 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2449 } else {
2450 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2451 p_queue->cids[qid_usage_idx].b_is_tx = false;
2452 status = PFVF_STATUS_SUCCESS;
2453 vf->num_active_rxqs++;
2454 }
2455
2456out:
2457 ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2458 !!(vf_legacy &
2459 ECORE_QCID_LEGACY_VF_RX_PROD));
2460}
2461
2462static void
2463ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2464 struct ecore_tunnel_info *p_tun,
2465 u16 tunn_feature_mask)
2466{
2467 p_resp->tunn_feature_mask = tunn_feature_mask;
2468 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2469 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2470 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2471 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2472 p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled;
2473 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2474 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2475 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2476 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2477 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2478 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2479 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2480}
2481
2482static void
2483__ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2484 struct ecore_tunn_update_type *p_tun,
2485 enum ecore_tunn_mode mask, u8 tun_cls)
2486{
2487 if (p_req->tun_mode_update_mask & (1 << mask)) {
2488 p_tun->b_update_mode = true;
2489
2490 if (p_req->tunn_mode & (1 << mask))
2491 p_tun->b_mode_enabled = true;
2492 }
2493
2494 p_tun->tun_cls = tun_cls;
2495}
2496
2497static void
2498ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2499 struct ecore_tunn_update_type *p_tun,
2500 struct ecore_tunn_update_udp_port *p_port,
2501 enum ecore_tunn_mode mask,
2502 u8 tun_cls, u8 update_port, u16 port)
2503{
2504 if (update_port) {
2505 p_port->b_update_port = true;
2506 p_port->port = port;
2507 }
2508
2509 __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2510}
2511
2512static bool
2513ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2514{
2515 bool b_update_requested = false;
2516
2517 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2518 p_req->update_geneve_port || p_req->update_vxlan_port)
2519 b_update_requested = true;
2520
2521 return b_update_requested;
2522}
2523
2524static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
2525 struct ecore_ptt *p_ptt,
2526 struct ecore_vf_info *p_vf)
2527{
2528 struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
2529 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2530 struct pfvf_update_tunn_param_tlv *p_resp;
2531 struct vfpf_update_tunn_param_tlv *p_req;
2532 enum _ecore_status_t rc = ECORE_SUCCESS;
2533 u8 status = PFVF_STATUS_SUCCESS;
2534 bool b_update_required = false;
2535 struct ecore_tunnel_info tunn;
2536 u16 tunn_feature_mask = 0;
2537 int i;
2538
2539 mbx->offset = (u8 *)mbx->reply_virt;
2540
2541 OSAL_MEM_ZERO(&tunn, sizeof(tunn));
2542 p_req = &mbx->req_virt->tunn_param_update;
2543
2544 if (!ecore_iov_pf_validate_tunn_param(p_req)) {
2545 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2546 "No tunnel update requested by VF\n");
2547 status = PFVF_STATUS_FAILURE;
2548 goto send_resp;
2549 }
2550
2551 tunn.b_update_rx_cls = p_req->update_tun_cls;
2552 tunn.b_update_tx_cls = p_req->update_tun_cls;
2553
2554 ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2555 ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2556 p_req->update_vxlan_port,
2557 p_req->vxlan_port);
2558 ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2559 ECORE_MODE_L2GENEVE_TUNN,
2560 p_req->l2geneve_clss,
2561 p_req->update_geneve_port,
2562 p_req->geneve_port);
2563 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2564 ECORE_MODE_IPGENEVE_TUNN,
2565 p_req->ipgeneve_clss);
2566 __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2567 ECORE_MODE_L2GRE_TUNN,
2568 p_req->l2gre_clss);
2569 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2570 ECORE_MODE_IPGRE_TUNN,
2571 p_req->ipgre_clss);
2572
2573
2574
2575
2576
2577 rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
2578 &b_update_required, &tunn);
2579
2580 if (rc != ECORE_SUCCESS)
2581 status = PFVF_STATUS_FAILURE;
2582
2583
2584 if (b_update_required) {
2585 u16 geneve_port;
2586
2587 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2588 ECORE_SPQ_MODE_EBLOCK,
2589 OSAL_NULL);
2590 if (rc != ECORE_SUCCESS)
2591 status = PFVF_STATUS_FAILURE;
2592
2593 geneve_port = p_tun->geneve_port.port;
2594 ecore_for_each_vf(p_hwfn, i) {
2595 ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
2596 p_tun->vxlan_port.port,
2597 geneve_port);
2598 }
2599 }
2600
2601send_resp:
2602 p_resp = ecore_add_tlv(&mbx->offset,
2603 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2604
2605 ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2606 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2607 sizeof(struct channel_list_end_tlv));
2608
2609 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2610}
2611
2612static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
2613 struct ecore_ptt *p_ptt,
2614 struct ecore_vf_info *p_vf,
2615 u32 cid,
2616 u8 status)
2617{
2618 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2619 struct pfvf_start_queue_resp_tlv *p_tlv;
2620 bool b_legacy = false;
2621 u16 length;
2622
2623 mbx->offset = (u8 *)mbx->reply_virt;
2624
2625
2626
2627
2628
2629 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2630 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2631 b_legacy = true;
2632
2633 if (!b_legacy)
2634 length = sizeof(*p_tlv);
2635 else
2636 length = sizeof(struct pfvf_def_resp_tlv);
2637
2638 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
2639 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2640 sizeof(struct channel_list_end_tlv));
2641
2642
2643 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2644 p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
2645
2646 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2647}
2648
2649static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
2650 struct ecore_ptt *p_ptt,
2651 struct ecore_vf_info *vf)
2652{
2653 struct ecore_queue_start_common_params params;
2654 struct ecore_queue_cid_vf_params vf_params;
2655 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2656 u8 status = PFVF_STATUS_NO_RESOURCE;
2657 struct ecore_vf_queue *p_queue;
2658 struct vfpf_start_txq_tlv *req;
2659 struct ecore_queue_cid *p_cid;
2660 struct ecore_sb_info sb_dummy;
2661 u8 qid_usage_idx, vf_legacy;
2662 u32 cid = 0;
2663 enum _ecore_status_t rc;
2664 u16 pq;
2665
2666 OSAL_MEMSET(¶ms, 0, sizeof(params));
2667 req = &mbx->req_virt->start_txq;
2668
2669 if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2670 ECORE_IOV_VALIDATE_Q_NA) ||
2671 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2672 goto out;
2673
2674 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2675 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2676 goto out;
2677
2678 p_queue = &vf->vf_queues[req->tx_qid];
2679 if (p_queue->cids[qid_usage_idx].p_cid)
2680 goto out;
2681
2682 vf_legacy = ecore_vf_calculate_legacy(vf);
2683
2684
2685 params.queue_id = p_queue->fw_tx_qid;
2686 params.vport_id = vf->vport_id;
2687 params.stats_id = vf->abs_vf_id + 0x10;
2688
2689
2690 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2691 sb_dummy.igu_sb_id = req->hw_sb;
2692 params.p_sb = &sb_dummy;
2693 params.sb_idx = req->sb_index;
2694
2695 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2696 vf_params.vfid = vf->relative_vf_id;
2697 vf_params.vf_qid = (u8)req->tx_qid;
2698 vf_params.vf_legacy = vf_legacy;
2699 vf_params.qid_usage_idx = qid_usage_idx;
2700
2701 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2702 ¶ms, false, &vf_params);
2703 if (p_cid == OSAL_NULL)
2704 goto out;
2705
2706 pq = ecore_get_cm_pq_idx_vf(p_hwfn,
2707 vf->relative_vf_id);
2708 rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
2709 req->pbl_addr, req->pbl_size, pq);
2710 if (rc != ECORE_SUCCESS) {
2711 status = PFVF_STATUS_FAILURE;
2712 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2713 } else {
2714 status = PFVF_STATUS_SUCCESS;
2715 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2716 p_queue->cids[qid_usage_idx].b_is_tx = true;
2717 cid = p_cid->cid;
2718 }
2719
2720out:
2721 ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
2722 cid, status);
2723}
2724
2725static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
2726 struct ecore_vf_info *vf,
2727 u16 rxq_id,
2728 u8 qid_usage_idx,
2729 bool cqe_completion)
2730{
2731 struct ecore_vf_queue *p_queue;
2732 enum _ecore_status_t rc = ECORE_SUCCESS;
2733
2734 if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
2735 ECORE_IOV_VALIDATE_Q_NA)) {
2736 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2737 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2738 vf->relative_vf_id, rxq_id, qid_usage_idx);
2739 return ECORE_INVAL;
2740 }
2741
2742 p_queue = &vf->vf_queues[rxq_id];
2743
2744
2745
2746
2747 if (!p_queue->cids[qid_usage_idx].p_cid ||
2748 p_queue->cids[qid_usage_idx].b_is_tx) {
2749 struct ecore_queue_cid *p_cid;
2750
2751 p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
2752 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2753 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2754 vf->relative_vf_id, rxq_id, qid_usage_idx,
2755 rxq_id, p_cid->qid_usage_idx);
2756 return ECORE_INVAL;
2757 }
2758
2759
2760 rc = ecore_eth_rx_queue_stop(p_hwfn,
2761 p_queue->cids[qid_usage_idx].p_cid,
2762 false, cqe_completion);
2763 if (rc != ECORE_SUCCESS)
2764 return rc;
2765
2766 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2767 vf->num_active_rxqs--;
2768
2769 return ECORE_SUCCESS;
2770}
2771
2772static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
2773 struct ecore_vf_info *vf,
2774 u16 txq_id,
2775 u8 qid_usage_idx)
2776{
2777 struct ecore_vf_queue *p_queue;
2778 enum _ecore_status_t rc = ECORE_SUCCESS;
2779
2780 if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
2781 ECORE_IOV_VALIDATE_Q_NA))
2782 return ECORE_INVAL;
2783
2784 p_queue = &vf->vf_queues[txq_id];
2785 if (!p_queue->cids[qid_usage_idx].p_cid ||
2786 !p_queue->cids[qid_usage_idx].b_is_tx)
2787 return ECORE_INVAL;
2788
2789 rc = ecore_eth_tx_queue_stop(p_hwfn,
2790 p_queue->cids[qid_usage_idx].p_cid);
2791 if (rc != ECORE_SUCCESS)
2792 return rc;
2793
2794 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2795 return ECORE_SUCCESS;
2796}
2797
2798static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
2799 struct ecore_ptt *p_ptt,
2800 struct ecore_vf_info *vf)
2801{
2802 u16 length = sizeof(struct pfvf_def_resp_tlv);
2803 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2804 u8 status = PFVF_STATUS_FAILURE;
2805 struct vfpf_stop_rxqs_tlv *req;
2806 u8 qid_usage_idx;
2807 enum _ecore_status_t rc;
2808
2809
2810
2811
2812
2813 req = &mbx->req_virt->stop_rxqs;
2814 if (req->num_rxqs != 1) {
2815 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2816 "Odd; VF[%d] tried stopping multiple Rx queues\n",
2817 vf->relative_vf_id);
2818 status = PFVF_STATUS_NOT_SUPPORTED;
2819 goto out;
2820 }
2821
2822
2823 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2824 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2825 goto out;
2826
2827 rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2828 qid_usage_idx, req->cqe_completion);
2829 if (rc == ECORE_SUCCESS)
2830 status = PFVF_STATUS_SUCCESS;
2831out:
2832 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2833 length, status);
2834}
2835
2836static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
2837 struct ecore_ptt *p_ptt,
2838 struct ecore_vf_info *vf)
2839{
2840 u16 length = sizeof(struct pfvf_def_resp_tlv);
2841 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2842 u8 status = PFVF_STATUS_FAILURE;
2843 struct vfpf_stop_txqs_tlv *req;
2844 u8 qid_usage_idx;
2845 enum _ecore_status_t rc;
2846
2847
2848
2849
2850
2851 req = &mbx->req_virt->stop_txqs;
2852 if (req->num_txqs != 1) {
2853 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2854 "Odd; VF[%d] tried stopping multiple Tx queues\n",
2855 vf->relative_vf_id);
2856 status = PFVF_STATUS_NOT_SUPPORTED;
2857 goto out;
2858 }
2859
2860
2861 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2862 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2863 goto out;
2864
2865 rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
2866 qid_usage_idx);
2867 if (rc == ECORE_SUCCESS)
2868 status = PFVF_STATUS_SUCCESS;
2869
2870out:
2871 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2872 length, status);
2873}
2874
2875static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
2876 struct ecore_ptt *p_ptt,
2877 struct ecore_vf_info *vf)
2878{
2879 struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
2880 u16 length = sizeof(struct pfvf_def_resp_tlv);
2881 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2882 struct vfpf_update_rxq_tlv *req;
2883 u8 status = PFVF_STATUS_FAILURE;
2884 u8 complete_event_flg;
2885 u8 complete_cqe_flg;
2886 u8 qid_usage_idx;
2887 enum _ecore_status_t rc;
2888 u16 i;
2889
2890 req = &mbx->req_virt->update_rxq;
2891 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2892 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2893
2894 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2895 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2896 goto out;
2897
2898
2899
2900
2901 if ((vf->acquire.vfdev_info.capabilities &
2902 VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
2903 req->num_rxqs != 1) {
2904 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2905 "VF[%d] supports QIDs but sends multiple queues\n",
2906 vf->relative_vf_id);
2907 goto out;
2908 }
2909
2910
2911
2912
2913 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2914 if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
2915 ECORE_IOV_VALIDATE_Q_NA) ||
2916 !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2917 vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2918 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2919 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2920 vf->relative_vf_id, req->rx_qid,
2921 req->num_rxqs);
2922 goto out;
2923 }
2924 }
2925
2926 for (i = 0; i < req->num_rxqs; i++) {
2927 u16 qid = req->rx_qid + i;
2928
2929 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2930 }
2931
2932 rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2933 req->num_rxqs,
2934 complete_cqe_flg,
2935 complete_event_flg,
2936 ECORE_SPQ_MODE_EBLOCK,
2937 OSAL_NULL);
2938 if (rc != ECORE_SUCCESS)
2939 goto out;
2940
2941 status = PFVF_STATUS_SUCCESS;
2942out:
2943 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2944 length, status);
2945}
2946
2947static enum _ecore_status_t
2948ecore_iov_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn,
2949 struct ecore_ptt *p_ptt,
2950 struct ecore_vf_info *p_vf)
2951{
2952 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2953 struct ecore_sp_vport_update_params params;
2954 enum _ecore_status_t rc = ECORE_SUCCESS;
2955 struct vfpf_update_mtu_tlv *p_req;
2956 u8 status = PFVF_STATUS_SUCCESS;
2957
2958
2959 if (!p_vf->vport_instance) {
2960 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2961 "No VPORT instance available for VF[%d], failing MTU update\n",
2962 p_vf->abs_vf_id);
2963 status = PFVF_STATUS_FAILURE;
2964 goto send_status;
2965 }
2966
2967 p_req = &mbx->req_virt->update_mtu;
2968
2969 OSAL_MEMSET(¶ms, 0, sizeof(params));
2970 params.opaque_fid = p_vf->opaque_fid;
2971 params.vport_id = p_vf->vport_id;
2972 params.mtu = p_req->mtu;
2973 rc = ecore_sp_vport_update(p_hwfn, ¶ms, ECORE_SPQ_MODE_EBLOCK,
2974 OSAL_NULL);
2975
2976 if (rc)
2977 status = PFVF_STATUS_FAILURE;
2978send_status:
2979 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
2980 CHANNEL_TLV_UPDATE_MTU,
2981 sizeof(struct pfvf_def_resp_tlv),
2982 status);
2983 return rc;
2984}
2985
2986void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
2987 void *p_tlvs_list, u16 req_type)
2988{
2989 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2990 int len = 0;
2991
2992 do {
2993 if (!p_tlv->length) {
2994 DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
2995 return OSAL_NULL;
2996 }
2997
2998 if (p_tlv->type == req_type) {
2999 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3000 "Extended tlv type %s, length %d found\n",
3001 qede_ecore_channel_tlvs_string[p_tlv->type],
3002 p_tlv->length);
3003 return p_tlv;
3004 }
3005
3006 len += p_tlv->length;
3007 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
3008
3009 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
3010 DP_NOTICE(p_hwfn, true,
3011 "TLVs has overrun the buffer size\n");
3012 return OSAL_NULL;
3013 }
3014 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
3015
3016 return OSAL_NULL;
3017}
3018
3019static void
3020ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
3021 struct ecore_sp_vport_update_params *p_data,
3022 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
3023{
3024 struct vfpf_vport_update_activate_tlv *p_act_tlv;
3025 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
3026
3027 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
3028 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3029 if (!p_act_tlv)
3030 return;
3031
3032 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
3033 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
3034 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
3035 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
3036 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
3037}
3038
3039static void
3040ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
3041 struct ecore_sp_vport_update_params *p_data,
3042 struct ecore_vf_info *p_vf,
3043 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
3044{
3045 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
3046 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
3047
3048 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
3049 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3050 if (!p_vlan_tlv)
3051 return;
3052
3053 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
3054
3055
3056 if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
3057 p_data->update_inner_vlan_removal_flg = 1;
3058 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
3059 }
3060
3061 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
3062}
3063
3064static void
3065ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
3066 struct ecore_sp_vport_update_params *p_data,
3067 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
3068{
3069 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
3070 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
3071
3072 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
3073 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3074 if (!p_tx_switch_tlv)
3075 return;
3076
3077#ifndef ASIC_ONLY
3078 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
3079 DP_NOTICE(p_hwfn, false,
3080 "FPGA: Ignore tx-switching configuration originating"
3081 " from VFs\n");
3082 return;
3083 }
3084#endif
3085
3086 p_data->update_tx_switching_flg = 1;
3087 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
3088 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
3089}
3090
3091static void
3092ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
3093 struct ecore_sp_vport_update_params *p_data,
3094 struct ecore_iov_vf_mbx *p_mbx,
3095 u16 *tlvs_mask)
3096{
3097 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
3098 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
3099
3100 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
3101 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3102 if (!p_mcast_tlv)
3103 return;
3104
3105 p_data->update_approx_mcast_flg = 1;
3106 OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
3107 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
3108 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
3109}
3110
3111static void
3112ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
3113 struct ecore_sp_vport_update_params *p_data,
3114 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
3115{
3116 struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
3117 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
3118 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
3119
3120 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
3121 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3122 if (!p_accept_tlv)
3123 return;
3124
3125 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
3126 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
3127 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
3128 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
3129 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
3130}
3131
3132static void
3133ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
3134 struct ecore_sp_vport_update_params *p_data,
3135 struct ecore_iov_vf_mbx *p_mbx,
3136 u16 *tlvs_mask)
3137{
3138 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
3139 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
3140
3141 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
3142 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3143 if (!p_accept_any_vlan)
3144 return;
3145
3146 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
3147 p_data->update_accept_any_vlan_flg =
3148 p_accept_any_vlan->update_accept_any_vlan_flg;
3149 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
3150}
3151
3152static void
3153ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
3154 struct ecore_vf_info *vf,
3155 struct ecore_sp_vport_update_params *p_data,
3156 struct ecore_rss_params *p_rss,
3157 struct ecore_iov_vf_mbx *p_mbx,
3158 u16 *tlvs_mask, u16 *tlvs_accepted)
3159{
3160 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
3161 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
3162 bool b_reject = false;
3163 u16 table_size;
3164 u16 i, q_idx;
3165
3166 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
3167 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3168 if (!p_rss_tlv) {
3169 p_data->rss_params = OSAL_NULL;
3170 return;
3171 }
3172
3173 OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
3174
3175 p_rss->update_rss_config =
3176 !!(p_rss_tlv->update_rss_flags &
3177 VFPF_UPDATE_RSS_CONFIG_FLAG);
3178 p_rss->update_rss_capabilities =
3179 !!(p_rss_tlv->update_rss_flags &
3180 VFPF_UPDATE_RSS_CAPS_FLAG);
3181 p_rss->update_rss_ind_table =
3182 !!(p_rss_tlv->update_rss_flags &
3183 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
3184 p_rss->update_rss_key =
3185 !!(p_rss_tlv->update_rss_flags &
3186 VFPF_UPDATE_RSS_KEY_FLAG);
3187
3188 p_rss->rss_enable = p_rss_tlv->rss_enable;
3189 p_rss->rss_eng_id = vf->rss_eng_id;
3190 p_rss->rss_caps = p_rss_tlv->rss_caps;
3191 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
3192 OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
3193 sizeof(p_rss->rss_key));
3194
3195 table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
3196 (1 << p_rss_tlv->rss_table_size_log));
3197
3198 for (i = 0; i < table_size; i++) {
3199 struct ecore_queue_cid *p_cid;
3200
3201 q_idx = p_rss_tlv->rss_ind_table[i];
3202 if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
3203 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3204 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3205 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
3206 vf->relative_vf_id, q_idx);
3207 b_reject = true;
3208 goto out;
3209 }
3210
3211 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
3212 p_rss->rss_ind_table[i] = p_cid;
3213 }
3214
3215 p_data->rss_params = p_rss;
3216out:
3217 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
3218 if (!b_reject)
3219 *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
3220}
3221
3222static void
3223ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
3224 struct ecore_sp_vport_update_params *p_data,
3225 struct ecore_sge_tpa_params *p_sge_tpa,
3226 struct ecore_iov_vf_mbx *p_mbx,
3227 u16 *tlvs_mask)
3228{
3229 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
3230 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
3231
3232 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
3233 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3234
3235 if (!p_sge_tpa_tlv) {
3236 p_data->sge_tpa_params = OSAL_NULL;
3237 return;
3238 }
3239
3240 OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
3241
3242 p_sge_tpa->update_tpa_en_flg =
3243 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
3244 p_sge_tpa->update_tpa_param_flg =
3245 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
3246 VFPF_UPDATE_TPA_PARAM_FLAG);
3247
3248 p_sge_tpa->tpa_ipv4_en_flg =
3249 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
3250 p_sge_tpa->tpa_ipv6_en_flg =
3251 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
3252 p_sge_tpa->tpa_pkt_split_flg =
3253 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
3254 p_sge_tpa->tpa_hdr_data_split_flg =
3255 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
3256 p_sge_tpa->tpa_gro_consistent_flg =
3257 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
3258
3259 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
3260 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
3261 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
3262 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
3263 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
3264
3265 p_data->sge_tpa_params = p_sge_tpa;
3266
3267 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
3268}
3269
3270static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
3271 struct ecore_ptt *p_ptt,
3272 struct ecore_vf_info *vf)
3273{
3274 struct ecore_rss_params *p_rss_params = OSAL_NULL;
3275 struct ecore_sp_vport_update_params params;
3276 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3277 struct ecore_sge_tpa_params sge_tpa_params;
3278 u16 tlvs_mask = 0, tlvs_accepted = 0;
3279 u8 status = PFVF_STATUS_SUCCESS;
3280 u16 length;
3281 enum _ecore_status_t rc;
3282
3283
3284 if (!vf->vport_instance) {
3285 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3286 "No VPORT instance available for VF[%d],"
3287 " failing vport update\n",
3288 vf->abs_vf_id);
3289 status = PFVF_STATUS_FAILURE;
3290 goto out;
3291 }
3292
3293 p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
3294 if (p_rss_params == OSAL_NULL) {
3295 status = PFVF_STATUS_FAILURE;
3296 goto out;
3297 }
3298
3299 OSAL_MEMSET(¶ms, 0, sizeof(params));
3300 params.opaque_fid = vf->opaque_fid;
3301 params.vport_id = vf->vport_id;
3302 params.rss_params = OSAL_NULL;
3303
3304
3305
3306
3307 ecore_iov_vp_update_act_param(p_hwfn, ¶ms, mbx, &tlvs_mask);
3308 ecore_iov_vp_update_vlan_param(p_hwfn, ¶ms, vf, mbx, &tlvs_mask);
3309 ecore_iov_vp_update_tx_switch(p_hwfn, ¶ms, mbx, &tlvs_mask);
3310 ecore_iov_vp_update_mcast_bin_param(p_hwfn, ¶ms, mbx, &tlvs_mask);
3311 ecore_iov_vp_update_accept_flag(p_hwfn, ¶ms, mbx, &tlvs_mask);
3312 ecore_iov_vp_update_accept_any_vlan(p_hwfn, ¶ms, mbx, &tlvs_mask);
3313 ecore_iov_vp_update_sge_tpa_param(p_hwfn, ¶ms,
3314 &sge_tpa_params, mbx, &tlvs_mask);
3315
3316 tlvs_accepted = tlvs_mask;
3317
3318
3319
3320
3321
3322 ecore_iov_vp_update_rss_param(p_hwfn, vf, ¶ms, p_rss_params,
3323 mbx, &tlvs_mask, &tlvs_accepted);
3324
3325
3326
3327
3328
3329
3330 if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
3331 ¶ms, &tlvs_accepted) !=
3332 ECORE_SUCCESS) {
3333 tlvs_accepted = 0;
3334 status = PFVF_STATUS_NOT_SUPPORTED;
3335 goto out;
3336 }
3337
3338 if (!tlvs_accepted) {
3339 if (tlvs_mask)
3340 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3341 "Upper-layer prevents said VF"
3342 " configuration\n");
3343 else
3344 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3345 "No feature tlvs found for vport update\n");
3346 status = PFVF_STATUS_NOT_SUPPORTED;
3347 goto out;
3348 }
3349
3350 rc = ecore_sp_vport_update(p_hwfn, ¶ms, ECORE_SPQ_MODE_EBLOCK,
3351 OSAL_NULL);
3352
3353 if (rc)
3354 status = PFVF_STATUS_FAILURE;
3355
3356out:
3357 OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
3358 length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3359 tlvs_mask, tlvs_accepted);
3360 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3361}
3362
3363static enum _ecore_status_t
3364ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
3365 struct ecore_vf_info *p_vf,
3366 struct ecore_filter_ucast *p_params)
3367{
3368 int i;
3369
3370
3371 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3372 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3373 if (p_vf->shadow_config.vlans[i].used &&
3374 p_vf->shadow_config.vlans[i].vid ==
3375 p_params->vlan) {
3376 p_vf->shadow_config.vlans[i].used = false;
3377 break;
3378 }
3379 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3380 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3381 "VF [%d] - Tries to remove a non-existing"
3382 " vlan\n",
3383 p_vf->relative_vf_id);
3384 return ECORE_INVAL;
3385 }
3386 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3387 p_params->opcode == ECORE_FILTER_FLUSH) {
3388 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3389 p_vf->shadow_config.vlans[i].used = false;
3390 }
3391
3392
3393
3394
3395 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
3396 return ECORE_SUCCESS;
3397
3398 if (p_params->opcode == ECORE_FILTER_ADD ||
3399 p_params->opcode == ECORE_FILTER_REPLACE) {
3400 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3401 if (p_vf->shadow_config.vlans[i].used)
3402 continue;
3403
3404 p_vf->shadow_config.vlans[i].used = true;
3405 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3406 break;
3407 }
3408
3409 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3410 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3411 "VF [%d] - Tries to configure more than %d"
3412 " vlan filters\n",
3413 p_vf->relative_vf_id,
3414 ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
3415 return ECORE_INVAL;
3416 }
3417 }
3418
3419 return ECORE_SUCCESS;
3420}
3421
3422static enum _ecore_status_t
3423ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
3424 struct ecore_vf_info *p_vf,
3425 struct ecore_filter_ucast *p_params)
3426{
3427 char empty_mac[ETH_ALEN];
3428 int i;
3429
3430 OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
3431
3432
3433
3434
3435
3436
3437 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
3438 return ECORE_SUCCESS;
3439
3440
3441
3442
3443
3444
3445 if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
3446 p_vf->p_vf_info.is_trusted_configured)
3447 return ECORE_SUCCESS;
3448
3449
3450 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3451 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3452 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3453 p_params->mac, ETH_ALEN)) {
3454 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
3455 ETH_ALEN);
3456 break;
3457 }
3458 }
3459
3460 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3461 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3462 "MAC isn't configured\n");
3463 return ECORE_INVAL;
3464 }
3465 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3466 p_params->opcode == ECORE_FILTER_FLUSH) {
3467 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
3468 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
3469 }
3470
3471
3472 if (p_params->opcode != ECORE_FILTER_ADD &&
3473 p_params->opcode != ECORE_FILTER_REPLACE)
3474 return ECORE_SUCCESS;
3475
3476 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3477 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3478 empty_mac, ETH_ALEN)) {
3479 OSAL_MEMCPY(p_vf->shadow_config.macs[i],
3480 p_params->mac, ETH_ALEN);
3481 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3482 "Added MAC at %d entry in shadow\n", i);
3483 break;
3484 }
3485 }
3486
3487 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3488 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3489 "No available place for MAC\n");
3490 return ECORE_INVAL;
3491 }
3492
3493 return ECORE_SUCCESS;
3494}
3495
3496static enum _ecore_status_t
3497ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
3498 struct ecore_vf_info *p_vf,
3499 struct ecore_filter_ucast *p_params)
3500{
3501 enum _ecore_status_t rc = ECORE_SUCCESS;
3502
3503 if (p_params->type == ECORE_FILTER_MAC) {
3504 rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3505 if (rc != ECORE_SUCCESS)
3506 return rc;
3507 }
3508
3509 if (p_params->type == ECORE_FILTER_VLAN)
3510 rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3511
3512 return rc;
3513}
3514
3515static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
3516 struct ecore_ptt *p_ptt,
3517 struct ecore_vf_info *vf)
3518{
3519 struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3520 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3521 struct vfpf_ucast_filter_tlv *req;
3522 u8 status = PFVF_STATUS_SUCCESS;
3523 struct ecore_filter_ucast params;
3524 enum _ecore_status_t rc;
3525
3526
3527 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_filter_ucast));
3528 req = &mbx->req_virt->ucast_filter;
3529 params.opcode = (enum ecore_filter_opcode)req->opcode;
3530 params.type = (enum ecore_filter_ucast_type)req->type;
3531
3532
3533 params.is_rx_filter = 1;
3534 params.is_tx_filter = 1;
3535 params.vport_to_remove_from = vf->vport_id;
3536 params.vport_to_add_to = vf->vport_id;
3537 OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
3538 params.vlan = req->vlan;
3539
3540 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3541 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
3542 " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3543 vf->abs_vf_id, params.opcode, params.type,
3544 params.is_rx_filter ? "RX" : "",
3545 params.is_tx_filter ? "TX" : "",
3546 params.vport_to_add_to,
3547 params.mac[0], params.mac[1], params.mac[2],
3548 params.mac[3], params.mac[4], params.mac[5], params.vlan);
3549
3550 if (!vf->vport_instance) {
3551 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3552 "No VPORT instance available for VF[%d],"
3553 " failing ucast MAC configuration\n",
3554 vf->abs_vf_id);
3555 status = PFVF_STATUS_FAILURE;
3556 goto out;
3557 }
3558
3559
3560
3561
3562
3563 if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, ¶ms) !=
3564 ECORE_SUCCESS)
3565 goto out;
3566
3567
3568 if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
3569 (params.type == ECORE_FILTER_VLAN ||
3570 params.type == ECORE_FILTER_MAC_VLAN)) {
3571
3572
3573
3574 if (params.opcode == ECORE_FILTER_ADD ||
3575 params.opcode == ECORE_FILTER_REPLACE)
3576 status = PFVF_STATUS_FORCED;
3577 goto out;
3578 }
3579
3580 if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
3581 (params.type == ECORE_FILTER_MAC ||
3582 params.type == ECORE_FILTER_MAC_VLAN)) {
3583 if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
3584 (params.opcode != ECORE_FILTER_ADD &&
3585 params.opcode != ECORE_FILTER_REPLACE))
3586 status = PFVF_STATUS_FORCED;
3587 goto out;
3588 }
3589
3590 rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, ¶ms);
3591 if (rc == ECORE_EXISTS) {
3592 goto out;
3593 } else if (rc == ECORE_INVAL) {
3594 status = PFVF_STATUS_FAILURE;
3595 goto out;
3596 }
3597
3598 rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, ¶ms,
3599 ECORE_SPQ_MODE_CB, OSAL_NULL);
3600 if (rc)
3601 status = PFVF_STATUS_FAILURE;
3602
3603out:
3604 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3605 sizeof(struct pfvf_def_resp_tlv), status);
3606}
3607
3608static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
3609 struct ecore_ptt *p_ptt,
3610 struct ecore_vf_info *vf)
3611{
3612 int i;
3613
3614
3615 for (i = 0; i < vf->num_sbs; i++)
3616 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3617 vf->igu_sbs[i],
3618 vf->opaque_fid, false);
3619
3620 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3621 sizeof(struct pfvf_def_resp_tlv),
3622 PFVF_STATUS_SUCCESS);
3623}
3624
3625static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
3626 struct ecore_ptt *p_ptt,
3627 struct ecore_vf_info *vf)
3628{
3629 u16 length = sizeof(struct pfvf_def_resp_tlv);
3630 u8 status = PFVF_STATUS_SUCCESS;
3631
3632
3633 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3634
3635
3636 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3637
3638 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3639 length, status);
3640}
3641
3642static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
3643 struct ecore_ptt *p_ptt,
3644 struct ecore_vf_info *p_vf)
3645{
3646 u16 length = sizeof(struct pfvf_def_resp_tlv);
3647 u8 status = PFVF_STATUS_SUCCESS;
3648 enum _ecore_status_t rc = ECORE_SUCCESS;
3649
3650 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3651
3652 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3653
3654 rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3655 p_vf->opaque_fid);
3656
3657 if (rc != ECORE_SUCCESS) {
3658 DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
3659 rc);
3660 status = PFVF_STATUS_FAILURE;
3661 }
3662
3663 p_vf->state = VF_STOPPED;
3664 }
3665
3666 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3667 length, status);
3668}
3669
3670static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
3671 struct ecore_ptt *p_ptt,
3672 struct ecore_vf_info *p_vf)
3673{
3674 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3675 struct pfvf_read_coal_resp_tlv *p_resp;
3676 struct vfpf_read_coal_req_tlv *req;
3677 u8 status = PFVF_STATUS_FAILURE;
3678 struct ecore_vf_queue *p_queue;
3679 struct ecore_queue_cid *p_cid;
3680 enum _ecore_status_t rc = ECORE_SUCCESS;
3681 u16 coal = 0, qid, i;
3682 bool b_is_rx;
3683
3684 mbx->offset = (u8 *)mbx->reply_virt;
3685 req = &mbx->req_virt->read_coal_req;
3686
3687 qid = req->qid;
3688 b_is_rx = req->is_rx ? true : false;
3689
3690 if (b_is_rx) {
3691 if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
3692 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3693 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3694 "VF[%d]: Invalid Rx queue_id = %d\n",
3695 p_vf->abs_vf_id, qid);
3696 goto send_resp;
3697 }
3698
3699 p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
3700 rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3701 if (rc != ECORE_SUCCESS)
3702 goto send_resp;
3703 } else {
3704 if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
3705 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3706 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3707 "VF[%d]: Invalid Tx queue_id = %d\n",
3708 p_vf->abs_vf_id, qid);
3709 goto send_resp;
3710 }
3711 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3712 p_queue = &p_vf->vf_queues[qid];
3713 if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
3714 (!p_queue->cids[i].b_is_tx))
3715 continue;
3716
3717 p_cid = p_queue->cids[i].p_cid;
3718
3719 rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
3720 p_cid, &coal);
3721 if (rc != ECORE_SUCCESS)
3722 goto send_resp;
3723 break;
3724 }
3725 }
3726
3727 status = PFVF_STATUS_SUCCESS;
3728
3729send_resp:
3730 p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
3731 sizeof(*p_resp));
3732 p_resp->coal = coal;
3733
3734 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
3735 sizeof(struct channel_list_end_tlv));
3736
3737 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
3738}
3739
3740static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
3741 struct ecore_ptt *p_ptt,
3742 struct ecore_vf_info *vf)
3743{
3744 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3745 enum _ecore_status_t rc = ECORE_SUCCESS;
3746 struct vfpf_update_coalesce *req;
3747 u8 status = PFVF_STATUS_FAILURE;
3748 struct ecore_queue_cid *p_cid;
3749 u16 rx_coal, tx_coal;
3750 u16 qid;
3751 u32 i;
3752
3753 req = &mbx->req_virt->update_coalesce;
3754
3755 rx_coal = req->rx_coal;
3756 tx_coal = req->tx_coal;
3757 qid = req->qid;
3758
3759 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3760 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3761 rx_coal) {
3762 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3763 vf->abs_vf_id, qid);
3764 goto out;
3765 }
3766
3767 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3768 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3769 tx_coal) {
3770 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3771 vf->abs_vf_id, qid);
3772 goto out;
3773 }
3774
3775 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3776 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3777 vf->abs_vf_id, rx_coal, tx_coal, qid);
3778
3779 if (rx_coal) {
3780 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3781
3782 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3783 if (rc != ECORE_SUCCESS) {
3784 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3785 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3786 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3787 goto out;
3788 }
3789 vf->rx_coal = rx_coal;
3790 }
3791
3792
3793
3794
3795 if (tx_coal) {
3796 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3797
3798 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3799 if (p_queue->cids[i].p_cid == OSAL_NULL)
3800 continue;
3801
3802 if (!p_queue->cids[i].b_is_tx)
3803 continue;
3804
3805 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3806 p_queue->cids[i].p_cid);
3807 if (rc != ECORE_SUCCESS) {
3808 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3809 "VF[%d]: Unable to set tx queue coalesce\n",
3810 vf->abs_vf_id);
3811 goto out;
3812 }
3813 }
3814 vf->tx_coal = tx_coal;
3815 }
3816
3817 status = PFVF_STATUS_SUCCESS;
3818out:
3819 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3820 sizeof(struct pfvf_def_resp_tlv), status);
3821}
3822
3823enum _ecore_status_t
3824ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
3825 u16 rx_coal, u16 tx_coal,
3826 u16 vf_id, u16 qid)
3827{
3828 struct ecore_queue_cid *p_cid;
3829 struct ecore_vf_info *vf;
3830 struct ecore_ptt *p_ptt;
3831 int rc = 0;
3832 u32 i;
3833
3834 if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
3835 DP_NOTICE(p_hwfn, true,
3836 "VF[%d] - Can not set coalescing: VF is not active\n",
3837 vf_id);
3838 return ECORE_INVAL;
3839 }
3840
3841 vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
3842 p_ptt = ecore_ptt_acquire(p_hwfn);
3843 if (!p_ptt)
3844 return ECORE_AGAIN;
3845
3846 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3847 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3848 rx_coal) {
3849 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3850 vf->abs_vf_id, qid);
3851 goto out;
3852 }
3853
3854 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3855 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3856 tx_coal) {
3857 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3858 vf->abs_vf_id, qid);
3859 goto out;
3860 }
3861
3862 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3863 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3864 vf->abs_vf_id, rx_coal, tx_coal, qid);
3865
3866 if (rx_coal) {
3867 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3868
3869 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3870 if (rc != ECORE_SUCCESS) {
3871 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3872 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3873 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3874 goto out;
3875 }
3876 vf->rx_coal = rx_coal;
3877 }
3878
3879
3880
3881
3882 if (tx_coal) {
3883 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3884
3885 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3886 if (p_queue->cids[i].p_cid == OSAL_NULL)
3887 continue;
3888
3889 if (!p_queue->cids[i].b_is_tx)
3890 continue;
3891
3892 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3893 p_queue->cids[i].p_cid);
3894 if (rc != ECORE_SUCCESS) {
3895 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3896 "VF[%d]: Unable to set tx queue coalesce\n",
3897 vf->abs_vf_id);
3898 goto out;
3899 }
3900 }
3901 vf->tx_coal = tx_coal;
3902 }
3903
3904out:
3905 ecore_ptt_release(p_hwfn, p_ptt);
3906
3907 return rc;
3908}
3909
3910static enum _ecore_status_t
3911ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
3912 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3913{
3914 int cnt;
3915 u32 val;
3916
3917 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
3918
3919 for (cnt = 0; cnt < 50; cnt++) {
3920 val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3921 if (!val)
3922 break;
3923 OSAL_MSLEEP(20);
3924 }
3925 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
3926
3927 if (cnt == 50) {
3928 DP_ERR(p_hwfn,
3929 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3930 p_vf->abs_vf_id, val);
3931 return ECORE_TIMEOUT;
3932 }
3933
3934 return ECORE_SUCCESS;
3935}
3936
3937#define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS)
3938
3939static enum _ecore_status_t
3940ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
3941 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3942{
3943 u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp;
3944 u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port;
3945 u8 max_ports_per_engine = p_hwfn->p_dev->num_ports_in_engine;
3946 u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0;
3947 u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0;
3948 u8 port_id, tc, tc_id = 0, voq = 0;
3949 int cnt;
3950
3951
3952 for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
3953
3954 for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) {
3955 tc_id = (tc < max_phys_tcs_per_port) ?
3956 tc :
3957 PURE_LB_TC;
3958 voq = VOQ(port_id, tc_id, max_phys_tcs_per_port);
3959 cons[voq] = ecore_rd(p_hwfn, p_ptt,
3960 cons_voq0_addr + voq * 0x40);
3961 prod = ecore_rd(p_hwfn, p_ptt,
3962 prod_voq0_addr + voq * 0x40);
3963 distance[voq] = prod - cons[voq];
3964 }
3965 }
3966
3967
3968 port_id = 0;
3969 tc = 0;
3970 for (cnt = 0; cnt < 50; cnt++) {
3971 for (; port_id < max_ports_per_engine; port_id++) {
3972
3973 for (; tc < max_phys_tcs_per_port + 1; tc++) {
3974 tc_id = (tc < max_phys_tcs_per_port) ?
3975 tc :
3976 PURE_LB_TC;
3977 voq = VOQ(port_id, tc_id,
3978 max_phys_tcs_per_port);
3979 tmp = ecore_rd(p_hwfn, p_ptt,
3980 cons_voq0_addr + voq * 0x40);
3981 if (distance[voq] > tmp - cons[voq])
3982 break;
3983 }
3984
3985 if (tc == max_phys_tcs_per_port + 1)
3986 tc = 0;
3987 else
3988 break;
3989 }
3990
3991 if (port_id == max_ports_per_engine)
3992 break;
3993
3994 OSAL_MSLEEP(20);
3995 }
3996
3997 if (cnt == 50) {
3998 DP_ERR(p_hwfn,
3999 "VF[%d] - pbf polling failed on VOQ %d [port_id %d, tc_id %d]\n",
4000 p_vf->abs_vf_id, voq, port_id, tc_id);
4001 return ECORE_TIMEOUT;
4002 }
4003
4004 return ECORE_SUCCESS;
4005}
4006
4007static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
4008 struct ecore_vf_info *p_vf,
4009 struct ecore_ptt *p_ptt)
4010{
4011 enum _ecore_status_t rc;
4012
4013
4014
4015 rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
4016 if (rc)
4017 return rc;
4018
4019 rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
4020 if (rc)
4021 return rc;
4022
4023 return ECORE_SUCCESS;
4024}
4025
4026static enum _ecore_status_t
4027ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
4028 struct ecore_ptt *p_ptt,
4029 u16 rel_vf_id, u32 *ack_vfs)
4030{
4031 struct ecore_vf_info *p_vf;
4032 enum _ecore_status_t rc = ECORE_SUCCESS;
4033
4034 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
4035 if (!p_vf)
4036 return ECORE_SUCCESS;
4037
4038 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
4039 (1ULL << (rel_vf_id % 64))) {
4040 u16 vfid = p_vf->abs_vf_id;
4041
4042
4043
4044 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4045 "VF[%d] - Handling FLR\n", vfid);
4046
4047 ecore_iov_vf_cleanup(p_hwfn, p_vf);
4048
4049
4050 if (!p_vf->b_init)
4051 goto cleanup;
4052
4053
4054 rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
4055 if (rc != ECORE_SUCCESS)
4056 goto cleanup;
4057
4058 rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
4059 if (rc) {
4060
4061 DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
4062 return rc;
4063 }
4064
4065
4066
4067
4068 REG_WR(p_hwfn,
4069 GTT_BAR0_MAP_REG_USDM_RAM +
4070 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
4071
4072
4073
4074
4075 p_vf->state = VF_STOPPED;
4076
4077 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
4078 if (rc) {
4079
4080 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
4081 vfid);
4082 return rc;
4083 }
4084cleanup:
4085
4086 if (p_vf->state == VF_RESET)
4087 p_vf->state = VF_STOPPED;
4088 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
4089 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
4090 ~(1ULL << (rel_vf_id % 64));
4091 p_vf->vf_mbx.b_pending_msg = false;
4092 }
4093
4094 return rc;
4095}
4096
4097enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
4098 struct ecore_ptt *p_ptt)
4099{
4100 u32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
4101 enum _ecore_status_t rc = ECORE_SUCCESS;
4102 u16 i;
4103
4104 OSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);
4105
4106
4107
4108
4109
4110 OSAL_MSLEEP(100);
4111
4112 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
4113 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
4114
4115 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
4116 return rc;
4117}
4118
4119enum _ecore_status_t
4120ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
4121 struct ecore_ptt *p_ptt, u16 rel_vf_id)
4122{
4123 u32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
4124 enum _ecore_status_t rc = ECORE_SUCCESS;
4125
4126 OSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);
4127
4128
4129 OSAL_MSLEEP(100);
4130
4131 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
4132
4133 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
4134 return rc;
4135}
4136
4137bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
4138{
4139 bool found = false;
4140 u16 i;
4141
4142 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
4143
4144 for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++)
4145 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4146 "[%08x,...,%08x]: %08x\n",
4147 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
4148
4149 if (!p_hwfn->p_dev->p_iov_info) {
4150 DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
4151 return false;
4152 }
4153
4154
4155 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
4156 struct ecore_vf_info *p_vf;
4157 u8 vfid;
4158
4159 p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
4160 if (!p_vf)
4161 continue;
4162
4163 vfid = p_vf->abs_vf_id;
4164 if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
4165 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
4166 u16 rel_vf_id = p_vf->relative_vf_id;
4167
4168 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4169 "VF[%d] [rel %d] got FLR-ed\n",
4170 vfid, rel_vf_id);
4171
4172 p_vf->state = VF_RESET;
4173
4174
4175
4176
4177
4178
4179 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
4180 found = true;
4181 }
4182 }
4183
4184 return found;
4185}
4186
4187void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
4188 u16 vfid,
4189 struct ecore_mcp_link_params *p_params,
4190 struct ecore_mcp_link_state *p_link,
4191 struct ecore_mcp_link_capabilities *p_caps)
4192{
4193 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
4194 struct ecore_bulletin_content *p_bulletin;
4195
4196 if (!p_vf)
4197 return;
4198
4199 p_bulletin = p_vf->bulletin.p_virt;
4200
4201 if (p_params)
4202 __ecore_vf_get_link_params(p_params, p_bulletin);
4203 if (p_link)
4204 __ecore_vf_get_link_state(p_link, p_bulletin);
4205 if (p_caps)
4206 __ecore_vf_get_link_caps(p_caps, p_bulletin);
4207}
4208
4209void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
4210 struct ecore_ptt *p_ptt, int vfid)
4211{
4212 struct ecore_iov_vf_mbx *mbx;
4213 struct ecore_vf_info *p_vf;
4214
4215 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4216 if (!p_vf)
4217 return;
4218
4219 mbx = &p_vf->vf_mbx;
4220
4221
4222#ifndef CONFIG_ECORE_SW_CHANNEL
4223 if (!mbx->b_pending_msg) {
4224 DP_NOTICE(p_hwfn, true,
4225 "VF[%02x]: Trying to process mailbox message when none is pending\n",
4226 p_vf->abs_vf_id);
4227 return;
4228 }
4229 mbx->b_pending_msg = false;
4230#endif
4231
4232 mbx->first_tlv = mbx->req_virt->first_tlv;
4233
4234 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4235 "VF[%02x]: Processing mailbox message [type %04x]\n",
4236 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
4237
4238 OSAL_IOV_VF_MSG_TYPE(p_hwfn,
4239 p_vf->relative_vf_id,
4240 mbx->first_tlv.tl.type);
4241
4242
4243
4244
4245 ecore_iov_lock_vf_pf_channel(p_hwfn,
4246 p_vf, mbx->first_tlv.tl.type);
4247
4248
4249 if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
4250 !p_vf->b_malicious) {
4251
4252 switch (mbx->first_tlv.tl.type) {
4253 case CHANNEL_TLV_ACQUIRE:
4254 ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
4255 break;
4256 case CHANNEL_TLV_VPORT_START:
4257 ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
4258 break;
4259 case CHANNEL_TLV_VPORT_TEARDOWN:
4260 ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
4261 break;
4262 case CHANNEL_TLV_START_RXQ:
4263 ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
4264 break;
4265 case CHANNEL_TLV_START_TXQ:
4266 ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
4267 break;
4268 case CHANNEL_TLV_STOP_RXQS:
4269 ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
4270 break;
4271 case CHANNEL_TLV_STOP_TXQS:
4272 ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
4273 break;
4274 case CHANNEL_TLV_UPDATE_RXQ:
4275 ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
4276 break;
4277 case CHANNEL_TLV_VPORT_UPDATE:
4278 ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
4279 break;
4280 case CHANNEL_TLV_UCAST_FILTER:
4281 ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
4282 break;
4283 case CHANNEL_TLV_CLOSE:
4284 ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
4285 break;
4286 case CHANNEL_TLV_INT_CLEANUP:
4287 ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
4288 break;
4289 case CHANNEL_TLV_RELEASE:
4290 ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
4291 break;
4292 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
4293 ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
4294 break;
4295 case CHANNEL_TLV_COALESCE_UPDATE:
4296 ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
4297 break;
4298 case CHANNEL_TLV_COALESCE_READ:
4299 ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
4300 break;
4301 case CHANNEL_TLV_UPDATE_MTU:
4302 ecore_iov_vf_pf_update_mtu(p_hwfn, p_ptt, p_vf);
4303 break;
4304 }
4305 } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
4306
4307
4308
4309
4310
4311 if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
4312
4313 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4314 "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
4315 p_vf->abs_vf_id);
4316 } else {
4317 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4318 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
4319 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
4320 }
4321
4322 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
4323 mbx->first_tlv.tl.type,
4324 sizeof(struct pfvf_def_resp_tlv),
4325 PFVF_STATUS_MALICIOUS);
4326 } else {
4327
4328
4329
4330
4331
4332
4333 DP_NOTICE(p_hwfn, false,
4334 "VF[%02x]: unknown TLV. type %04x length %04x"
4335 " padding %08x reply address %lu\n",
4336 p_vf->abs_vf_id,
4337 mbx->first_tlv.tl.type,
4338 mbx->first_tlv.tl.length,
4339 mbx->first_tlv.padding,
4340 (unsigned long)mbx->first_tlv.reply_address);
4341
4342
4343
4344
4345 if (p_vf->acquire.first_tlv.reply_address &&
4346 (mbx->first_tlv.reply_address ==
4347 p_vf->acquire.first_tlv.reply_address))
4348 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
4349 mbx->first_tlv.tl.type,
4350 sizeof(struct pfvf_def_resp_tlv),
4351 PFVF_STATUS_NOT_SUPPORTED);
4352 else
4353 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4354 "VF[%02x]: Can't respond to TLV -"
4355 " no valid reply address\n",
4356 p_vf->abs_vf_id);
4357 }
4358
4359 ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
4360 mbx->first_tlv.tl.type);
4361
4362#ifdef CONFIG_ECORE_SW_CHANNEL
4363 mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
4364 mbx->sw_mbx.response_offset = 0;
4365#endif
4366}
4367
4368void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
4369 u64 *events)
4370{
4371 int i;
4372
4373 OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
4374
4375 ecore_for_each_vf(p_hwfn, i) {
4376 struct ecore_vf_info *p_vf;
4377
4378 p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
4379 if (p_vf->vf_mbx.b_pending_msg)
4380 events[i / 64] |= 1ULL << (i % 64);
4381 }
4382}
4383
4384static struct ecore_vf_info *
4385ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
4386{
4387 u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
4388
4389 if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
4390 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4391 "Got indication for VF [abs 0x%08x] that cannot be"
4392 " handled by PF\n",
4393 abs_vfid);
4394 return OSAL_NULL;
4395 }
4396
4397 return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
4398}
4399
4400static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
4401 u16 abs_vfid,
4402 struct regpair *vf_msg)
4403{
4404 struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
4405 abs_vfid);
4406
4407 if (!p_vf)
4408 return ECORE_SUCCESS;
4409
4410
4411
4412
4413 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
4414
4415 p_vf->vf_mbx.b_pending_msg = true;
4416
4417 return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
4418}
4419
4420static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
4421 struct malicious_vf_eqe_data *p_data)
4422{
4423 struct ecore_vf_info *p_vf;
4424
4425 p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
4426
4427 if (!p_vf)
4428 return;
4429
4430 if (!p_vf->b_malicious) {
4431 DP_NOTICE(p_hwfn, false,
4432 "VF [%d] - Malicious behavior [%02x]\n",
4433 p_vf->abs_vf_id, p_data->err_id);
4434
4435 p_vf->b_malicious = true;
4436 } else {
4437 DP_INFO(p_hwfn,
4438 "VF [%d] - Malicious behavior [%02x]\n",
4439 p_vf->abs_vf_id, p_data->err_id);
4440 }
4441
4442 OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
4443}
4444
4445static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
4446 u8 opcode,
4447 __le16 echo,
4448 union event_ring_data *data,
4449 u8 OSAL_UNUSED fw_return_code)
4450{
4451 switch (opcode) {
4452 case COMMON_EVENT_VF_PF_CHANNEL:
4453 return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
4454 &data->vf_pf_channel.msg_addr);
4455 case COMMON_EVENT_VF_FLR:
4456 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4457 "VF-FLR is still not supported\n");
4458 return ECORE_SUCCESS;
4459 case COMMON_EVENT_MALICIOUS_VF:
4460 ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
4461 return ECORE_SUCCESS;
4462 default:
4463 DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
4464 opcode);
4465 return ECORE_INVAL;
4466 }
4467}
4468
4469bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4470{
4471 return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
4472 (1ULL << (rel_vf_id % 64)));
4473}
4474
4475u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4476{
4477 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
4478 u16 i;
4479
4480 if (!p_iov)
4481 goto out;
4482
4483 for (i = rel_vf_id; i < p_iov->total_vfs; i++)
4484 if (ecore_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false))
4485 return i;
4486
4487out:
4488 return MAX_NUM_VFS_K2;
4489}
4490
4491enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
4492 struct ecore_ptt *ptt, int vfid)
4493{
4494 struct dmae_params params;
4495 struct ecore_vf_info *vf_info;
4496
4497 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4498 if (!vf_info)
4499 return ECORE_INVAL;
4500
4501 OSAL_MEMSET(¶ms, 0, sizeof(params));
4502 SET_FIELD(params.flags, DMAE_PARAMS_SRC_VF_VALID, 0x1);
4503 SET_FIELD(params.flags, DMAE_PARAMS_COMPLETION_DST, 0x1);
4504 params.src_vf_id = vf_info->abs_vf_id;
4505
4506 if (ecore_dmae_host2host(p_hwfn, ptt,
4507 vf_info->vf_mbx.pending_req,
4508 vf_info->vf_mbx.req_phys,
4509 sizeof(union vfpf_tlvs) / 4, ¶ms)) {
4510 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4511 "Failed to copy message from VF 0x%02x\n", vfid);
4512
4513 return ECORE_IO;
4514 }
4515
4516 return ECORE_SUCCESS;
4517}
4518
4519void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
4520 u8 *mac, int vfid)
4521{
4522 struct ecore_vf_info *vf_info;
4523 u64 feature;
4524
4525 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4526 if (!vf_info) {
4527 DP_NOTICE(p_hwfn->p_dev, true,
4528 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
4529 return;
4530 }
4531 if (vf_info->b_malicious) {
4532 DP_NOTICE(p_hwfn->p_dev, false,
4533 "Can't set forced MAC to malicious VF [%d]\n",
4534 vfid);
4535 return;
4536 }
4537
4538 if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
4539 vf_info->p_vf_info.is_trusted_configured) {
4540 feature = 1 << VFPF_BULLETIN_MAC_ADDR;
4541
4542 vf_info->bulletin.p_virt->valid_bitmap &=
4543 ~(1 << MAC_ADDR_FORCED);
4544 } else {
4545 feature = 1 << MAC_ADDR_FORCED;
4546
4547 vf_info->bulletin.p_virt->valid_bitmap &=
4548 ~(1 << VFPF_BULLETIN_MAC_ADDR);
4549 }
4550
4551 OSAL_MEMCPY(vf_info->bulletin.p_virt->mac,
4552 mac, ETH_ALEN);
4553
4554 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4555
4556 ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4557}
4558
4559enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
4560 u8 *mac, int vfid)
4561{
4562 struct ecore_vf_info *vf_info;
4563 u64 feature;
4564
4565 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4566 if (!vf_info) {
4567 DP_NOTICE(p_hwfn->p_dev, true,
4568 "Can not set MAC, invalid vfid [%d]\n", vfid);
4569 return ECORE_INVAL;
4570 }
4571 if (vf_info->b_malicious) {
4572 DP_NOTICE(p_hwfn->p_dev, false,
4573 "Can't set MAC to malicious VF [%d]\n",
4574 vfid);
4575 return ECORE_INVAL;
4576 }
4577
4578 if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
4579 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4580 "Can not set MAC, Forced MAC is configured\n");
4581 return ECORE_INVAL;
4582 }
4583
4584 feature = 1 << VFPF_BULLETIN_MAC_ADDR;
4585 OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
4586
4587 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4588
4589 if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
4590 vf_info->p_vf_info.is_trusted_configured)
4591 ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4592
4593 return ECORE_SUCCESS;
4594}
4595
4596#ifndef LINUX_REMOVE
4597enum _ecore_status_t
4598ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
4599 bool b_untagged_only, int vfid)
4600{
4601 struct ecore_vf_info *vf_info;
4602 u64 feature;
4603
4604 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4605 if (!vf_info) {
4606 DP_NOTICE(p_hwfn->p_dev, true,
4607 "Can not set untagged default, invalid vfid [%d]\n",
4608 vfid);
4609 return ECORE_INVAL;
4610 }
4611 if (vf_info->b_malicious) {
4612 DP_NOTICE(p_hwfn->p_dev, false,
4613 "Can't set untagged default to malicious VF [%d]\n",
4614 vfid);
4615 return ECORE_INVAL;
4616 }
4617
4618
4619
4620
4621 if (vf_info->state == VF_ENABLED) {
4622 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4623 "Can't support untagged change for vfid[%d] -"
4624 " VF is already active\n",
4625 vfid);
4626 return ECORE_INVAL;
4627 }
4628
4629
4630
4631
4632 feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
4633 (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
4634 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4635
4636 vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
4637 : 0;
4638
4639 return ECORE_SUCCESS;
4640}
4641
4642void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
4643 u16 *opaque_fid)
4644{
4645 struct ecore_vf_info *vf_info;
4646
4647 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4648 if (!vf_info)
4649 return;
4650
4651 *opaque_fid = vf_info->opaque_fid;
4652}
4653#endif
4654
4655void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
4656 u16 pvid, int vfid)
4657{
4658 struct ecore_vf_info *vf_info;
4659 u64 feature;
4660
4661 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4662 if (!vf_info) {
4663 DP_NOTICE(p_hwfn->p_dev, true,
4664 "Can not set forced MAC, invalid vfid [%d]\n",
4665 vfid);
4666 return;
4667 }
4668 if (vf_info->b_malicious) {
4669 DP_NOTICE(p_hwfn->p_dev, false,
4670 "Can't set forced vlan to malicious VF [%d]\n",
4671 vfid);
4672 return;
4673 }
4674
4675 feature = 1 << VLAN_ADDR_FORCED;
4676 vf_info->bulletin.p_virt->pvid = pvid;
4677 if (pvid)
4678 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4679 else
4680 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
4681
4682 ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4683}
4684
4685void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
4686 int vfid, u16 vxlan_port, u16 geneve_port)
4687{
4688 struct ecore_vf_info *vf_info;
4689
4690 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4691 if (!vf_info) {
4692 DP_NOTICE(p_hwfn->p_dev, true,
4693 "Can not set udp ports, invalid vfid [%d]\n", vfid);
4694 return;
4695 }
4696
4697 if (vf_info->b_malicious) {
4698 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4699 "Can not set udp ports to malicious VF [%d]\n",
4700 vfid);
4701 return;
4702 }
4703
4704 vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
4705 vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
4706}
4707
4708bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
4709{
4710 struct ecore_vf_info *p_vf_info;
4711
4712 p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4713 if (!p_vf_info)
4714 return false;
4715
4716 return !!p_vf_info->vport_instance;
4717}
4718
4719bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
4720{
4721 struct ecore_vf_info *p_vf_info;
4722
4723 p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4724 if (!p_vf_info)
4725 return true;
4726
4727 return p_vf_info->state == VF_STOPPED;
4728}
4729
4730bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
4731{
4732 struct ecore_vf_info *vf_info;
4733
4734 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4735 if (!vf_info)
4736 return false;
4737
4738 return vf_info->spoof_chk;
4739}
4740
4741enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
4742 int vfid, bool val)
4743{
4744 struct ecore_vf_info *vf;
4745 enum _ecore_status_t rc = ECORE_INVAL;
4746
4747 if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
4748 DP_NOTICE(p_hwfn, true,
4749 "SR-IOV sanity check failed, can't set spoofchk\n");
4750 goto out;
4751 }
4752
4753 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4754 if (!vf)
4755 goto out;
4756
4757 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
4758
4759 vf->req_spoofchk_val = val;
4760 rc = ECORE_SUCCESS;
4761 goto out;
4762 }
4763
4764 rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);
4765
4766out:
4767 return rc;
4768}
4769
4770u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
4771{
4772 u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;
4773
4774 max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
4775 : ECORE_MAX_VF_CHAINS_PER_PF;
4776
4777 return max_chains_per_vf;
4778}
4779
4780void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
4781 u16 rel_vf_id,
4782 void **pp_req_virt_addr,
4783 u16 *p_req_virt_size)
4784{
4785 struct ecore_vf_info *vf_info =
4786 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4787
4788 if (!vf_info)
4789 return;
4790
4791 if (pp_req_virt_addr)
4792 *pp_req_virt_addr = vf_info->vf_mbx.req_virt;
4793
4794 if (p_req_virt_size)
4795 *p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
4796}
4797
4798void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
4799 u16 rel_vf_id,
4800 void **pp_reply_virt_addr,
4801 u16 *p_reply_virt_size)
4802{
4803 struct ecore_vf_info *vf_info =
4804 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4805
4806 if (!vf_info)
4807 return;
4808
4809 if (pp_reply_virt_addr)
4810 *pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;
4811
4812 if (p_reply_virt_size)
4813 *p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
4814}
4815
4816#ifdef CONFIG_ECORE_SW_CHANNEL
4817struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
4818 u16 rel_vf_id)
4819{
4820 struct ecore_vf_info *vf_info =
4821 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4822
4823 if (!vf_info)
4824 return OSAL_NULL;
4825
4826 return &vf_info->vf_mbx.sw_mbx;
4827}
4828#endif
4829
4830bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
4831{
4832 return (length >= sizeof(struct vfpf_first_tlv) &&
4833 (length <= sizeof(union vfpf_tlvs)));
4834}
4835
4836u32 ecore_iov_pfvf_msg_length(void)
4837{
4838 return sizeof(union pfvf_tlvs);
4839}
4840
4841u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
4842 u16 rel_vf_id)
4843{
4844 struct ecore_vf_info *p_vf;
4845
4846 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4847 if (!p_vf || !p_vf->bulletin.p_virt)
4848 return OSAL_NULL;
4849
4850 if (!(p_vf->bulletin.p_virt->valid_bitmap &
4851 (1 << VFPF_BULLETIN_MAC_ADDR)))
4852 return OSAL_NULL;
4853
4854 return p_vf->bulletin.p_virt->mac;
4855}
4856
4857u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4858{
4859 struct ecore_vf_info *p_vf;
4860
4861 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4862 if (!p_vf || !p_vf->bulletin.p_virt)
4863 return OSAL_NULL;
4864
4865 if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
4866 return OSAL_NULL;
4867
4868 return p_vf->bulletin.p_virt->mac;
4869}
4870
4871u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
4872 u16 rel_vf_id)
4873{
4874 struct ecore_vf_info *p_vf;
4875
4876 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4877 if (!p_vf || !p_vf->bulletin.p_virt)
4878 return 0;
4879
4880 if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
4881 return 0;
4882
4883 return p_vf->bulletin.p_virt->pvid;
4884}
4885
4886enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
4887 struct ecore_ptt *p_ptt,
4888 int vfid, int val)
4889{
4890 struct ecore_vf_info *vf;
4891 u8 abs_vp_id = 0;
4892 u16 rl_id;
4893 enum _ecore_status_t rc;
4894
4895 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4896
4897 if (!vf)
4898 return ECORE_INVAL;
4899
4900 rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
4901 if (rc != ECORE_SUCCESS)
4902 return rc;
4903
4904 rl_id = abs_vp_id;
4905 return ecore_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
4906}
4907
4908enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
4909 int vfid, u32 rate)
4910{
4911 struct ecore_vf_info *vf;
4912 int i;
4913
4914 for_each_hwfn(p_dev, i) {
4915 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
4916
4917 if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
4918 DP_NOTICE(p_hwfn, true,
4919 "SR-IOV sanity check failed, can't set min rate\n");
4920 return ECORE_INVAL;
4921 }
4922 }
4923
4924 vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
4925 if (!vf) {
4926 DP_NOTICE(p_dev, true,
4927 "Getting vf info failed, can't set min rate\n");
4928 return ECORE_INVAL;
4929 }
4930
4931 return ecore_configure_vport_wfq(p_dev, vf->vport_id, rate);
4932}
4933
4934enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
4935 struct ecore_ptt *p_ptt,
4936 int vfid,
4937 struct ecore_eth_stats *p_stats)
4938{
4939 struct ecore_vf_info *vf;
4940
4941 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4942 if (!vf)
4943 return ECORE_INVAL;
4944
4945 if (vf->state != VF_ENABLED)
4946 return ECORE_INVAL;
4947
4948 __ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
4949 vf->abs_vf_id + 0x10, false);
4950
4951 return ECORE_SUCCESS;
4952}
4953
4954u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4955{
4956 struct ecore_vf_info *p_vf;
4957
4958 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4959 if (!p_vf)
4960 return 0;
4961
4962 return p_vf->num_rxqs;
4963}
4964
4965u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4966{
4967 struct ecore_vf_info *p_vf;
4968
4969 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4970 if (!p_vf)
4971 return 0;
4972
4973 return p_vf->num_active_rxqs;
4974}
4975
4976void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4977{
4978 struct ecore_vf_info *p_vf;
4979
4980 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4981 if (!p_vf)
4982 return OSAL_NULL;
4983
4984 return p_vf->ctx;
4985}
4986
4987u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4988{
4989 struct ecore_vf_info *p_vf;
4990
4991 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4992 if (!p_vf)
4993 return 0;
4994
4995 return p_vf->num_sbs;
4996}
4997
4998bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4999{
5000 struct ecore_vf_info *p_vf;
5001
5002 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
5003 if (!p_vf)
5004 return false;
5005
5006 return (p_vf->state == VF_FREE);
5007}
5008
5009bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
5010 u16 rel_vf_id)
5011{
5012 struct ecore_vf_info *p_vf;
5013
5014 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
5015 if (!p_vf)
5016 return false;
5017
5018 return (p_vf->state == VF_ACQUIRED);
5019}
5020
5021bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
5022{
5023 struct ecore_vf_info *p_vf;
5024
5025 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
5026 if (!p_vf)
5027 return false;
5028
5029 return (p_vf->state == VF_ENABLED);
5030}
5031
5032bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
5033 u16 rel_vf_id)
5034{
5035 struct ecore_vf_info *p_vf;
5036
5037 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
5038 if (!p_vf)
5039 return false;
5040
5041 return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
5042}
5043
5044int
5045ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
5046{
5047 struct ecore_wfq_data *vf_vp_wfq;
5048 struct ecore_vf_info *vf_info;
5049
5050 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
5051 if (!vf_info)
5052 return 0;
5053
5054 vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
5055
5056 if (vf_vp_wfq->configured)
5057 return vf_vp_wfq->min_speed;
5058 else
5059 return 0;
5060}
5061
5062#ifdef CONFIG_ECORE_SW_CHANNEL
5063void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
5064 bool b_is_hw)
5065{
5066 struct ecore_vf_info *vf_info;
5067
5068 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
5069 if (!vf_info)
5070 return;
5071
5072 vf_info->b_hw_channel = b_is_hw;
5073}
5074#endif
5075