1
2
3
4
5#include <rte_atomic.h>
6#include <rte_eal.h>
7#include <rte_ether.h>
8#include <rte_malloc.h>
9#include <rte_memzone.h>
10#include <rte_dev.h>
11#include <errno.h>
12
13#include "idpf_ethdev.h"
14#include "idpf_rxtx.h"
15
/* Devarg keys accepted by this PMD. */
#define IDPF_TX_SINGLE_Q "tx_single"
#define IDPF_RX_SINGLE_Q "rx_single"
#define IDPF_VPORT "vport"

/* Protects idpf_adapter_list across probe/remove. */
rte_spinlock_t idpf_adapter_lock;
/* All adapters created by this driver, one per PCI device. */
struct idpf_adapter_list idpf_adapter_list;
/* Set once the lock and list above have been initialized (first probe). */
bool idpf_adapter_list_init;

/* Dynamic mbuf flag; presumably registered for Rx timestamp offload
 * elsewhere in the driver - confirm against idpf_rxtx.c.
 */
uint64_t idpf_timestamp_dynflag;

/* Keys recognized by rte_kvargs_parse(); NULL-terminated. */
static const char * const idpf_valid_args[] = {
	IDPF_TX_SINGLE_Q,
	IDPF_RX_SINGLE_Q,
	IDPF_VPORT,
	NULL
};
33
34static int
35idpf_dev_link_update(struct rte_eth_dev *dev,
36 __rte_unused int wait_to_complete)
37{
38 struct rte_eth_link new_link;
39
40 memset(&new_link, 0, sizeof(new_link));
41
42 new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
43 new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
44 new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
45 RTE_ETH_LINK_SPEED_FIXED);
46
47 return rte_eth_linkstatus_set(dev, &new_link);
48}
49
50static int
51idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
52{
53 struct idpf_vport *vport = dev->data->dev_private;
54 struct idpf_adapter *adapter = vport->adapter;
55
56 dev_info->max_rx_queues = adapter->caps->max_rx_q;
57 dev_info->max_tx_queues = adapter->caps->max_tx_q;
58 dev_info->min_rx_bufsize = IDPF_MIN_BUF_SIZE;
59 dev_info->max_rx_pktlen = IDPF_MAX_FRAME_SIZE;
60
61 dev_info->max_mtu = dev_info->max_rx_pktlen - IDPF_ETH_OVERHEAD;
62 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
63
64 dev_info->flow_type_rss_offloads = IDPF_RSS_OFFLOAD_ALL;
65
66 dev_info->rx_offload_capa =
67 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
68 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
69 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
70 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
71 RTE_ETH_RX_OFFLOAD_TIMESTAMP;
72
73 dev_info->tx_offload_capa =
74 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
75 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
76 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
77 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
78 RTE_ETH_TX_OFFLOAD_TCP_TSO |
79 RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
80 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
81
82 dev_info->default_txconf = (struct rte_eth_txconf) {
83 .tx_free_thresh = IDPF_DEFAULT_TX_FREE_THRESH,
84 .tx_rs_thresh = IDPF_DEFAULT_TX_RS_THRESH,
85 };
86
87 dev_info->default_rxconf = (struct rte_eth_rxconf) {
88 .rx_free_thresh = IDPF_DEFAULT_RX_FREE_THRESH,
89 };
90
91 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
92 .nb_max = IDPF_MAX_RING_DESC,
93 .nb_min = IDPF_MIN_RING_DESC,
94 .nb_align = IDPF_ALIGN_RING_DESC,
95 };
96
97 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
98 .nb_max = IDPF_MAX_RING_DESC,
99 .nb_min = IDPF_MIN_RING_DESC,
100 .nb_align = IDPF_ALIGN_RING_DESC,
101 };
102
103 return 0;
104}
105
106static int
107idpf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
108{
109
110 if (dev->data->dev_started) {
111 PMD_DRV_LOG(ERR, "port must be stopped before configuration");
112 return -EBUSY;
113 }
114
115 return 0;
116}
117
118static const uint32_t *
119idpf_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
120{
121 static const uint32_t ptypes[] = {
122 RTE_PTYPE_L2_ETHER,
123 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
124 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
125 RTE_PTYPE_L4_FRAG,
126 RTE_PTYPE_L4_UDP,
127 RTE_PTYPE_L4_TCP,
128 RTE_PTYPE_L4_SCTP,
129 RTE_PTYPE_L4_ICMP,
130 RTE_PTYPE_UNKNOWN
131 };
132
133 return ptypes;
134}
135
136static int
137idpf_init_vport_req_info(struct rte_eth_dev *dev)
138{
139 struct idpf_vport *vport = dev->data->dev_private;
140 struct idpf_adapter *adapter = vport->adapter;
141 struct virtchnl2_create_vport *vport_info;
142 uint16_t idx = adapter->cur_vport_idx;
143
144 if (idx == IDPF_INVALID_VPORT_IDX) {
145 PMD_INIT_LOG(ERR, "Invalid vport index.");
146 return -EINVAL;
147 }
148
149 if (adapter->vport_req_info[idx] == NULL) {
150 adapter->vport_req_info[idx] = rte_zmalloc(NULL,
151 sizeof(struct virtchnl2_create_vport), 0);
152 if (adapter->vport_req_info[idx] == NULL) {
153 PMD_INIT_LOG(ERR, "Failed to allocate vport_req_info");
154 return -ENOMEM;
155 }
156 }
157
158 vport_info =
159 (struct virtchnl2_create_vport *)adapter->vport_req_info[idx];
160
161 vport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
162 if (adapter->txq_model == 0) {
163 vport_info->txq_model =
164 rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
165 vport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;
166 vport_info->num_tx_complq =
167 IDPF_DEFAULT_TXQ_NUM * IDPF_TX_COMPLQ_PER_GRP;
168 } else {
169 vport_info->txq_model =
170 rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
171 vport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;
172 vport_info->num_tx_complq = 0;
173 }
174 if (adapter->rxq_model == 0) {
175 vport_info->rxq_model =
176 rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
177 vport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;
178 vport_info->num_rx_bufq =
179 IDPF_DEFAULT_RXQ_NUM * IDPF_RX_BUFQ_PER_GRP;
180 } else {
181 vport_info->rxq_model =
182 rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
183 vport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;
184 vport_info->num_rx_bufq = 0;
185 }
186
187 return 0;
188}
189
/* Extract the numeric vport id from an ethdev name of the form
 * "idpf_<pci-name>_vport_<id>" (see idpf_pci_probe()).
 *
 * Returns the id (>= 0) on success, -EINVAL when the name contains no
 * "vport_" tag, no digits after it, trailing garbage, or a value that
 * does not fit in 16 bits.
 *
 * Fix: the previous version never checked the strtoul() end pointer, so
 * "..._vport_" (no digits) or "..._vport_3x" silently parsed to 0 / 3,
 * making garbage indistinguishable from the valid id 0.
 */
static int
idpf_parse_devarg_id(char *name)
{
	unsigned long val;
	char *p, *end;

	p = strstr(name, "vport_");
	if (p == NULL)
		return -EINVAL;

	p += sizeof("vport_") - 1;

	errno = 0;
	val = strtoul(p, &end, 10);
	/* Require at least one digit, no trailing characters, and a
	 * value representable as a 16-bit vport id.
	 */
	if (end == p || *end != '\0' || errno == ERANGE || val > 0xFFFF)
		return -EINVAL;

	return (int)val;
}
207
/* Maximum RSS key size (bytes) the driver stores locally. */
#define IDPF_RSS_KEY_LEN 52

/* Mirror the CREATE_VPORT response from the control plane into the
 * driver's vport structure, and record the queue-id / tail-register
 * layout described by the response's chunk list.  Returns 0 on success,
 * -EINVAL when the ethdev name carries no parsable vport id.
 */
static int
idpf_init_vport(struct rte_eth_dev *dev)
{
	struct idpf_vport *vport = dev->data->dev_private;
	struct idpf_adapter *adapter = vport->adapter;
	uint16_t idx = adapter->cur_vport_idx;
	struct virtchnl2_create_vport *vport_info =
		(struct virtchnl2_create_vport *)adapter->vport_recv_info[idx];
	int i, type, ret;

	vport->vport_id = vport_info->vport_id;
	vport->txq_model = vport_info->txq_model;
	vport->rxq_model = vport_info->rxq_model;
	vport->num_tx_q = vport_info->num_tx_q;
	vport->num_tx_complq = vport_info->num_tx_complq;
	vport->num_rx_q = vport_info->num_rx_q;
	vport->num_rx_bufq = vport_info->num_rx_bufq;
	vport->max_mtu = vport_info->max_mtu;
	rte_memcpy(vport->default_mac_addr,
		   vport_info->default_mac_addr, ETH_ALEN);
	vport->rss_algorithm = vport_info->rss_algorithm;
	/* Cap the key size at what the driver can store (see above). */
	vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
				      vport_info->rss_key_size);
	vport->rss_lut_size = vport_info->rss_lut_size;
	vport->sw_idx = idx;

	/* Each chunk describes one contiguous range of queue ids of a
	 * single type, plus its tail-register base and stride.
	 */
	for (i = 0; i < vport_info->chunks.num_chunks; i++) {
		type = vport_info->chunks.chunks[i].type;
		switch (type) {
		case VIRTCHNL2_QUEUE_TYPE_TX:
			vport->chunks_info.tx_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.tx_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.tx_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		case VIRTCHNL2_QUEUE_TYPE_RX:
			vport->chunks_info.rx_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.rx_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.rx_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
			vport->chunks_info.tx_compl_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.tx_compl_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.tx_compl_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
			vport->chunks_info.rx_buf_start_qid =
				vport_info->chunks.chunks[i].start_queue_id;
			vport->chunks_info.rx_buf_qtail_start =
				vport_info->chunks.chunks[i].qtail_reg_start;
			vport->chunks_info.rx_buf_qtail_spacing =
				vport_info->chunks.chunks[i].qtail_reg_spacing;
			break;
		default:
			/* Unknown chunk types are skipped, not fatal. */
			PMD_INIT_LOG(ERR, "Unsupported queue type");
			break;
		}
	}

	/* The ethdev name encodes the devarg vport id; recover it. */
	ret = idpf_parse_devarg_id(dev->data->name);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "Failed to parse devarg id.");
		return -EINVAL;
	}
	vport->devarg_id = ret;

	vport->dev_data = dev->data;

	adapter->vports[idx] = vport;

	return 0;
}
290
291static int
292idpf_config_rss(struct idpf_vport *vport)
293{
294 int ret;
295
296 ret = idpf_vc_set_rss_key(vport);
297 if (ret != 0) {
298 PMD_INIT_LOG(ERR, "Failed to configure RSS key");
299 return ret;
300 }
301
302 ret = idpf_vc_set_rss_lut(vport);
303 if (ret != 0) {
304 PMD_INIT_LOG(ERR, "Failed to configure RSS lut");
305 return ret;
306 }
307
308 ret = idpf_vc_set_rss_hash(vport);
309 if (ret != 0) {
310 PMD_INIT_LOG(ERR, "Failed to configure RSS hash");
311 return ret;
312 }
313
314 return ret;
315}
316
/* Build the host-side RSS state (key, LUT, default hash set) and push
 * it to the device.  On any failure every buffer allocated here is
 * released again through the goto cleanup chain, leaving the vport
 * pointers NULL.
 */
static int
idpf_init_rss(struct idpf_vport *vport)
{
	struct rte_eth_rss_conf *rss_conf;
	uint16_t i, nb_q, lut_size;
	int ret = 0;

	rss_conf = &vport->dev_data->dev_conf.rx_adv_conf.rss_conf;
	nb_q = vport->dev_data->nb_rx_queues;

	vport->rss_key = rte_zmalloc("rss_key",
				     vport->rss_key_size, 0);
	if (vport->rss_key == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate RSS key");
		ret = -ENOMEM;
		goto err_alloc_key;
	}

	lut_size = vport->rss_lut_size;
	vport->rss_lut = rte_zmalloc("rss_lut",
				     sizeof(uint32_t) * lut_size, 0);
	if (vport->rss_lut == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate RSS lut");
		ret = -ENOMEM;
		goto err_alloc_lut;
	}

	/* No key supplied by the application: generate a random one;
	 * otherwise the key must match the device key size exactly.
	 */
	if (rss_conf->rss_key == NULL) {
		for (i = 0; i < vport->rss_key_size; i++)
			vport->rss_key[i] = (uint8_t)rte_rand();
	} else if (rss_conf->rss_key_len != vport->rss_key_size) {
		PMD_INIT_LOG(ERR, "Invalid RSS key length in RSS configuration, should be %d",
			     vport->rss_key_size);
		ret = -EINVAL;
		goto err_cfg_key;
	} else {
		rte_memcpy(vport->rss_key, rss_conf->rss_key,
			   vport->rss_key_size);
	}

	/* Default LUT: spread entries round-robin over the Rx queues. */
	for (i = 0; i < lut_size; i++)
		vport->rss_lut[i] = i % nb_q;

	vport->rss_hf = IDPF_DEFAULT_RSS_HASH_EXPANDED;

	ret = idpf_config_rss(vport);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to configure RSS");
		goto err_cfg_key;
	}

	return ret;

err_cfg_key:
	rte_free(vport->rss_lut);
	vport->rss_lut = NULL;
err_alloc_lut:
	rte_free(vport->rss_key);
	vport->rss_key = NULL;
err_alloc_key:
	return ret;
}
379
380static int
381idpf_dev_configure(struct rte_eth_dev *dev)
382{
383 struct idpf_vport *vport = dev->data->dev_private;
384 struct rte_eth_conf *conf = &dev->data->dev_conf;
385 struct idpf_adapter *adapter = vport->adapter;
386 int ret;
387
388 if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
389 PMD_INIT_LOG(ERR, "Setting link speed is not supported");
390 return -ENOTSUP;
391 }
392
393 if (conf->txmode.mq_mode != RTE_ETH_MQ_TX_NONE) {
394 PMD_INIT_LOG(ERR, "Multi-queue TX mode %d is not supported",
395 conf->txmode.mq_mode);
396 return -ENOTSUP;
397 }
398
399 if (conf->lpbk_mode != 0) {
400 PMD_INIT_LOG(ERR, "Loopback operation mode %d is not supported",
401 conf->lpbk_mode);
402 return -ENOTSUP;
403 }
404
405 if (conf->dcb_capability_en != 0) {
406 PMD_INIT_LOG(ERR, "Priority Flow Control(PFC) if not supported");
407 return -ENOTSUP;
408 }
409
410 if (conf->intr_conf.lsc != 0) {
411 PMD_INIT_LOG(ERR, "LSC interrupt is not supported");
412 return -ENOTSUP;
413 }
414
415 if (conf->intr_conf.rxq != 0) {
416 PMD_INIT_LOG(ERR, "RXQ interrupt is not supported");
417 return -ENOTSUP;
418 }
419
420 if (conf->intr_conf.rmv != 0) {
421 PMD_INIT_LOG(ERR, "RMV interrupt is not supported");
422 return -ENOTSUP;
423 }
424
425 if (adapter->caps->rss_caps != 0 && dev->data->nb_rx_queues != 0) {
426 ret = idpf_init_rss(vport);
427 if (ret != 0) {
428 PMD_INIT_LOG(ERR, "Failed to init rss");
429 return ret;
430 }
431 } else {
432 PMD_INIT_LOG(ERR, "RSS is not supported.");
433 return -1;
434 }
435
436 return 0;
437}
438
/* Map all Rx queues onto the (single) interrupt vector allocated at
 * dev_start time, program the dynamic-control ITR register for
 * write-back-on-ITR, and send the queue/vector mapping to the control
 * plane.  Returns 0 on success, -1 on failure.
 */
static int
idpf_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
	struct idpf_vport *vport = dev->data->dev_private;
	struct idpf_adapter *adapter = vport->adapter;
	struct virtchnl2_queue_vector *qv_map;
	struct idpf_hw *hw = &adapter->hw;
	uint32_t dynctl_reg_start;
	uint32_t itrn_reg_start;
	uint32_t dynctl_val, itrn_val;
	uint16_t i;

	qv_map = rte_zmalloc("qv_map",
			     dev->data->nb_rx_queues *
			     sizeof(struct virtchnl2_queue_vector), 0);
	if (qv_map == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
			    dev->data->nb_rx_queues);
		goto qv_map_alloc_err;
	}

	/* Register offsets come from the ALLOC_VECTORS response; only
	 * the first vector chunk is consulted since a single vector
	 * serves every Rx queue.
	 */
	dynctl_reg_start =
		vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
	itrn_reg_start =
		vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
	dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
	PMD_DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x",
		    dynctl_val);
	itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
	PMD_DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);

	/* Keep the interval already programmed in the ITRN register when
	 * it is non-zero, otherwise fall back to the driver default.
	 * Both writes select ITR index 0 and enable write-back on ITR.
	 */
	if (itrn_val != 0)
		IDPF_WRITE_REG(hw,
			       dynctl_reg_start,
			       VIRTCHNL2_ITR_IDX_0 <<
			       PF_GLINT_DYN_CTL_ITR_INDX_S |
			       PF_GLINT_DYN_CTL_WB_ON_ITR_M |
			       itrn_val <<
			       PF_GLINT_DYN_CTL_INTERVAL_S);
	else
		IDPF_WRITE_REG(hw,
			       dynctl_reg_start,
			       VIRTCHNL2_ITR_IDX_0 <<
			       PF_GLINT_DYN_CTL_ITR_INDX_S |
			       PF_GLINT_DYN_CTL_WB_ON_ITR_M |
			       IDPF_DFLT_INTERVAL <<
			       PF_GLINT_DYN_CTL_INTERVAL_S);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		/* Every queue shares the first allocated vector. */
		qv_map[i].queue_id = vport->chunks_info.rx_start_qid + i;
		qv_map[i].vector_id =
			vport->recv_vectors->vchunks.vchunks->start_vector_id;
	}
	vport->qv_map = qv_map;

	if (idpf_vc_config_irq_map_unmap(vport, true) != 0) {
		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
		goto config_irq_map_err;
	}

	return 0;

config_irq_map_err:
	rte_free(vport->qv_map);
	vport->qv_map = NULL;

qv_map_alloc_err:
	return -1;
}
522
523static int
524idpf_start_queues(struct rte_eth_dev *dev)
525{
526 struct idpf_rx_queue *rxq;
527 struct idpf_tx_queue *txq;
528 int err = 0;
529 int i;
530
531 for (i = 0; i < dev->data->nb_tx_queues; i++) {
532 txq = dev->data->tx_queues[i];
533 if (txq == NULL || txq->tx_deferred_start)
534 continue;
535 err = idpf_tx_queue_start(dev, i);
536 if (err != 0) {
537 PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
538 return err;
539 }
540 }
541
542 for (i = 0; i < dev->data->nb_rx_queues; i++) {
543 rxq = dev->data->rx_queues[i];
544 if (rxq == NULL || rxq->rx_deferred_start)
545 continue;
546 err = idpf_rx_queue_start(dev, i);
547 if (err != 0) {
548 PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
549 return err;
550 }
551 }
552
553 return err;
554}
555
/* ethdev .dev_start callback: validate the MTU, allocate interrupt
 * vectors, map Rx queues to them, start all queues, install the burst
 * functions and finally enable the vport on the device.
 */
static int
idpf_dev_start(struct rte_eth_dev *dev)
{
	struct idpf_vport *vport = dev->data->dev_private;
	struct idpf_adapter *adapter = vport->adapter;
	uint16_t num_allocated_vectors =
		adapter->caps->num_allocated_vectors;
	uint16_t req_vecs_num;
	int ret;

	vport->stopped = 0;

	if (dev->data->mtu > vport->max_mtu) {
		PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu);
		ret = -EINVAL;
		goto err_mtu;
	}

	vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;

	/* Interrupt vectors are a device-wide budget shared by all
	 * vports; refuse to exceed what the capabilities granted.
	 */
	req_vecs_num = IDPF_DFLT_Q_VEC_NUM;
	if (req_vecs_num + adapter->used_vecs_num > num_allocated_vectors) {
		PMD_DRV_LOG(ERR, "The accumulated request vectors' number should be less than %d",
			    num_allocated_vectors);
		ret = -EINVAL;
		goto err_mtu;
	}

	ret = idpf_vc_alloc_vectors(vport, req_vecs_num);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to allocate interrupt vectors");
		goto err_mtu;
	}
	adapter->used_vecs_num += req_vecs_num;

	/* NOTE(review): failures below this point leave the vectors
	 * allocated (and used_vecs_num bumped); they are only released
	 * later by idpf_dev_stop() - confirm this is intentional.
	 */
	ret = idpf_config_rx_queues_irqs(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to configure irqs");
		goto err_mtu;
	}

	ret = idpf_start_queues(dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to start queues");
		goto err_mtu;
	}

	/* Install the Rx/Tx datapath callbacks for the configured modes. */
	idpf_set_rx_function(dev);
	idpf_set_tx_function(dev);

	ret = idpf_vc_ena_dis_vport(vport, true);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to enable vport");
		goto err_vport;
	}

	return 0;

err_vport:
	idpf_stop_queues(dev);
err_mtu:
	return ret;
}
619
620static int
621idpf_dev_stop(struct rte_eth_dev *dev)
622{
623 struct idpf_vport *vport = dev->data->dev_private;
624
625 if (vport->stopped == 1)
626 return 0;
627
628 idpf_vc_ena_dis_vport(vport, false);
629
630 idpf_stop_queues(dev);
631
632 idpf_vc_config_irq_map_unmap(vport, false);
633
634 if (vport->recv_vectors != NULL)
635 idpf_vc_dealloc_vectors(vport);
636
637 vport->stopped = 1;
638
639 return 0;
640}
641
642static int
643idpf_dev_close(struct rte_eth_dev *dev)
644{
645 struct idpf_vport *vport = dev->data->dev_private;
646 struct idpf_adapter *adapter = vport->adapter;
647
648 idpf_dev_stop(dev);
649
650 idpf_vc_destroy_vport(vport);
651
652 rte_free(vport->rss_lut);
653 vport->rss_lut = NULL;
654
655 rte_free(vport->rss_key);
656 vport->rss_key = NULL;
657
658 rte_free(vport->recv_vectors);
659 vport->recv_vectors = NULL;
660
661 rte_free(vport->qv_map);
662 vport->qv_map = NULL;
663
664 adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
665
666 rte_free(vport);
667 dev->data->dev_private = NULL;
668
669 return 0;
670}
671
/* Record a requested vport id in adapter->req_vports, ignoring
 * duplicates.  Returns 0 on success, -EINVAL when the request array is
 * already full.
 */
static int
insert_value(struct idpf_adapter *adapter, uint16_t id)
{
	uint16_t i;

	/* Silently accept ids that were already requested. */
	for (i = 0; i < adapter->req_vport_nb; i++) {
		if (adapter->req_vports[i] == id)
			return 0;
	}

	if (adapter->req_vport_nb >= RTE_DIM(adapter->req_vports)) {
		/* NOTE(review): the message cites IDPF_MAX_VPORT_NUM while
		 * the check uses RTE_DIM(req_vports); presumably the array
		 * is sized IDPF_MAX_VPORT_NUM - confirm they agree.
		 */
		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
			     IDPF_MAX_VPORT_NUM);
		return -EINVAL;
	}

	adapter->req_vports[adapter->req_vport_nb] = id;
	adapter->req_vport_nb++;

	return 0;
}
693
/* Parse a single vport id ("3") or an inclusive range ("3-5") at the
 * start of 'value' and record the id(s) via insert_value().  Returns a
 * pointer just past the parsed text so the caller can continue at a
 * following comma, or NULL on any parse/validation failure.
 */
static const char *
parse_range(const char *value, struct idpf_adapter *adapter)
{
	uint16_t lo, hi, i;
	int n = 0;
	int result;
	const char *pos = value;

	/* %n stores the number of characters consumed so far.  With one
	 * conversion only the first %n is written; with two the second
	 * overwrites it - either way 'n' ends up marking the end of the
	 * parsed span.
	 */
	result = sscanf(value, "%hu%n-%hu%n", &lo, &n, &hi, &n);
	if (result == 1) {
		if (lo >= IDPF_MAX_VPORT_NUM)
			return NULL;
		if (insert_value(adapter, lo) != 0)
			return NULL;
	} else if (result == 2) {
		/* Range must be well-ordered and within bounds. */
		if (lo > hi || hi >= IDPF_MAX_VPORT_NUM)
			return NULL;
		for (i = lo; i <= hi; i++) {
			if (insert_value(adapter, i) != 0)
				return NULL;
		}
	} else {
		return NULL;
	}

	return pos + n;
}
721
/* kvargs handler for the "vport" devarg.  Accepts a comma-separated
 * list of ids and ranges, optionally wrapped in brackets, e.g. "0" or
 * "[0,2-4]".  Valid ids are stored in adapter->req_vports and marked as
 * in use in adapter->cur_vports.  Returns 0 on success, -EINVAL on any
 * syntax error, overflow, or already-created vport id.
 */
static int
parse_vport(const char *key, const char *value, void *args)
{
	struct idpf_adapter *adapter = args;
	const char *pos = value;
	int i;

	adapter->req_vport_nb = 0;

	if (*pos == '[')
		pos++;

	/* Walk "range[,range]..." until parse_range() stops advancing. */
	while (1) {
		pos = parse_range(pos, adapter);
		if (pos == NULL) {
			PMD_INIT_LOG(ERR, "invalid value:\"%s\" for key:\"%s\", ",
				     value, key);
			return -EINVAL;
		}
		if (*pos != ',')
			break;
		pos++;
	}

	/* An opening bracket must be matched by a closing one. */
	if (*value == '[' && *pos != ']') {
		PMD_INIT_LOG(ERR, "invalid value:\"%s\" for key:\"%s\", ",
			     value, key);
		return -EINVAL;
	}

	if (adapter->cur_vport_nb + adapter->req_vport_nb >
	    IDPF_MAX_VPORT_NUM) {
		PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
			     IDPF_MAX_VPORT_NUM);
		return -EINVAL;
	}

	/* Claim each requested id, rejecting ids already created by an
	 * earlier probe on this device.
	 */
	for (i = 0; i < adapter->req_vport_nb; i++) {
		if ((adapter->cur_vports & RTE_BIT32(adapter->req_vports[i])) == 0) {
			adapter->cur_vports |= RTE_BIT32(adapter->req_vports[i]);
			adapter->cur_vport_nb++;
		} else {
			PMD_INIT_LOG(ERR, "Vport %d has been created",
				     adapter->req_vports[i]);
			return -EINVAL;
		}
	}

	return 0;
}
772
773static int
774parse_bool(const char *key, const char *value, void *args)
775{
776 int *i = args;
777 char *end;
778 int num;
779
780 errno = 0;
781
782 num = strtoul(value, &end, 10);
783
784 if (errno == ERANGE || (num != 0 && num != 1)) {
785 PMD_INIT_LOG(ERR, "invalid value:\"%s\" for key:\"%s\", value must be 0 or 1",
786 value, key);
787 return -EINVAL;
788 }
789
790 *i = num;
791 return 0;
792}
793
794static int
795idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
796{
797 struct rte_devargs *devargs = pci_dev->device.devargs;
798 struct rte_kvargs *kvlist;
799 int ret;
800
801 if (devargs == NULL)
802 return 0;
803
804 kvlist = rte_kvargs_parse(devargs->args, idpf_valid_args);
805 if (kvlist == NULL) {
806 PMD_INIT_LOG(ERR, "invalid kvargs key");
807 return -EINVAL;
808 }
809
810 ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
811 adapter);
812 if (ret != 0)
813 goto bail;
814
815 ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
816 &adapter->txq_model);
817 if (ret != 0)
818 goto bail;
819
820 ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
821 &adapter->rxq_model);
822 if (ret != 0)
823 goto bail;
824
825bail:
826 rte_kvargs_free(kvlist);
827 return ret;
828}
829
830static void
831idpf_reset_pf(struct idpf_hw *hw)
832{
833 uint32_t reg;
834
835 reg = IDPF_READ_REG(hw, PFGEN_CTRL);
836 IDPF_WRITE_REG(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR));
837}
838
839#define IDPF_RESET_WAIT_CNT 100
840static int
841idpf_check_pf_reset_done(struct idpf_hw *hw)
842{
843 uint32_t reg;
844 int i;
845
846 for (i = 0; i < IDPF_RESET_WAIT_CNT; i++) {
847 reg = IDPF_READ_REG(hw, PFGEN_RSTAT);
848 if (reg != 0xFFFFFFFF && (reg & PFGEN_RSTAT_PFR_STATE_M))
849 return 0;
850 rte_delay_ms(1000);
851 }
852
853 PMD_INIT_LOG(ERR, "IDPF reset timeout");
854 return -EBUSY;
855}
856
/* One Tx and one Rx mailbox control queue. */
#define CTLQ_NUM 2
/* Create the mailbox control-queue pair used for all virtchnl traffic
 * with the control plane, and cache the resulting handles in hw->asq
 * (send) and hw->arq (receive).  Returns 0 on success, a negative error
 * otherwise; on partial failure the queues are deinitialized again.
 */
static int
idpf_init_mbx(struct idpf_hw *hw)
{
	struct idpf_ctlq_create_info ctlq_info[CTLQ_NUM] = {
		{
			.type = IDPF_CTLQ_TYPE_MAILBOX_TX,
			.id = IDPF_CTLQ_ID,
			.len = IDPF_CTLQ_LEN,
			.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
			.reg = {
				.head = PF_FW_ATQH,
				.tail = PF_FW_ATQT,
				.len = PF_FW_ATQLEN,
				.bah = PF_FW_ATQBAH,
				.bal = PF_FW_ATQBAL,
				.len_mask = PF_FW_ATQLEN_ATQLEN_M,
				.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M,
				.head_mask = PF_FW_ATQH_ATQH_M,
			}
		},
		{
			.type = IDPF_CTLQ_TYPE_MAILBOX_RX,
			.id = IDPF_CTLQ_ID,
			.len = IDPF_CTLQ_LEN,
			.buf_size = IDPF_DFLT_MBX_BUF_SIZE,
			.reg = {
				.head = PF_FW_ARQH,
				.tail = PF_FW_ARQT,
				.len = PF_FW_ARQLEN,
				.bah = PF_FW_ARQBAH,
				.bal = PF_FW_ARQBAL,
				.len_mask = PF_FW_ARQLEN_ARQLEN_M,
				.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M,
				.head_mask = PF_FW_ARQH_ARQH_M,
			}
		}
	};
	struct idpf_ctlq_info *ctlq;
	int ret;

	ret = idpf_ctlq_init(hw, CTLQ_NUM, ctlq_info);
	if (ret != 0)
		return ret;

	/* idpf_ctlq_init() queued the created queues on hw->cq_list_head;
	 * pick out the mailbox Tx and Rx queues by id and type.
	 */
	LIST_FOR_EACH_ENTRY_SAFE(ctlq, NULL, &hw->cq_list_head,
				 struct idpf_ctlq_info, cq_list) {
		if (ctlq->q_id == IDPF_CTLQ_ID &&
		    ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
			hw->asq = ctlq;
		if (ctlq->q_id == IDPF_CTLQ_ID &&
		    ctlq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX)
			hw->arq = ctlq;
	}

	if (hw->asq == NULL || hw->arq == NULL) {
		idpf_ctlq_deinit(hw);
		ret = -ENOENT;
	}

	return ret;
}
919
/* One-time per-PCI-device initialization: map registers, reset the PF,
 * bring up the mailbox, negotiate the virtchnl API version, fetch the
 * device capabilities and size the per-vport bookkeeping arrays.  On
 * failure everything acquired so far is unwound via the goto chain.
 */
static int
idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
{
	struct idpf_hw *hw = &adapter->hw;
	int ret = 0;

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->hw_addr_len = pci_dev->mem_resource[0].len;
	hw->back = adapter;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	/* The PCI device name doubles as the adapter lookup key, see
	 * idpf_find_adapter().
	 */
	strncpy(adapter->name, pci_dev->device.name, PCI_PRI_STR_SIZE);

	idpf_reset_pf(hw);
	ret = idpf_check_pf_reset_done(hw);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "IDPF is still resetting");
		goto err;
	}

	ret = idpf_init_mbx(hw);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to init mailbox");
		goto err;
	}

	/* Scratch buffer used to receive mailbox responses. */
	adapter->mbx_resp = rte_zmalloc("idpf_adapter_mbx_resp",
					IDPF_DFLT_MBX_BUF_SIZE, 0);
	if (adapter->mbx_resp == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate idpf_adapter_mbx_resp memory");
		ret = -ENOMEM;
		goto err_mbx;
	}

	ret = idpf_vc_check_api_version(adapter);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to check api version");
		goto err_api;
	}

	ret = idpf_get_pkt_type(adapter);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to set ptype table");
		goto err_api;
	}

	adapter->caps = rte_zmalloc("idpf_caps",
				sizeof(struct virtchnl2_get_capabilities), 0);
	if (adapter->caps == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate idpf_caps memory");
		ret = -ENOMEM;
		goto err_api;
	}

	ret = idpf_vc_get_caps(adapter);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to get capabilities");
		goto err_caps;
	}

	adapter->max_vport_nb = adapter->caps->max_vports;

	/* Per-vport request/response/state arrays, sized from caps. */
	adapter->vport_req_info = rte_zmalloc("vport_req_info",
					      adapter->max_vport_nb *
					      sizeof(*adapter->vport_req_info),
					      0);
	if (adapter->vport_req_info == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate vport_req_info memory");
		ret = -ENOMEM;
		goto err_caps;
	}

	adapter->vport_recv_info = rte_zmalloc("vport_recv_info",
					       adapter->max_vport_nb *
					       sizeof(*adapter->vport_recv_info),
					       0);
	if (adapter->vport_recv_info == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate vport_recv_info memory");
		ret = -ENOMEM;
		goto err_vport_recv_info;
	}

	adapter->vports = rte_zmalloc("vports",
				      adapter->max_vport_nb *
				      sizeof(*adapter->vports),
				      0);
	if (adapter->vports == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate vports memory");
		ret = -ENOMEM;
		goto err_vports;
	}

	/* How many queue descriptions fit into one mailbox buffer. */
	adapter->max_rxq_per_msg = (IDPF_DFLT_MBX_BUF_SIZE -
				sizeof(struct virtchnl2_config_rx_queues)) /
				sizeof(struct virtchnl2_rxq_info);
	adapter->max_txq_per_msg = (IDPF_DFLT_MBX_BUF_SIZE -
				sizeof(struct virtchnl2_config_tx_queues)) /
				sizeof(struct virtchnl2_txq_info);

	adapter->cur_vports = 0;
	adapter->cur_vport_nb = 0;

	adapter->used_vecs_num = 0;

	return ret;

err_vports:
	rte_free(adapter->vport_recv_info);
	adapter->vport_recv_info = NULL;
err_vport_recv_info:
	rte_free(adapter->vport_req_info);
	adapter->vport_req_info = NULL;
err_caps:
	rte_free(adapter->caps);
	adapter->caps = NULL;
err_api:
	rte_free(adapter->mbx_resp);
	adapter->mbx_resp = NULL;
err_mbx:
	idpf_ctlq_deinit(hw);
err:
	return ret;
}
1045
/* ethdev callbacks implemented by this PMD. */
static const struct eth_dev_ops idpf_eth_dev_ops = {
	.dev_configure = idpf_dev_configure,
	.dev_close = idpf_dev_close,
	.rx_queue_setup = idpf_rx_queue_setup,
	.tx_queue_setup = idpf_tx_queue_setup,
	.dev_infos_get = idpf_dev_info_get,
	.dev_start = idpf_dev_start,
	.dev_stop = idpf_dev_stop,
	.link_update = idpf_dev_link_update,
	.rx_queue_start = idpf_rx_queue_start,
	.tx_queue_start = idpf_tx_queue_start,
	.rx_queue_stop = idpf_rx_queue_stop,
	.tx_queue_stop = idpf_tx_queue_stop,
	.rx_queue_release = idpf_dev_rx_queue_release,
	.tx_queue_release = idpf_dev_tx_queue_release,
	.mtu_set = idpf_dev_mtu_set,
	.dev_supported_ptypes_get = idpf_dev_supported_ptypes_get,
};
1064
1065static uint16_t
1066idpf_get_vport_idx(struct idpf_vport **vports, uint16_t max_vport_nb)
1067{
1068 uint16_t vport_idx;
1069 uint16_t i;
1070
1071 for (i = 0; i < max_vport_nb; i++) {
1072 if (vports[i] == NULL)
1073 break;
1074 }
1075
1076 if (i == max_vport_nb)
1077 vport_idx = IDPF_INVALID_VPORT_IDX;
1078 else
1079 vport_idx = i;
1080
1081 return vport_idx;
1082}
1083
/* Per-ethdev init callback passed to rte_eth_dev_create(): prepares the
 * CREATE_VPORT request, asks the control plane to create the vport and
 * mirrors the response into the driver's vport state, including the
 * default MAC address.
 */
static int
idpf_dev_init(struct rte_eth_dev *dev, void *init_params)
{
	struct idpf_vport *vport = dev->data->dev_private;
	struct idpf_adapter *adapter = init_params;
	int ret = 0;

	dev->dev_ops = &idpf_eth_dev_ops;
	vport->adapter = adapter;

	ret = idpf_init_vport_req_info(dev);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to init vport req_info.");
		goto err;
	}

	ret = idpf_vc_create_vport(adapter);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to create vport.");
		goto err_create_vport;
	}

	ret = idpf_init_vport(dev);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to init vports.");
		goto err_init_vport;
	}

	/* Advance cur_vport_idx to the next free slot so a later probe
	 * on the same device uses a fresh entry.
	 */
	adapter->cur_vport_idx = idpf_get_vport_idx(adapter->vports,
						    adapter->max_vport_nb);

	dev->data->mac_addrs = rte_zmalloc(NULL, RTE_ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate mac_addr memory.");
		ret = -ENOMEM;
		goto err_init_vport;
	}

	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
			    &dev->data->mac_addrs[0]);

	return 0;

err_init_vport:
	idpf_vc_destroy_vport(vport);
err_create_vport:
	rte_free(vport->adapter->vport_req_info[vport->adapter->cur_vport_idx]);
err:
	return ret;
}
1134
/* PCI IDs this driver binds to. */
static const struct rte_pci_id pci_id_idpf_map[] = {
	{ RTE_PCI_DEVICE(IDPF_INTEL_VENDOR_ID, IDPF_DEV_ID_PF) },
	{ .vendor_id = 0, /* sentinel */ },
};
1139
1140struct idpf_adapter *
1141idpf_find_adapter(struct rte_pci_device *pci_dev)
1142{
1143 struct idpf_adapter *adapter;
1144 int found = 0;
1145
1146 if (pci_dev == NULL)
1147 return NULL;
1148
1149 rte_spinlock_lock(&idpf_adapter_lock);
1150 TAILQ_FOREACH(adapter, &idpf_adapter_list, next) {
1151 if (strncmp(adapter->name, pci_dev->device.name, PCI_PRI_STR_SIZE) == 0) {
1152 found = 1;
1153 break;
1154 }
1155 }
1156 rte_spinlock_unlock(&idpf_adapter_lock);
1157
1158 if (found == 0)
1159 return NULL;
1160
1161 return adapter;
1162}
1163
1164static void
1165idpf_adapter_rel(struct idpf_adapter *adapter)
1166{
1167 struct idpf_hw *hw = &adapter->hw;
1168 int i;
1169
1170 idpf_ctlq_deinit(hw);
1171
1172 rte_free(adapter->caps);
1173 adapter->caps = NULL;
1174
1175 rte_free(adapter->mbx_resp);
1176 adapter->mbx_resp = NULL;
1177
1178 if (adapter->vport_req_info != NULL) {
1179 for (i = 0; i < adapter->max_vport_nb; i++) {
1180 rte_free(adapter->vport_req_info[i]);
1181 adapter->vport_req_info[i] = NULL;
1182 }
1183 rte_free(adapter->vport_req_info);
1184 adapter->vport_req_info = NULL;
1185 }
1186
1187 if (adapter->vport_recv_info != NULL) {
1188 for (i = 0; i < adapter->max_vport_nb; i++) {
1189 rte_free(adapter->vport_recv_info[i]);
1190 adapter->vport_recv_info[i] = NULL;
1191 }
1192 rte_free(adapter->vport_recv_info);
1193 adapter->vport_recv_info = NULL;
1194 }
1195
1196 rte_free(adapter->vports);
1197 adapter->vports = NULL;
1198}
1199
1200static int
1201idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1202 struct rte_pci_device *pci_dev)
1203{
1204 struct idpf_adapter *adapter;
1205 char name[RTE_ETH_NAME_MAX_LEN];
1206 int i, retval;
1207 bool first_probe = false;
1208
1209 if (!idpf_adapter_list_init) {
1210 rte_spinlock_init(&idpf_adapter_lock);
1211 TAILQ_INIT(&idpf_adapter_list);
1212 idpf_adapter_list_init = true;
1213 }
1214
1215 adapter = idpf_find_adapter(pci_dev);
1216 if (adapter == NULL) {
1217 first_probe = true;
1218 adapter = rte_zmalloc("idpf_adapter",
1219 sizeof(struct idpf_adapter), 0);
1220 if (adapter == NULL) {
1221 PMD_INIT_LOG(ERR, "Failed to allocate adapter.");
1222 return -ENOMEM;
1223 }
1224
1225 retval = idpf_adapter_init(pci_dev, adapter);
1226 if (retval != 0) {
1227 PMD_INIT_LOG(ERR, "Failed to init adapter.");
1228 return retval;
1229 }
1230
1231 rte_spinlock_lock(&idpf_adapter_lock);
1232 TAILQ_INSERT_TAIL(&idpf_adapter_list, adapter, next);
1233 rte_spinlock_unlock(&idpf_adapter_lock);
1234 }
1235
1236 retval = idpf_parse_devargs(pci_dev, adapter);
1237 if (retval != 0) {
1238 PMD_INIT_LOG(ERR, "Failed to parse private devargs");
1239 goto err;
1240 }
1241
1242 if (adapter->req_vport_nb == 0) {
1243
1244 snprintf(name, sizeof(name), "idpf_%s_vport_0",
1245 pci_dev->device.name);
1246 retval = rte_eth_dev_create(&pci_dev->device, name,
1247 sizeof(struct idpf_vport),
1248 NULL, NULL, idpf_dev_init,
1249 adapter);
1250 if (retval != 0)
1251 PMD_DRV_LOG(ERR, "Failed to create default vport 0");
1252 adapter->cur_vports |= RTE_BIT32(0);
1253 adapter->cur_vport_nb++;
1254 } else {
1255 for (i = 0; i < adapter->req_vport_nb; i++) {
1256 snprintf(name, sizeof(name), "idpf_%s_vport_%d",
1257 pci_dev->device.name,
1258 adapter->req_vports[i]);
1259 retval = rte_eth_dev_create(&pci_dev->device, name,
1260 sizeof(struct idpf_vport),
1261 NULL, NULL, idpf_dev_init,
1262 adapter);
1263 if (retval != 0)
1264 PMD_DRV_LOG(ERR, "Failed to create vport %d",
1265 adapter->req_vports[i]);
1266 }
1267 }
1268
1269 return 0;
1270
1271err:
1272 if (first_probe) {
1273 rte_spinlock_lock(&idpf_adapter_lock);
1274 TAILQ_REMOVE(&idpf_adapter_list, adapter, next);
1275 rte_spinlock_unlock(&idpf_adapter_lock);
1276 idpf_adapter_rel(adapter);
1277 rte_free(adapter);
1278 }
1279 return retval;
1280}
1281
1282static int
1283idpf_pci_remove(struct rte_pci_device *pci_dev)
1284{
1285 struct idpf_adapter *adapter = idpf_find_adapter(pci_dev);
1286 uint16_t port_id;
1287
1288
1289 RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
1290 rte_eth_dev_close(port_id);
1291 }
1292
1293 rte_spinlock_lock(&idpf_adapter_lock);
1294 TAILQ_REMOVE(&idpf_adapter_list, adapter, next);
1295 rte_spinlock_unlock(&idpf_adapter_lock);
1296 idpf_adapter_rel(adapter);
1297 rte_free(adapter);
1298
1299 return 0;
1300}
1301
/* PCI driver glue; NEED_MAPPING: BARs must be mapped for register access. */
static struct rte_pci_driver rte_idpf_pmd = {
	.id_table = pci_id_idpf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = idpf_pci_probe,
	.remove = idpf_pci_remove,
};
1308
1309
1310
1311
1312
1313
1314RTE_PMD_REGISTER_PCI(net_idpf, rte_idpf_pmd);
1315RTE_PMD_REGISTER_PCI_TABLE(net_idpf, pci_id_idpf_map);
1316RTE_PMD_REGISTER_KMOD_DEP(net_ice, "* igb_uio | uio_pci_generic | vfio-pci");
1317
1318RTE_LOG_REGISTER_SUFFIX(idpf_logtype_init, init, NOTICE);
1319RTE_LOG_REGISTER_SUFFIX(idpf_logtype_driver, driver, NOTICE);
1320