// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/tso.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "cn10k.h"

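/* Per-queue NIX LF stats are read through "operation" registers:
 * an atomic add with the queue index encoded in the upper 32 bits
 * of the operand selects the queue, and the value returned by the
 * atomic operation is the current counter.
 */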
static void otx2_nix_rq_op_stats(struct queue_stats *stats,
				 struct otx2_nic *pfvf, int qidx)
{
	u64 incr = (u64)qidx << 32;
	u64 *ptr;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
	stats->bytes = otx2_atomic64_add(incr, ptr);

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
	stats->pkts = otx2_atomic64_add(incr, ptr);
}

static void otx2_nix_sq_op_stats(struct queue_stats *stats,
				 struct otx2_nic *pfvf, int qidx)
{
	u64 incr = (u64)qidx << 32;
	u64 *ptr;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
	stats->bytes = otx2_atomic64_add(incr, ptr);

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
	stats->pkts = otx2_atomic64_add(incr, ptr);
}

void otx2_update_lmac_stats(struct otx2_nic *pfvf)
{
	struct msg_req *req;

	if (!netif_running(pfvf->netdev))
		return;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return;
	}

	otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
}

void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf)
{
	struct msg_req *req;

	if (!netif_running(pfvf->netdev))
		return;
	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_fec_stats(&pfvf->mbox);
	if (req)
		otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
}

int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_rcv_queue *rq;

	if (!pfvf->qset.rq)
		return 0;

	rq = &pfvf->qset.rq[qidx];
	otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);
	return 1;
}

int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_snd_queue *sq;

	if (!pfvf->qset.sq)
		return 0;

	sq = &pfvf->qset.sq[qidx];
	otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx);
	return 1;
}

void otx2_get_dev_stats(struct otx2_nic *pfvf)
{
	struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats;

#define OTX2_GET_RX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
#define OTX2_GET_TX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_TX_STATX(reg))

	dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
	dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP);
	dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST);
	dev_stats->rx_mcast_frames = OTX2_GET_RX_STATS(RX_MCAST);
	dev_stats->rx_ucast_frames = OTX2_GET_RX_STATS(RX_UCAST);
	dev_stats->rx_frames = dev_stats->rx_bcast_frames +
			       dev_stats->rx_mcast_frames +
			       dev_stats->rx_ucast_frames;

	dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
	dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP);
	dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST);
	dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST);
	dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST);
	dev_stats->tx_frames = dev_stats->tx_bcast_frames +
			       dev_stats->tx_mcast_frames +
			       dev_stats->tx_ucast_frames;
}

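/* .ndo_get_stats64 handler, shared by the PF and VF drivers */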
void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_dev_stats *dev_stats;

	otx2_get_dev_stats(pfvf);

	dev_stats = &pfvf->hw.dev_stats;
	stats->rx_bytes = dev_stats->rx_bytes;
	stats->rx_packets = dev_stats->rx_frames;
	stats->rx_dropped = dev_stats->rx_drops;
	stats->multicast = dev_stats->rx_mcast_frames;

	stats->tx_bytes = dev_stats->tx_bytes;
	stats->tx_packets = dev_stats->tx_frames;
	stats->tx_dropped = dev_stats->tx_drops;
}
EXPORT_SYMBOL(otx2_get_stats64);

/* Sync MAC address with RVU AF */
static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
{
	struct nix_set_mac_addr *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	ether_addr_copy(req->mac_addr, mac);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
				struct net_device *netdev)
{
	struct nix_get_mac_addr_rsp *rsp;
	struct mbox_msghdr *msghdr;
	struct msg_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(msghdr)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(msghdr);
	}
	rsp = (struct nix_get_mac_addr_rsp *)msghdr;
	ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

int otx2_set_mac_address(struct net_device *netdev, void *p)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

		if (netif_running(netdev) &&
		    pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
			otx2_install_rxvlan_offload_flow(pfvf);

		if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
			otx2_dmacflt_update_pfmac_flow(pfvf);
	} else {
		return -EPERM;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_set_mac_address);

int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{
	struct nix_frs_cfg *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	pfvf->max_frs = mtu + OTX2_ETH_HLEN;
	req->maxlen = pfvf->max_frs;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

int otx2_config_pause_frm(struct otx2_nic *pfvf)
{
	struct cgx_pause_frm_cfg *req;
	int err;

	if (is_otx2_lbkvf(pfvf->pdev))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto unlock;
	}

	req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED);
	req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED);
	req->set = 1;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
unlock:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	struct nix_rss_flowkey_cfg_rsp *rsp;
	struct nix_rss_flowkey_cfg *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}
	req->mcam_index = -1; /* Default or reserved index */
	req->flowkey_cfg = rss->flowkey_cfg;
	req->group = DEFAULT_RSS_CONTEXT_GROUP;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	rsp = (struct nix_rss_flowkey_cfg_rsp *)
	       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		err = PTR_ERR(rsp);
		goto fail;
	}

	pfvf->hw.flowkey_alg_idx = rsp->alg_idx;
fail:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	const int index = rss->rss_size * ctx_id;
	struct mbox *mbox = &pfvf->mbox;
	struct otx2_rss_ctx *rss_ctx;
	struct nix_aq_enq_req *aq;
	int idx, err;

	mutex_lock(&mbox->lock);
	rss_ctx = rss->rss_ctx[ctx_id];
	/* Get memory to put this msg */
	for (idx = 0; idx < rss->rss_size; idx++) {
		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq) {
			/* The shared memory buffer can be full.
			 * Flush it and retry
			 */
			err = otx2_sync_mbox_msg(mbox);
			if (err) {
				mutex_unlock(&mbox->lock);
				return err;
			}
			aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
			if (!aq) {
				mutex_unlock(&mbox->lock);
				return -ENOMEM;
			}
		}

		aq->rss.rq = rss_ctx->ind_tbl[idx];

		/* Fill AQ info */
		aq->qidx = index + idx;
		aq->ctype = NIX_AQ_CTYPE_RSS;
		aq->op = NIX_AQ_INSTOP_INIT;
	}
	err = otx2_sync_mbox_msg(mbox);
	mutex_unlock(&mbox->lock);
	return err;
}

void otx2_set_rss_key(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	u64 *key = (u64 *)&rss->key[4];
	int idx;

	/* 352bit or 44byte key needs to be configured as below
	 * NIX_LF_RX_SECRETX5 = key<351:320>
	 * NIX_LF_RX_SECRETX4 = key<319:256>
	 * NIX_LF_RX_SECRETX3 = key<255:192>
	 * NIX_LF_RX_SECRETX2 = key<191:128>
	 * NIX_LF_RX_SECRETX1 = key<127:64>
	 * NIX_LF_RX_SECRETX0 = key<63:0>
	 */
	otx2_write64(pfvf, NIX_LF_RX_SECRETX(5),
		     (u64)(*((u32 *)&rss->key)) << 32);
	idx = sizeof(rss->key) / sizeof(u64);
	while (idx > 0) {
		idx--;
		otx2_write64(pfvf, NIX_LF_RX_SECRETX(idx), *key++);
	}
}

int otx2_rss_init(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	struct otx2_rss_ctx *rss_ctx;
	int idx, ret = 0;

	rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);

	/* Init RSS key if it is not setup already */
	if (!rss->enable)
		netdev_rss_key_fill(rss->key, sizeof(rss->key));
	otx2_set_rss_key(pfvf);

	if (!netif_is_rxfh_configured(pfvf->netdev)) {
		/* Set RSS group 0 as default indirection table */
		rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size,
								  GFP_KERNEL);
		if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP])
			return -ENOMEM;

		rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP];
		for (idx = 0; idx < rss->rss_size; idx++)
			rss_ctx->ind_tbl[idx] =
				ethtool_rxfh_indir_default(idx,
							   pfvf->hw.rx_queues);
	}
	ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP);
	if (ret)
		return ret;

	/* Flowkey or hash config to be used for generating flow tag */
	rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
			   NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
			   NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
			   NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN |
			   NIX_FLOW_KEY_TYPE_IPV4_PROTO;

	ret = otx2_set_flowkey_cfg(pfvf);
	if (ret)
		return ret;

	rss->enable = true;
	return 0;
}

/* Setup UDP segmentation algorithm in HW */
static void otx2_setup_udp_segmentation(struct nix_lso_format_cfg *lso, bool v4)
{
	struct nix_lso_format *field;

	field = (struct nix_lso_format *)&lso->fields[0];
	lso->field_mask = GENMASK(18, 0);

	/* IP's Length field */
	field->layer = NIX_TXLAYER_OL3;
	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
	field->offset = v4 ? 2 : 4;
	field->sizem1 = 1; /* i.e 2 bytes */
	field->alg = NIX_LSOALG_ADD_PAYLEN;
	field++;

	/* No ID field in IPv6 header */
	if (v4) {
		/* Increment IPID */
		field->layer = NIX_TXLAYER_OL3;
		field->offset = 4;
		field->sizem1 = 1;
		field->alg = NIX_LSOALG_ADD_SEGNUM;
		field++;
	}

	/* Update length in UDP header */
	field->layer = NIX_TXLAYER_OL4;
	field->offset = 4;
	field->sizem1 = 1;
	field->alg = NIX_LSOALG_ADD_PAYLEN;
}

/* Setup segmentation algorithms in HW and retrieve algorithm index */
void otx2_setup_segmentation(struct otx2_nic *pfvf)
{
	struct nix_lso_format_cfg_rsp *rsp;
	struct nix_lso_format_cfg *lso;
	struct otx2_hw *hw = &pfvf->hw;
	int err;

	mutex_lock(&pfvf->mbox.lock);

	/* UDPv4 segmentation */
	lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
	if (!lso)
		goto fail;

	/* Setup UDP/IP header fields that HW should update per segment */
	otx2_setup_udp_segmentation(lso, true);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	rsp = (struct nix_lso_format_cfg_rsp *)
	       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
	if (IS_ERR(rsp))
		goto fail;

	hw->lso_udpv4_idx = rsp->lso_format_idx;

	/* UDPv6 segmentation */
	lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
	if (!lso)
		goto fail;

	/* Setup UDP/IP header fields that HW should update per segment */
	otx2_setup_udp_segmentation(lso, false);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	rsp = (struct nix_lso_format_cfg_rsp *)
	       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
	if (IS_ERR(rsp))
		goto fail;

	hw->lso_udpv6_idx = rsp->lso_format_idx;
	mutex_unlock(&pfvf->mbox.lock);
	return;
fail:
	mutex_unlock(&pfvf->mbox.lock);
	netdev_info(pfvf->netdev,
		    "Failed to get LSO index for UDP GSO offload, disabling\n");
	pfvf->netdev->hw_features &= ~NETIF_F_GSO_UDP_L4;
}

void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
{
	/* Configure CQE interrupt coalescing parameters
	 *
	 * HW triggers an irq when ECOUNT > cq_ecount_wait, hence
	 * set 1 less than cq_ecount_wait. The cq_time_wait is in
	 * unit of usecs, convert that to 100ns count.
	 */
	otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx),
		     ((u64)(pfvf->hw.cq_time_wait * 10) << 48) |
		     ((u64)pfvf->hw.cq_qcount_wait << 32) |
		     (pfvf->hw.cq_ecount_wait - 1));
}

int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
		      dma_addr_t *dma)
{
	u8 *buf;

	buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
	if (unlikely(!buf))
		return -ENOMEM;

	*dma = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
				    DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(pfvf->dev, *dma))) {
		page_frag_free(buf);
		return -ENOMEM;
	}

	return 0;
}

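/* napi_alloc_frag_align() uses a per-CPU page fragment cache, so
 * callers outside of softirq context must disable bottom halves
 * around the allocation; this wrapper does exactly that.
 */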
static int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
			   dma_addr_t *dma)
{
	int ret;

	local_bh_disable();
	ret = __otx2_alloc_rbuf(pfvf, pool, dma);
	local_bh_enable();
	return ret;
}

int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
		      dma_addr_t *dma)
{
	if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) {
		struct refill_work *work;
		struct delayed_work *dwork;

		work = &pfvf->refill_wrk[cq->cq_idx];
		dwork = &work->pool_refill_work;
		/* Schedule a task if no other task is running */
		if (!cq->refill_task_sched) {
			cq->refill_task_sched = true;
			schedule_delayed_work(dwork,
					      msecs_to_jiffies(100));
		}
		return -ENOMEM;
	}
	return 0;
}

void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	schedule_work(&pfvf->reset_task);
}
EXPORT_SYMBOL(otx2_tx_timeout);

void otx2_get_mac_from_af(struct net_device *netdev)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int err;

	err = otx2_hw_get_mac_addr(pfvf, netdev);
	if (err)
		dev_warn(pfvf->dev, "Failed to read mac from hardware\n");

	/* If AF doesn't provide a valid MAC address then use a random one */
	if (!is_valid_ether_addr(netdev->dev_addr))
		eth_hw_addr_random(netdev);
}
EXPORT_SYMBOL(otx2_get_mac_from_af);

int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
{
	struct otx2_hw *hw = &pfvf->hw;
	struct nix_txschq_config *req;
	u64 schq, parent;
	u64 dwrr_val;

	dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	req->lvl = lvl;
	req->num_regs = 1;

	schq = hw->txschq_list[lvl][0];
	/* Set topology e.t.c configuration */
	if (lvl == NIX_TXSCH_LVL_SMQ) {
		req->reg[0] = NIX_AF_SMQX_CFG(schq);
		req->regval[0] = ((pfvf->netdev->max_mtu + OTX2_ETH_HLEN) << 8)
				 | OTX2_MIN_MTU;

		req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
				  (0x2ULL << 36);
		req->num_regs++;
		/* MDQ config */
		parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
		req->reg[1] = NIX_AF_MDQX_PARENT(schq);
		req->regval[1] = parent << 16;
		req->num_regs++;
		/* Set DWRR quantum */
		req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
		req->regval[2] = dwrr_val;
	} else if (lvl == NIX_TXSCH_LVL_TL4) {
		parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
		req->reg[0] = NIX_AF_TL4X_PARENT(schq);
		req->regval[0] = parent << 16;
		req->num_regs++;
		req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
		req->regval[1] = dwrr_val;
	} else if (lvl == NIX_TXSCH_LVL_TL3) {
		parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
		req->reg[0] = NIX_AF_TL3X_PARENT(schq);
		req->regval[0] = parent << 16;
		req->num_regs++;
		req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
		req->regval[1] = dwrr_val;
	} else if (lvl == NIX_TXSCH_LVL_TL2) {
		parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
		req->reg[0] = NIX_AF_TL2X_PARENT(schq);
		req->regval[0] = parent << 16;

		req->num_regs++;
		req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
		req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;

		req->num_regs++;
		req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
		/* Enable this queue and backpressure */
		req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
	} else if (lvl == NIX_TXSCH_LVL_TL1) {
		/* Default config for TL1.
		 * For VF this is always ignored.
		 */

		/* On CN10K, if RR_WEIGHT is greater than 16384, HW will
		 * clip it to 16384, so configuring a 24bit max value
		 * will work on both OTx2 and CN10K.
		 */
		req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
		req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;

		req->num_regs++;
		req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
		req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);

		req->num_regs++;
		req->reg[2] = NIX_AF_TL1X_CIR(schq);
		req->regval[2] = 0;
	}

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
	struct nix_txsch_alloc_req *req;
	int lvl;

	/* Get memory to put this msg */
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	/* Request one schq per level */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		req->schq[lvl] = 1;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_txschq_stop(struct otx2_nic *pfvf)
{
	struct nix_txsch_free_req *free_req;
	int lvl, schq, err;

	mutex_lock(&pfvf->mbox.lock);
	/* Free the transmit schedulers */
	free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
	if (!free_req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	free_req->flags = TXSCHQ_FREE_ALL;
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	/* Clear the txschq list */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++)
			pfvf->hw.txschq_list[lvl][schq] = 0;
	}
	return err;
}

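/* Wait until all in-flight SQEs are processed: poll NIX_LF_SQ_OP_STATUS
 * for every SQ until the SQE head and tail pointers match.
 */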
void otx2_sqb_flush(struct otx2_nic *pfvf)
{
	int qidx, sqe_tail, sqe_head;
	u64 incr, *ptr, val;
	int timeout = 1000;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
		incr = (u64)qidx << 32;
		while (timeout) {
			val = otx2_atomic64_add(incr, ptr);
			sqe_head = (val >> 20) & 0x3F;
			sqe_tail = (val >> 28) & 0x3F;
			if (sqe_head == sqe_tail)
				break;
			usleep_range(1, 3);
			timeout--;
		}
	}
}

/* RED and drop levels of CQ on packet reception.
 * For CQ, level is a measure of emptiness (0x0 = full, 255 = empty).
 */
#define RQ_PASS_LVL_CQ(skid, qsize)	((((skid) + 16) * 256) / (qsize))
#define RQ_DROP_LVL_CQ(skid, qsize)	(((skid) * 256) / (qsize))

/* RED and drop levels of AURA for packet reception.
 * For AURA, level is a measure of fullness (0x0 = empty, 255 = full).
 * Eg: For RQ length 1K, for pass/drop level 204/230.
 * RED accepts pkts if free pointers > 102 & <= 205.
 * Drops pkts if free pointers < 102.
 */
#define RQ_BP_LVL_AURA   (255 - ((85 * 256) / 100)) /* 85% of Aura */
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */

static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct nix_aq_enq_req *aq;

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->rq.cq = qidx;
	aq->rq.ena = 1;
	aq->rq.pb_caching = 1;
	aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */
	aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1;
	aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */
	aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */
	aq->rq.qint_idx = 0;
	aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */
	aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */
	aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
	aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
	aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA;
	aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA;

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

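/* Default (OTx2) implementation of the sq_aq_init hw_op; silicon
 * specific variants (e.g. CN10K) install their own callback.
 */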
int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
{
	struct otx2_nic *pfvf = dev;
	struct otx2_snd_queue *sq;
	struct nix_aq_enq_req *aq;

	sq = &pfvf->qset.sq[qidx];
	sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->sq.cq = pfvf->hw.rx_queues + qidx;
	aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
	aq->sq.cq_ena = 1;
	aq->sq.ena = 1;
	/* Only one SMQ is allocated, map all SQ's to that SMQ */
	aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
	aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
	aq->sq.default_chan = pfvf->hw.tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
	aq->sq.sqb_aura = sqb_aura;
	aq->sq.sq_int_ena = NIX_SQINT_BITS;
	aq->sq.qint_idx = 0;
	/* Due pipelining impact minimum 2000 unused SQ CQE's
	 * need to be maintained to avoid CQ overflow.
	 */
	aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt));

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_snd_queue *sq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[sqb_aura];
	sq = &qset->sq[qidx];
	sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128;
	sq->sqe_cnt = qset->sqe_cnt;

	err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size);
	if (err)
		return err;

	err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
			 TSO_HEADER_SIZE);
	if (err)
		return err;

	sq->sqe_base = sq->sqe->base;
	sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
	if (!sq->sg)
		return -ENOMEM;

	if (pfvf->ptp) {
		err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
				 sizeof(*sq->timestamps));
		if (err)
			return err;
	}

	sq->head = 0;
	sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
	sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
	/* Set SQE threshold to 10% of total SQEs */
	sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100;
	sq->aura_id = sqb_aura;
	sq->aura_fc_addr = pool->fc_addr->base;
	sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0));

	sq->stats.bytes = 0;
	sq->stats.pkts = 0;

	return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
}

static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct nix_aq_enq_req *aq;
	struct otx2_cq_queue *cq;
	int err, pool_id;

	cq = &qset->cq[qidx];
	cq->cq_idx = qidx;
	if (qidx < pfvf->hw.rx_queues) {
		cq->cq_type = CQ_RX;
		cq->cint_idx = qidx;
		cq->cqe_cnt = qset->rqe_cnt;
	} else {
		cq->cq_type = CQ_TX;
		cq->cint_idx = qidx - pfvf->hw.rx_queues;
		cq->cqe_cnt = qset->sqe_cnt;
	}
	cq->cqe_size = pfvf->qset.xqe_size;

	/* Allocate memory for CQEs */
	err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size);
	if (err)
		return err;

	/* Save CQE CPU base for faster reference */
	cq->cqe_base = cq->cqe->base;
	/* In case where all RQs auras point to single pool,
	 * all CQs receive buffer pool also point to same pool.
	 */
	pool_id = ((cq->cq_type == CQ_RX) &&
		   (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx;
	cq->rbpool = &qset->pool[pool_id];
	cq->refill_task_sched = false;

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->cq.ena = 1;
	aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4);
	aq->cq.caching = 1;
	aq->cq.base = cq->cqe->iova;
	aq->cq.cint_idx = cq->cint_idx;
	aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS;
	aq->cq.qint_idx = 0;
	aq->cq.avg_level = 255;

	if (qidx < pfvf->hw.rx_queues) {
		aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
		aq->cq.drop_ena = 1;

		if (!is_otx2_lbkvf(pfvf->pdev)) {
			/* Enable receive CQ backpressure */
			aq->cq.bp_ena = 1;
			aq->cq.bpid = pfvf->bpid[0];

			/* Set backpressure level is same as cq pass level */
			aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
		}
	}

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

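/* Deferred work to refill a receive buffer pool after an allocation
 * failure in otx2_alloc_buffer(); reschedules itself when less than
 * half of the outstanding pointers could be replenished.
 */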
static void otx2_pool_refill_task(struct work_struct *work)
{
	struct otx2_cq_queue *cq;
	struct otx2_pool *rbpool;
	struct refill_work *wrk;
	int qidx, free_ptrs = 0;
	struct otx2_nic *pfvf;
	dma_addr_t bufptr;

	wrk = container_of(work, struct refill_work, pool_refill_work.work);
	pfvf = wrk->pf;
	qidx = wrk - pfvf->refill_wrk;
	cq = &pfvf->qset.cq[qidx];
	rbpool = cq->rbpool;
	free_ptrs = cq->pool_ptrs;

	while (cq->pool_ptrs) {
		if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
			/* Schedule a WQ if we fail to free at least half of
			 * the pointers else enable napi for this RQ.
			 */
			if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
				struct delayed_work *dwork;

				dwork = &wrk->pool_refill_work;
				schedule_delayed_work(dwork,
						      msecs_to_jiffies(100));
			} else {
				cq->refill_task_sched = false;
			}
			return;
		}
		pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
		cq->pool_ptrs--;
	}
	cq->refill_task_sched = false;
}

int otx2_config_nix_queues(struct otx2_nic *pfvf)
{
	int qidx, err;

	/* Initialize RX queues */
	for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
		u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);

		err = otx2_rq_init(pfvf, qidx, lpb_aura);
		if (err)
			return err;
	}

	/* Initialize TX queues */
	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
		u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);

		err = otx2_sq_init(pfvf, qidx, sqb_aura);
		if (err)
			return err;
	}

	/* Initialize completion queues */
	for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
		err = otx2_cq_init(pfvf, qidx);
		if (err)
			return err;
	}

	/* Initialize work queue for receive buffer refill */
	pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
					sizeof(struct refill_work), GFP_KERNEL);
	if (!pfvf->refill_wrk)
		return -ENOMEM;

	for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
		pfvf->refill_wrk[qidx].pf = pfvf;
		INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work,
				  otx2_pool_refill_task);
	}
	return 0;
}

int otx2_config_nix(struct otx2_nic *pfvf)
{
	struct nix_lf_alloc_req *nixlf;
	struct nix_lf_alloc_rsp *rsp;
	int err;

	pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512;

	/* Get memory to put this msg */
	nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
	if (!nixlf)
		return -ENOMEM;

	/* Set RQ/SQ/CQ counts */
	nixlf->rq_cnt = pfvf->hw.rx_queues;
	nixlf->sq_cnt = pfvf->hw.tx_queues;
	nixlf->cq_cnt = pfvf->qset.cq_cnt;
	nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
	nixlf->rss_grps = MAX_RSS_GROUPS;
	nixlf->xqe_sz = NIX_XQESZ_W16;
	/* We don't know absolute NPA LF idx attached.
	 * AF will replace 'RVU_DEFAULT_PF_FUNC' with
	 * NPA LF attached to this RVU PF/VF.
	 */
	nixlf->npa_func = RVU_DEFAULT_PF_FUNC;
	/* Disable alignment pad, enable L2 length check,
	 * enable L4 TCP/UDP checksum verification.
	 */
	nixlf->rx_cfg = BIT_ULL(33) | BIT_ULL(35) | BIT_ULL(37);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		return err;

	rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0,
							   &nixlf->hdr);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	if (rsp->qints < 1)
		return -ENXIO;

	return rsp->hdr.rc;
}

void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	int sqb, qidx;
	u64 iova, pa;

	for (qidx = 0; qidx < hw->tx_queues; qidx++) {
		sq = &qset->sq[qidx];
		if (!sq->sqb_ptrs)
			continue;
		for (sqb = 0; sqb < sq->sqb_count; sqb++) {
			if (!sq->sqb_ptrs[sqb])
				continue;
			iova = sq->sqb_ptrs[sqb];
			pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
			dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
			put_page(virt_to_page(phys_to_virt(pa)));
		}
		sq->sqb_count = 0;
	}
}

void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
{
	int pool_id, pool_start = 0, pool_end = 0, size = 0;
	u64 iova, pa;

	if (type == AURA_NIX_SQ) {
		pool_start = otx2_get_pool_idx(pfvf, type, 0);
		pool_end = pool_start + pfvf->hw.sqpool_cnt;
		size = pfvf->hw.sqb_size;
	}
	if (type == AURA_NIX_RQ) {
		pool_start = otx2_get_pool_idx(pfvf, type, 0);
		pool_end = pfvf->hw.rqpool_cnt;
		size = pfvf->rbsize;
	}

	/* Free SQB and RQB pointers from the aura pool */
	for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
		iova = otx2_aura_allocptr(pfvf, pool_id);
		while (iova) {
			if (type == AURA_NIX_RQ)
				iova -= OTX2_HEAD_ROOM;

			pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
			dma_unmap_page_attrs(pfvf->dev, iova, size,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
			put_page(virt_to_page(phys_to_virt(pa)));
			iova = otx2_aura_allocptr(pfvf, pool_id);
		}
	}
}

void otx2_aura_pool_free(struct otx2_nic *pfvf)
{
	struct otx2_pool *pool;
	int pool_id;

	if (!pfvf->qset.pool)
		return;

	for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) {
		pool = &pfvf->qset.pool[pool_id];
		qmem_free(pfvf->dev, pool->stack);
		qmem_free(pfvf->dev, pool->fc_addr);
	}
	devm_kfree(pfvf->dev, pfvf->qset.pool);
	pfvf->qset.pool = NULL;
}

static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
			  int pool_id, int numptrs)
{
	struct npa_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[pool_id];

	/* Allocate memory for HW to update Aura count.
	 * Alloc one cache line, so that it fits all FC_STYPE modes.
	 */
	if (!pool->fc_addr) {
		err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
		if (err)
			return err;
	}

	/* Initialize this aura's context via AF */
	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aq) {
		/* Shared mbox memory buffer is full, flush it and retry */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			return err;
		aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
		if (!aq)
			return -ENOMEM;
	}

	aq->aura_id = aura_id;
	/* Will be filled by AF with correct pool context address */
	aq->aura.pool_addr = pool_id;
	aq->aura.pool_caching = 1;
	aq->aura.shift = ilog2(numptrs) - 8;
	aq->aura.count = numptrs;
	aq->aura.limit = numptrs;
	aq->aura.avg_level = 255;
	aq->aura.ena = 1;
	aq->aura.fc_ena = 1;
	aq->aura.fc_addr = pool->fc_addr->iova;
	aq->aura.fc_hyst_bits = 0; /* Store count on all updates */

	/* Enable backpressure for RQ aura */
	if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
		aq->aura.bp_ena = 0;
		/* If NIX1 LF is attached then specify NIX1_RX.
		 * NPA_AURA_S[BP_ENA] is one bit per NIX-RX, with the
		 * index enumerated by NPA_BPINTF_E (0 for NIX0_RX,
		 * 1 for NIX1_RX).
		 */
		if (pfvf->nix_blkaddr == BLKADDR_NIX1)
			aq->aura.bp_ena = 1;
		aq->aura.nix0_bpid = pfvf->bpid[0];

		/* Set backpressure level for RQ's Aura */
		aq->aura.bp = RQ_BP_LVL_AURA;
	}

	/* Fill AQ info */
	aq->ctype = NPA_AQ_CTYPE_AURA;
	aq->op = NPA_AQ_INSTOP_INIT;

	return 0;
}

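/* Like otx2_aura_init(), this only queues the pool INIT request on
 * the mbox; the caller is expected to batch several requests and
 * flush them with otx2_sync_mbox_msg().
 */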
static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
			  int stack_pages, int numptrs, int buf_size)
{
	struct npa_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[pool_id];
	/* Alloc memory for stack which is used to store buffer pointers */
	err = qmem_alloc(pfvf->dev, &pool->stack,
			 stack_pages, pfvf->hw.stack_pg_bytes);
	if (err)
		return err;

	pool->rbsize = buf_size;

	/* Initialize this pool's context via AF */
	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aq) {
		/* Shared mbox memory buffer is full, flush it and retry */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err) {
			qmem_free(pfvf->dev, pool->stack);
			return err;
		}
		aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
		if (!aq) {
			qmem_free(pfvf->dev, pool->stack);
			return -ENOMEM;
		}
	}

	aq->aura_id = pool_id;
	aq->pool.stack_base = pool->stack->iova;
	aq->pool.stack_caching = 1;
	aq->pool.ena = 1;
	aq->pool.buf_size = buf_size / 128;
	aq->pool.stack_max_pages = stack_pages;
	aq->pool.shift = ilog2(numptrs) - 8;
	aq->pool.ptr_start = 0;
	aq->pool.ptr_end = ~0ULL;

	/* Fill AQ info */
	aq->ctype = NPA_AQ_CTYPE_POOL;
	aq->op = NPA_AQ_INSTOP_INIT;

	return 0;
}

int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
{
	int qidx, pool_id, stack_pages, num_sqbs;
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	struct otx2_pool *pool;
	dma_addr_t bufptr;
	int err, ptr;

	/* Calculate number of SQBs needed.
	 *
	 * For a 128byte SQE, and 4K size SQB, 31 SQEs will fit in one SQB.
	 * Last SQE is used for pointing to next SQB.
	 */
	num_sqbs = (hw->sqb_size / 128) - 1;
	num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;

	/* Get no of stack pages needed */
	stack_pages =
		(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;

	for (qidx = 0; qidx < hw->tx_queues; qidx++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
		/* Initialize aura context */
		err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
		if (err)
			goto fail;

		/* Initialize pool context */
		err = otx2_pool_init(pfvf, pool_id, stack_pages,
				     num_sqbs, hw->sqb_size);
		if (err)
			goto fail;
	}

	/* Flush accumulated messages */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	/* Allocate pointers and free them to aura/pool */
	for (qidx = 0; qidx < hw->tx_queues; qidx++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
		pool = &pfvf->qset.pool[pool_id];

		sq = &qset->sq[qidx];
		sq->sqb_count = 0;
		sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
		if (!sq->sqb_ptrs)
			return -ENOMEM;

		for (ptr = 0; ptr < num_sqbs; ptr++) {
			if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
				return -ENOMEM;
			pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
			sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
		}
	}

	return 0;
fail:
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	otx2_aura_pool_free(pfvf);
	return err;
}

int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int stack_pages, pool_id, rq;
	struct otx2_pool *pool;
	int err, ptr, num_ptrs;
	dma_addr_t bufptr;

	num_ptrs = pfvf->qset.rqe_cnt;

	stack_pages =
		(num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;

	for (rq = 0; rq < hw->rx_queues; rq++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq);
		/* Initialize aura context */
		err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs);
		if (err)
			goto fail;
	}
	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
		err = otx2_pool_init(pfvf, pool_id, stack_pages,
				     num_ptrs, pfvf->rbsize);
		if (err)
			goto fail;
	}

	/* Flush accumulated messages */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	/* Allocate pointers and free them to aura/pool */
	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
		pool = &pfvf->qset.pool[pool_id];
		for (ptr = 0; ptr < num_ptrs; ptr++) {
			if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
				return -ENOMEM;
			pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
						   bufptr + OTX2_HEAD_ROOM);
		}
	}

	return 0;
fail:
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	otx2_aura_pool_free(pfvf);
	return err;
}

int otx2_config_npa(struct otx2_nic *pfvf)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct npa_lf_alloc_req *npalf;
	struct otx2_hw *hw = &pfvf->hw;
	int aura_cnt;

	/* Pool - Stack of free buffer pointers
	 * Aura - Alloc/frees pointers from/to pool for NIX DMA.
	 */

	if (!hw->pool_cnt)
		return -EINVAL;

	qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt,
				  sizeof(struct otx2_pool), GFP_KERNEL);
	if (!qset->pool)
		return -ENOMEM;

	/* Get memory to put this msg */
	npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox);
	if (!npalf)
		return -ENOMEM;

	/* Set aura and pool counts */
	npalf->nr_pools = hw->pool_cnt;
	aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt));
	npalf->aura_sz = (aura_cnt >= ilog2(128)) ? (aura_cnt - 6) : 1;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_detach_resources(struct mbox *mbox)
{
	struct rsrc_detach *detach;

	mutex_lock(&mbox->lock);
	detach = otx2_mbox_alloc_msg_detach_resources(mbox);
	if (!detach) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	/* detach all resources */
	detach->partial = false;

	/* Send detach request to AF */
	otx2_mbox_msg_send(&mbox->mbox, 0);
	mutex_unlock(&mbox->lock);
	return 0;
}
EXPORT_SYMBOL(otx2_detach_resources);

int otx2_attach_npa_nix(struct otx2_nic *pfvf)
{
	struct rsrc_attach *attach;
	struct msg_req *msix;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	/* Get memory to put this msg */
	attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox);
	if (!attach) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	attach->npalf = true;
	attach->nixlf = true;

	/* Send attach request to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->nix_blkaddr = BLKADDR_NIX0;

	/* If the platform has two NIX blocks then LF may be
	 * allocated from NIX1.
	 */
	if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL)
		pfvf->nix_blkaddr = BLKADDR_NIX1;

	/* Get NPA and NIX MSIX vector offsets */
	msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox);
	if (!msix) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}
	mutex_unlock(&pfvf->mbox.lock);

	if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID ||
	    pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) {
		dev_err(pfvf->dev,
			"RVUPF: Invalid MSIX vector offset for NPA/NIX\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_attach_npa_nix);

void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
{
	struct hwctx_disable_req *req;

	mutex_lock(&mbox->lock);
	/* Request AQ to disable this context */
	if (npa)
		req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox);
	else
		req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox);

	if (!req) {
		mutex_unlock(&mbox->lock);
		return;
	}

	req->ctype = type;

	if (otx2_sync_mbox_msg(mbox))
		dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
			__func__);

	mutex_unlock(&mbox->lock);
}

int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
{
	struct nix_bp_cfg_req *req;

	if (enable)
		req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox);
	else
		req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox);

	if (!req)
		return -ENOMEM;

	req->chan_base = 0;
	req->chan_cnt = 1;
	req->bpid_per_chan = 0;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp)
{
	int id;

	for (id = 0; id < CGX_RX_STATS_COUNT; id++)
		pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id];
	for (id = 0; id < CGX_TX_STATS_COUNT; id++)
		pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id];
}

void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
				struct cgx_fec_stats_rsp *rsp)
{
	pfvf->hw.cgx_fec_corr_blks += rsp->fec_corr_blks;
	pfvf->hw.cgx_fec_uncorr_blks += rsp->fec_uncorr_blks;
}

void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp)
{
	int lvl, schq;

	/* Setup transmit scheduler list */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		for (schq = 0; schq < rsp->schq[lvl]; schq++)
			pf->hw.txschq_list[lvl][schq] =
				rsp->schq_list[lvl][schq];
}
EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);

void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp)
{
	pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs;
	pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes;
}
EXPORT_SYMBOL(mbox_handler_npa_lf_alloc);

void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp)
{
	pfvf->hw.sqb_size = rsp->sqb_size;
	pfvf->hw.rx_chan_base = rsp->rx_chan_base;
	pfvf->hw.tx_chan_base = rsp->tx_chan_base;
	pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
	pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
	pfvf->hw.cgx_links = rsp->cgx_links;
	pfvf->hw.lbk_links = rsp->lbk_links;
	pfvf->hw.tx_link = rsp->tx_link;
}
EXPORT_SYMBOL(mbox_handler_nix_lf_alloc);

void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp)
{
	pfvf->hw.npa_msixoff = rsp->npa_msixoff;
	pfvf->hw.nix_msixoff = rsp->nix_msixoff;
}
EXPORT_SYMBOL(mbox_handler_msix_offset);

void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp)
{
	int chan, chan_id;

	for (chan = 0; chan < rsp->chan_cnt; chan++) {
		chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F);
		pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF;
	}
}
EXPORT_SYMBOL(mbox_handler_nix_bp_enable);

void otx2_free_cints(struct otx2_nic *pfvf, int n)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	int irq, qidx;

	for (qidx = 0, irq = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
	     qidx < n;
	     qidx++, irq++) {
		int vector = pci_irq_vector(pfvf->pdev, irq);

		irq_set_affinity_hint(vector, NULL);
		free_cpumask_var(hw->affinity_mask[irq]);
		free_irq(vector, &qset->napi[qidx]);
	}
}

void otx2_set_cints_affinity(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int vec, cpu, irq, cint;

	vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
	cpu = cpumask_first(cpu_online_mask);

	/* CQ interrupts */
	for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) {
		if (!alloc_cpumask_var(&hw->affinity_mask[vec], GFP_KERNEL))
			return;

		cpumask_set_cpu(cpu, hw->affinity_mask[vec]);

		irq = pci_irq_vector(pfvf->pdev, vec);
		irq_set_affinity_hint(irq, hw->affinity_mask[vec]);

		cpu = cpumask_next(cpu, cpu_online_mask);
		if (unlikely(cpu >= nr_cpu_ids))
			cpu = 0;
	}
}

u16 otx2_get_max_mtu(struct otx2_nic *pfvf)
{
	struct nix_hw_info *rsp;
	struct msg_req *req;
	u16 max_mtu;
	int rc;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_nix_get_hw_info(&pfvf->mbox);
	if (!req) {
		rc = -ENOMEM;
		goto out;
	}

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (!rc) {
		rsp = (struct nix_hw_info *)
		       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);

		/* HW counts VLAN insertion bytes (8 for double tag)
		 * irrespective of whether SQE is requesting to insert VLAN
		 * in the packet or not. Hence these 8 bytes have to be
		 * discounted from max packet size otherwise HW will throw
		 * SMQ errors
		 */
		max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN;

		/* Also save DWRR MTU, needed for DWRR weight calculation */
		pfvf->hw.dwrr_mtu = rsp->rpm_dwrr_mtu;
		if (!pfvf->hw.dwrr_mtu)
			pfvf->hw.dwrr_mtu = 1;
	}

out:
	mutex_unlock(&pfvf->mbox.lock);
	if (rc) {
		dev_warn(pfvf->dev,
			 "Failed to get MTU from hardware, setting default value(1500)\n");
		max_mtu = 1500;
	}
	return max_mtu;
}
EXPORT_SYMBOL(otx2_get_max_mtu);

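/* Generate default no-op handlers for all CGX "up" mbox messages;
 * PF/VF specific code overrides the weak symbols it cares about.
 */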
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int __weak								\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				  struct _req_type *req,		\
				  struct _rsp_type *rsp)		\
{									\
	/* Nothing to do here */					\
	return 0;							\
}									\
EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
#undef M