// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"
#include "otx2_ptp.h"
#include "cn10k.h"

#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))

static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
        struct nix_cqe_hdr_s *cqe_hdr;

        cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
        if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
                return NULL;

        cq->cq_head++;
        cq->cq_head &= (cq->cqe_cnt - 1);

        return cqe_hdr;
}
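
/* Note on the head update above: masking with (cqe_cnt - 1) wraps the ring
 * head without a modulo, which relies on the CQE count being a power of two
 * (queue setup is expected to guarantee this). For example, with
 * cqe_cnt = 1024: head 1023 -> (1023 + 1) & 1023 = 0.
 */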

static unsigned int frag_num(unsigned int i)
{
#ifdef __BIG_ENDIAN
        return (i & ~3) + 3 - (i & 3);
#else
        return i;
#endif
}
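
/* frag_num() appears to exist because the 16-bit fragment lengths written
 * into an SQE overlay 64-bit words. On little-endian hosts the i-th u16 in
 * memory is the i-th length, but on big-endian hosts the u16s within each
 * u64 are laid out in reverse, so indices are swizzled within each group of
 * four: 0,1,2,3 -> 3,2,1,0. E.g. frag_num(1) == (1 & ~3) + 3 - (1 & 3) == 2
 * on big-endian.
 */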

static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
                                        struct sk_buff *skb, int seg, int *len)
{
        const skb_frag_t *frag;
        struct page *page;
        int offset;

        /* First segment is always skb->data */
        if (!seg) {
                page = virt_to_page(skb->data);
                offset = offset_in_page(skb->data);
                *len = skb_headlen(skb);
        } else {
                frag = &skb_shinfo(skb)->frags[seg - 1];
                page = skb_frag_page(frag);
                offset = skb_frag_off(frag);
                *len = skb_frag_size(frag);
        }
        return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
}

static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
{
        int seg;

        for (seg = 0; seg < sg->num_segs; seg++) {
                otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
                                    sg->size[seg], DMA_TO_DEVICE);
        }
        sg->num_segs = 0;
}

static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
                                 struct otx2_cq_queue *cq,
                                 struct otx2_snd_queue *sq,
                                 struct nix_cqe_tx_s *cqe,
                                 int budget, int *tx_pkts, int *tx_bytes)
{
        struct nix_send_comp_s *snd_comp = &cqe->comp;
        struct skb_shared_hwtstamps ts;
        struct sk_buff *skb = NULL;
        u64 timestamp, tsns;
        struct sg_list *sg;
        int err;

        if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
                net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
                                    pfvf->netdev->name, cq->cint_idx,
                                    snd_comp->status);

        sg = &sq->sg[snd_comp->sqe_id];
        skb = (struct sk_buff *)sg->skb;
        if (unlikely(!skb))
                return;

        if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
                timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
                if (timestamp != 1) {
                        err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
                        if (!err) {
                                memset(&ts, 0, sizeof(ts));
                                ts.hwtstamp = ns_to_ktime(tsns);
                                skb_tstamp_tx(skb, &ts);
                        }
                }
        }

        *tx_bytes += skb->len;
        (*tx_pkts)++;
        otx2_dma_unmap_skb_frags(pfvf, sg);
        napi_consume_skb(skb, budget);
        sg->skb = (u64)NULL;
}

static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
                              struct sk_buff *skb, void *data)
{
        u64 tsns;
        int err;

        if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
                return;

        /* The first 8 bytes is the timestamp */
        err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns);
        if (err)
                return;

        skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
}

static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
                              u64 iova, int len, struct nix_rx_parse_s *parse)
{
        struct page *page;
        int off = 0;
        void *va;

        va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));

        if (likely(!skb_shinfo(skb)->nr_frags)) {
                /* Check if data starts at some nonzero offset
                 * from the start of the buffer. For now the
                 * only possible offset is 8 bytes in the case
                 * where packet is prepended by a timestamp.
                 */
                if (parse->laptr) {
                        otx2_set_rxtstamp(pfvf, skb, va);
                        off = OTX2_HW_TIMESTAMP_LEN;
                }
        }

        page = virt_to_page(va);
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                        va - page_address(page) + off, len - off, pfvf->rbsize);

        otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
                            pfvf->rbsize, DMA_FROM_DEVICE);
}
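
/* otx2_skb_add_frag() works on the CPU virtual address of the Rx buffer, so
 * the hardware-provided IOVA is first translated back through the IOMMU
 * domain (or, presumably, used as a physical address directly when no IOMMU
 * is in use, which otx2_iova_to_phys() is expected to handle).
 */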

static void otx2_set_rxhash(struct otx2_nic *pfvf,
                            struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
{
        enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
        struct otx2_rss_info *rss;
        u32 hash = 0;

        if (!(pfvf->netdev->features & NETIF_F_RXHASH))
                return;

        rss = &pfvf->hw.rss_info;
        if (rss->flowkey_cfg) {
                /* If the flow key covers anything beyond plain IPv4/IPv6,
                 * the hash includes L4 fields as well.
                 */
                if (rss->flowkey_cfg &
                    ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))
                        hash_type = PKT_HASH_TYPE_L4;
                else
                        hash_type = PKT_HASH_TYPE_L3;
                hash = cqe->hdr.flow_tag;
        }
        skb_set_hash(skb, hash, hash_type);
}

static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
                              int qidx)
{
        struct nix_rx_sg_s *sg = &cqe->sg;
        void *end, *start;
        u64 *seg_addr;
        int seg;

        /* Walk all SG subdescriptors in the CQE and free each segment's
         * buffer pointer back to the pool.
         */
        start = (void *)sg;
        end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
        while (start < end) {
                sg = (struct nix_rx_sg_s *)start;
                seg_addr = &sg->seg_addr;
                for (seg = 0; seg < sg->segs; seg++, seg_addr++)
                        pfvf->hw_ops->aura_freeptr(pfvf, qidx,
                                                   *seg_addr & ~0x07ULL);
                start += sizeof(*sg);
        }
}
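
/* Sizing note for the walk above: parse.desc_sizem1 is the descriptor size
 * in 16-byte units, minus one; desc_sizem1 = 3 means (3 + 1) * 16 = 64 bytes
 * of subdescriptors follow. Masking each segment address with ~0x07ULL
 * drops the low bits so an 8-byte-aligned pointer is returned to the pool.
 */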

static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
                                  struct nix_cqe_rx_s *cqe, int qidx)
{
        struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
        struct nix_rx_parse_s *parse = &cqe->parse;

        if (netif_msg_rx_err(pfvf))
                netdev_err(pfvf->netdev,
                           "RQ%d: Error pkt with errlev:0x%x errcode:0x%x\n",
                           qidx, parse->errlev, parse->errcode);

        if (parse->errlev == NPC_ERRLVL_RE) {
                switch (parse->errcode) {
                case ERRCODE_FCS:
                case ERRCODE_FCS_RCV:
                        atomic_inc(&stats->rx_fcs_errs);
                        break;
                case ERRCODE_UNDERSIZE:
                        atomic_inc(&stats->rx_undersize_errs);
                        break;
                case ERRCODE_OVERSIZE:
                        atomic_inc(&stats->rx_oversize_errs);
                        break;
                case ERRCODE_OL2_LEN_MISMATCH:
                        atomic_inc(&stats->rx_len_errs);
                        break;
                default:
                        atomic_inc(&stats->rx_other_errs);
                        break;
                }
        } else if (parse->errlev == NPC_ERRLVL_NIX) {
                switch (parse->errcode) {
                case ERRCODE_OL3_LEN:
                case ERRCODE_OL4_LEN:
                case ERRCODE_IL3_LEN:
                case ERRCODE_IL4_LEN:
                        atomic_inc(&stats->rx_len_errs);
                        break;
                case ERRCODE_OL4_CSUM:
                case ERRCODE_IL4_CSUM:
                        atomic_inc(&stats->rx_csum_errs);
                        break;
                default:
                        atomic_inc(&stats->rx_other_errs);
                        break;
                }
        } else {
                atomic_inc(&stats->rx_other_errs);
                /* For now ignore all the NPC parser errors and
                 * pass the packets to stack.
                 */
                return false;
        }

        /* If RXALL is enabled pass on packets to stack. */
        if (pfvf->netdev->features & NETIF_F_RXALL)
                return false;

        /* Free buffer back to pool */
        if (cqe->sg.segs)
                otx2_free_rcv_seg(pfvf, cqe, qidx);
        return true;
}

static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
                                 struct napi_struct *napi,
                                 struct otx2_cq_queue *cq,
                                 struct nix_cqe_rx_s *cqe)
{
        struct nix_rx_parse_s *parse = &cqe->parse;
        struct nix_rx_sg_s *sg = &cqe->sg;
        struct sk_buff *skb = NULL;
        void *end, *start;
        u64 *seg_addr;
        u16 *seg_size;
        int seg;

        if (unlikely(parse->errlev || parse->errcode)) {
                if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
                        return;
        }

        skb = napi_get_frags(napi);
        if (unlikely(!skb))
                return;

        start = (void *)sg;
        end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
        while (start < end) {
                sg = (struct nix_rx_sg_s *)start;
                seg_addr = &sg->seg_addr;
                seg_size = (void *)sg;
                for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
                        otx2_skb_add_frag(pfvf, skb, *seg_addr, seg_size[seg],
                                          parse);
                        cq->pool_ptrs++;
                }
                start += sizeof(*sg);
        }
        otx2_set_rxhash(pfvf, cqe, skb);

        skb_record_rx_queue(skb, cq->cq_idx);
        if (pfvf->netdev->features & NETIF_F_RXCSUM)
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        napi_gro_frags(napi);
}

static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
                                struct napi_struct *napi,
                                struct otx2_cq_queue *cq, int budget)
{
        struct nix_cqe_rx_s *cqe;
        int processed_cqe = 0;

        while (likely(processed_cqe < budget)) {
                cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
                if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
                    !cqe->sg.seg_addr) {
                        if (!processed_cqe)
                                return 0;
                        break;
                }
                cq->cq_head++;
                cq->cq_head &= (cq->cqe_cnt - 1);

                otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);

                cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
                cqe->sg.seg_addr = 0x00;
                processed_cqe++;
        }

        /* Free CQEs to HW */
        otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
                     ((u64)cq->cq_idx << 32) | processed_cqe);

        if (unlikely(!cq->pool_ptrs))
                return 0;

        /* Refill pool with new buffers */
        pfvf->hw_ops->refill_pool_ptrs(pfvf, cq);

        return processed_cqe;
}
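
/* Doorbell format note: the NIX_LF_CQ_OP_DOOR write above packs the CQ
 * index into the upper 32 bits and the number of consumed CQEs into the
 * lower bits, so a single 64-bit store both selects the queue and returns
 * the entries to hardware.
 */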

void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
        struct otx2_nic *pfvf = dev;
        dma_addr_t bufptr;

        while (cq->pool_ptrs) {
                if (otx2_alloc_buffer(pfvf, cq, &bufptr))
                        break;
                otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
                cq->pool_ptrs--;
        }
}

static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
                                struct otx2_cq_queue *cq, int budget)
{
        int tx_pkts = 0, tx_bytes = 0;
        struct nix_cqe_tx_s *cqe;
        int processed_cqe = 0;

        while (likely(processed_cqe < budget)) {
                cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
                if (unlikely(!cqe)) {
                        if (!processed_cqe)
                                return 0;
                        break;
                }
                otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx],
                                     cqe, budget, &tx_pkts, &tx_bytes);

                cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
                processed_cqe++;
        }

        /* Free CQEs to HW */
        otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
                     ((u64)cq->cq_idx << 32) | processed_cqe);

        if (likely(tx_pkts)) {
                struct netdev_queue *txq;

                txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
                netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
                /* Check if queue was stopped earlier due to ring full */
                smp_mb();
                if (netif_tx_queue_stopped(txq) &&
                    netif_carrier_ok(pfvf->netdev))
                        netif_tx_wake_queue(txq);
        }
        return 0;
}
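
/* The smp_mb() above is expected to pair with a matching barrier on the
 * transmit path after the queue is stopped: completions published before
 * the barrier must be visible before the stopped/carrier checks, otherwise
 * a queue stopped concurrently with the last completion could stay stopped
 * forever.
 */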

int otx2_napi_handler(struct napi_struct *napi, int budget)
{
        struct otx2_cq_poll *cq_poll;
        int workdone = 0, cq_idx, i;
        struct otx2_cq_queue *cq;
        struct otx2_qset *qset;
        struct otx2_nic *pfvf;

        cq_poll = container_of(napi, struct otx2_cq_poll, napi);
        pfvf = (struct otx2_nic *)cq_poll->dev;
        qset = &pfvf->qset;

        for (i = CQS_PER_CINT - 1; i >= 0; i--) {
                cq_idx = cq_poll->cq_ids[i];
                if (unlikely(cq_idx == CINT_INVALID_CQ))
                        continue;
                cq = &qset->cq[cq_idx];
                if (cq->cq_type == CQ_RX) {
                        /* If the RQ refill task is scheduled, skip polling
                         * this queue until buffers have been replenished.
                         */
                        if (cq->refill_task_sched)
                                continue;
                        workdone += otx2_rx_napi_handler(pfvf, napi,
                                                         cq, budget);
                } else {
                        workdone += otx2_tx_napi_handler(pfvf, cq, budget);
                }
        }

        /* Clear the IRQ */
        otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));

        if (workdone < budget && napi_complete_done(napi, workdone)) {
                /* If interface is going down, don't re-enable interrupts */
                if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
                        return workdone;

                /* Re-enable interrupts */
                otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
                             BIT_ULL(0));
        }
        return workdone;
}

void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
                    int size, int qidx)
{
        u64 status;

        /* Packet data stores should finish before SQE is flushed to HW */
        dma_wmb();

        do {
                memcpy(sq->lmt_addr, sq->sqe_base, size);
                status = otx2_lmt_flush(sq->io_addr);
        } while (status == 0);

        sq->head++;
        sq->head &= (sq->sqe_cnt - 1);
}
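
/* LMTST note: otx2_lmt_flush() issues the large atomic store that pushes
 * the SQE from the LMT region to the SQ's I/O address. A zero status means
 * the store did not complete (the LMTST was cancelled, e.g. by a competing
 * access on the same core), so the copy-and-flush is simply retried until
 * it succeeds.
 */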

#define MAX_SEGS_PER_SG 3

static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
                            struct sk_buff *skb, int num_segs, int *offset)
{
        struct nix_sqe_sg_s *sg = NULL;
        u64 dma_addr, *iova = NULL;
        u16 *sg_lens = NULL;
        int seg, len;

        sq->sg[sq->head].num_segs = 0;

        for (seg = 0; seg < num_segs; seg++) {
                if ((seg % MAX_SEGS_PER_SG) == 0) {
                        sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
                        sg->ld_type = NIX_SEND_LDTYPE_LDD;
                        sg->subdc = NIX_SUBDC_SG;
                        sg->segs = 0;
                        sg_lens = (void *)sg;
                        iova = (void *)sg + sizeof(*sg);
                        /* Next subdc always starts at a 16byte boundary.
                         * So if sg->segs is whether 2 or 3, offset += 16bytes.
                         */
                        if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
                                *offset += sizeof(*sg) + (3 * sizeof(u64));
                        else
                                *offset += sizeof(*sg) + sizeof(u64);
                }
                dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
                if (dma_mapping_error(pfvf->dev, dma_addr))
                        return false;

                sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len;
                sg->segs++;
                *iova++ = dma_addr;

                /* Save DMA mapping info for later unmapping */
                sq->sg[sq->head].dma_addr[seg] = dma_addr;
                sq->sg[sq->head].size[seg] = len;
                sq->sg[sq->head].num_segs++;
        }

        sq->sg[sq->head].skb = (u64)skb;
        return true;
}
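
/* SG subdescriptor layout sketch: each nix_sqe_sg_s packs up to three
 * segments. The 64-bit SG word itself carries the three 16-bit lengths
 * (indexed via frag_num() to stay endian-correct) and is followed by one
 * IOVA per segment. With two segments the unused third IOVA slot pads the
 * subdescriptor to 32 bytes, so the next subdescriptor always starts on a
 * 16-byte boundary.
 */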

/* Add SQE extended header subdescriptor */
static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
                             struct sk_buff *skb, int *offset)
{
        struct nix_sqe_ext_s *ext;

        ext = (struct nix_sqe_ext_s *)(sq->sqe_base + *offset);
        ext->subdc = NIX_SUBDC_EXT;
        if (skb_shinfo(skb)->gso_size) {
                ext->lso = 1;
                ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb);
                ext->lso_mps = skb_shinfo(skb)->gso_size;

                /* Only TSOv4 and TSOv6 GSO offloads are supported */
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
                        ext->lso_format = pfvf->hw.lso_tsov4_idx;

                        /* HW adds payload size to 'ip_hdr->tot_len' while
                         * sending TSO segment, hence set payload length
                         * in IP header of the packet to just header length.
                         */
                        ip_hdr(skb)->tot_len =
                                htons(ext->lso_sb - skb_network_offset(skb));
                } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        ext->lso_format = pfvf->hw.lso_tsov6_idx;

                        ipv6_hdr(skb)->payload_len =
                                htons(ext->lso_sb - skb_network_offset(skb));
                } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
                        __be16 l3_proto = vlan_get_protocol(skb);
                        struct udphdr *udph = udp_hdr(skb);
                        u16 iplen;

                        ext->lso_sb = skb_transport_offset(skb) +
                                      sizeof(struct udphdr);

                        /* HW adds payload size to length fields in IP and
                         * UDP headers while segmenting, hence set the
                         * length fields to just header sizes.
                         */
                        iplen = htons(ext->lso_sb - skb_network_offset(skb));
                        if (l3_proto == htons(ETH_P_IP)) {
                                ip_hdr(skb)->tot_len = iplen;
                                ext->lso_format = pfvf->hw.lso_udpv4_idx;
                        } else {
                                ipv6_hdr(skb)->payload_len = iplen;
                                ext->lso_format = pfvf->hw.lso_udpv6_idx;
                        }

                        udph->len = htons(sizeof(struct udphdr));
                }
        } else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
                ext->tstmp = 1;
        }

#define OTX2_VLAN_PTR_OFFSET (ETH_HLEN - ETH_TLEN)
        if (skb_vlan_tag_present(skb)) {
                if (skb->vlan_proto == htons(ETH_P_8021Q)) {
                        ext->vlan1_ins_ena = 1;
                        ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
                        ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
                } else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
                        ext->vlan0_ins_ena = 1;
                        ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
                        ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
                }
        }

        *offset += sizeof(*ext);
}

static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
                             int alg, u64 iova)
{
        struct nix_sqe_mem_s *mem;

        mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
        mem->subdc = NIX_SUBDC_MEM;
        mem->alg = alg;
        mem->wmem = 1; /* wait for the memory operation */
        mem->addr = iova;

        *offset += sizeof(*mem);
}

/* Add SQE header subdescriptor structure */
static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
                             struct nix_sqe_hdr_s *sqe_hdr,
                             struct sk_buff *skb, u16 qidx)
{
        int proto = 0;

        /* Check if SQE was framed before, if yes then no need to
         * set these constants, skip these fields.
         */
        if (!sqe_hdr->total) {
                /* Don't free Tx buffers to Aura */
                sqe_hdr->df = 1;
                sqe_hdr->aura = sq->aura_id;
                /* Post a CQE Tx after pkt transmission */
                sqe_hdr->pnc = 1;
                sqe_hdr->sq = qidx;
        }
        sqe_hdr->total = skb->len;
        /* Set SQE identifier which will be used later for freeing SKB */
        sqe_hdr->sqe_id = sq->head;

        /* Offload TCP/UDP checksum to HW */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                sqe_hdr->ol3ptr = skb_network_offset(skb);
                sqe_hdr->ol4ptr = skb_transport_offset(skb);
                /* get vlan protocol Ethertype */
                if (eth_type_vlan(skb->protocol))
                        skb->protocol = vlan_get_protocol(skb);

                if (skb->protocol == htons(ETH_P_IP)) {
                        proto = ip_hdr(skb)->protocol;
                        /* In case of TSO, HW needs this to be explicitly set.
                         * So set this always, instead of adding a check.
                         */
                        sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
                } else if (skb->protocol == htons(ETH_P_IPV6)) {
                        proto = ipv6_hdr(skb)->nexthdr;
                        sqe_hdr->ol3type = NIX_SENDL3TYPE_IP6;
                }

                if (proto == IPPROTO_TCP)
                        sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
                else if (proto == IPPROTO_UDP)
                        sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
        }
}

static int otx2_dma_map_tso_skb(struct otx2_nic *pfvf,
                                struct otx2_snd_queue *sq,
                                struct sk_buff *skb, int sqe, int hdr_len)
{
        int num_segs = skb_shinfo(skb)->nr_frags + 1;
        struct sg_list *sg = &sq->sg[sqe];
        u64 dma_addr;
        int seg, len;

        sg->num_segs = 0;

        /* Get payload length at skb->data */
        len = skb_headlen(skb) - hdr_len;

        for (seg = 0; seg < num_segs; seg++) {
                /* Skip skb->data, if there is no payload */
                if (!seg && !len)
                        continue;
                dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
                if (dma_mapping_error(pfvf->dev, dma_addr))
                        goto unmap;

                /* Save DMA mapping info for later unmapping */
                sg->dma_addr[sg->num_segs] = dma_addr;
                sg->size[sg->num_segs] = len;
                sg->num_segs++;
        }
        return 0;
unmap:
        otx2_dma_unmap_skb_frags(pfvf, sg);
        return -EINVAL;
}

static u64 otx2_tso_frag_dma_addr(struct otx2_snd_queue *sq,
                                  struct sk_buff *skb, int seg,
                                  u64 seg_addr, int hdr_len, int sqe)
{
        struct sg_list *sg = &sq->sg[sqe];
        const skb_frag_t *frag;
        int offset;

        if (seg < 0)
                return sg->dma_addr[0] + (seg_addr - (u64)skb->data);

        frag = &skb_shinfo(skb)->frags[seg];
        offset = seg_addr - (u64)skb_frag_address(frag);
        if (skb_headlen(skb) - hdr_len)
                seg++;
        return sg->dma_addr[seg] + offset;
}
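
/* Address math above: 'seg_addr' is a CPU virtual address somewhere inside
 * either the linear area (seg < 0) or a page fragment. The corresponding
 * DMA address is recovered by adding the offset within that region to the
 * region's mapped base. When the linear area carried payload it occupies
 * dma_addr[0], so fragment indices are shifted up by one.
 */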

static void otx2_sqe_tso_add_sg(struct otx2_snd_queue *sq,
                                struct sg_list *list, int *offset)
{
        struct nix_sqe_sg_s *sg = NULL;
        u16 *sg_lens = NULL;
        u64 *iova = NULL;
        int seg;

        /* Add SG descriptors with segment addresses and lengths */
        for (seg = 0; seg < list->num_segs; seg++) {
                if ((seg % MAX_SEGS_PER_SG) == 0) {
                        sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
                        sg->ld_type = NIX_SEND_LDTYPE_LDD;
                        sg->subdc = NIX_SUBDC_SG;
                        sg->segs = 0;
                        sg_lens = (void *)sg;
                        iova = (void *)sg + sizeof(*sg);
                        /* Next subdc always starts at a 16byte boundary.
                         * So if sg->segs is whether 2 or 3, offset += 16bytes.
                         */
                        if ((list->num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
                                *offset += sizeof(*sg) + (3 * sizeof(u64));
                        else
                                *offset += sizeof(*sg) + sizeof(u64);
                }
                sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = list->size[seg];
                *iova++ = list->dma_addr[seg];
                sg->segs++;
        }
}

static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
                               struct sk_buff *skb, u16 qidx)
{
        struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
        int hdr_len, tcp_data, seg_len, pkt_len, offset;
        struct nix_sqe_hdr_s *sqe_hdr;
        int first_sqe = sq->head;
        struct sg_list list;
        struct tso_t tso;

        hdr_len = tso_start(skb, &tso);

        /* Map SKB's fragments to DMA.
         * It's done here to avoid mapping for every TSO segment's packet.
         */
        if (otx2_dma_map_tso_skb(pfvf, sq, skb, first_sqe, hdr_len)) {
                dev_kfree_skb_any(skb);
                return;
        }

        netdev_tx_sent_queue(txq, skb->len);

        tcp_data = skb->len - hdr_len;
        while (tcp_data > 0) {
                char *hdr;

                seg_len = min_t(int, skb_shinfo(skb)->gso_size, tcp_data);
                tcp_data -= seg_len;

                /* Set SQE's SEND_HDR */
                memset(sq->sqe_base, 0, sq->sqe_size);
                sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
                otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
                offset = sizeof(*sqe_hdr);

                /* Add TSO segment's pkt header */
                hdr = sq->tso_hdrs->base + (sq->head * TSO_HEADER_SIZE);
                tso_build_hdr(skb, hdr, &tso, seg_len, tcp_data == 0);
                list.dma_addr[0] =
                        sq->tso_hdrs->iova + (sq->head * TSO_HEADER_SIZE);
                list.size[0] = hdr_len;
                list.num_segs = 1;

                /* Add TSO segment's payload data fragments */
                pkt_len = hdr_len;
                while (seg_len > 0) {
                        int size;

                        size = min_t(int, tso.size, seg_len);

                        list.size[list.num_segs] = size;
                        list.dma_addr[list.num_segs] =
                                otx2_tso_frag_dma_addr(sq, skb,
                                                       tso.next_frag_idx - 1,
                                                       (u64)tso.data, hdr_len,
                                                       first_sqe);
                        list.num_segs++;
                        pkt_len += size;
                        seg_len -= size;
                        tso_build_data(skb, &tso, size);
                }
                sqe_hdr->total = pkt_len;
                otx2_sqe_tso_add_sg(sq, &list, &offset);

                /* DMA mappings and skb needs to be freed only after last
                 * TSO segment is transmitted out. So set 'PNC' only for
                 * last segment. Also point last segment's sqe_id to first
                 * segment's SQE index where skb address and DMA mappings
                 * are saved.
                 */
                if (!tcp_data) {
                        sqe_hdr->pnc = 1;
                        sqe_hdr->sqe_id = first_sqe;
                        sq->sg[first_sqe].skb = (u64)skb;
                } else {
                        sqe_hdr->pnc = 0;
                }

                sqe_hdr->sizem1 = (offset / 16) - 1;

                /* Flush SQE to HW */
                pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
        }
}
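
/* Software TSO overview: the GSO super-packet is split into MSS-sized
 * segments. Each segment gets a freshly built header (copied into its slot
 * in the sq->tso_hdrs area) plus SG entries pointing into the already
 * mapped payload, and is flushed to hardware as its own SQE. Only the
 * final segment posts a Tx CQE, which is what eventually frees the skb.
 */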

static bool is_hw_tso_supported(struct otx2_nic *pfvf,
                                struct sk_buff *skb)
{
        int payload_len, last_seg_size;

        if (test_bit(HW_TSO, &pfvf->hw.cap_flag))
                return true;

        /* On 96xx A0, HW TSO not supported */
        if (!is_96xx_B0(pfvf->pdev))
                return false;

        /* HW has an issue due to which, when the payload of the last LSO
         * segment is shorter than 16 bytes, some header fields may not
         * be correctly modified, hence don't offload such TSO segments.
         */
        payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
        last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
        if (last_seg_size && last_seg_size < 16)
                return false;

        return true;
}
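
/* Example: with gso_size = 1448 and a 3000-byte TCP payload, the last
 * segment carries 3000 % 1448 = 104 bytes, which is fine; a 2900-byte
 * payload would leave 4 bytes in the last segment and fall back to
 * software TSO on the affected silicon.
 */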

static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
{
        if (!skb_shinfo(skb)->gso_size)
                return 1;

        /* HW TSO */
        if (is_hw_tso_supported(pfvf, skb))
                return 1;

        /* SW TSO : one SQE per TSO segment */
        return skb_shinfo(skb)->gso_segs;
}

static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
                              struct otx2_snd_queue *sq, int *offset)
{
        u64 iova;

        if (!skb_shinfo(skb)->gso_size &&
            skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                iova = sq->timestamps->iova + (sq->head * sizeof(u64));
                otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
        } else {
                skb_tx_timestamp(skb);
        }
}

bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
                        struct sk_buff *skb, u16 qidx)
{
        struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
        struct otx2_nic *pfvf = netdev_priv(netdev);
        int offset, num_segs, free_sqe;
        struct nix_sqe_hdr_s *sqe_hdr;

        /* Check if there is room for new SQE.
         * 'Num of SQBs freed to SQ's pool - SQ's Aura count'
         * will give free SQE count.
         */
        free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;

        if (free_sqe < sq->sqe_thresh ||
            free_sqe < otx2_get_sqe_count(pfvf, skb))
                return false;

        num_segs = skb_shinfo(skb)->nr_frags + 1;

        /* If SKB doesn't fit in a single SQE, linearize it.
         * TODO: Consider adding JUMP descriptor instead.
         */
        if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
                if (__skb_linearize(skb)) {
                        dev_kfree_skb_any(skb);
                        return true;
                }
                num_segs = skb_shinfo(skb)->nr_frags + 1;
        }

        if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
                /* Insert vlan tag before giving pkt to tso */
                if (skb_vlan_tag_present(skb))
                        skb = __vlan_hwaccel_push_inside(skb);
                otx2_sq_append_tso(pfvf, sq, skb, qidx);
                return true;
        }

        /* Set SQE's SEND_HDR.
         * Do not clear the first 64bit as it contains constant info.
         */
        memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
        sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
        otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
        offset = sizeof(*sqe_hdr);

        /* Add extended header if needed */
        otx2_sqe_add_ext(pfvf, sq, skb, &offset);

        /* Add SG subdesc with data frags */
        if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
                otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
                return false;
        }

        otx2_set_txtstamp(pfvf, skb, sq, &offset);

        sqe_hdr->sizem1 = (offset / 16) - 1;

        netdev_tx_sent_queue(txq, skb->len);

        /* Flush SQE to HW */
        pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);

        return true;
}
EXPORT_SYMBOL(otx2_sq_append_skb);
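
/* Return-value convention: otx2_sq_append_skb() returns false only when the
 * SQ lacks room or DMA mapping fails, signalling the caller to stop the
 * queue and retry later; packets that are consumed, including ones dropped
 * on linearization failure, return true so the caller does not requeue them.
 */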

void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
        struct nix_cqe_rx_s *cqe;
        int processed_cqe = 0;
        u64 iova, pa;

        while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
                if (!cqe->sg.subdc)
                        continue;
                processed_cqe++;
                if (cqe->sg.segs > 1) {
                        otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
                        continue;
                }
                iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
                pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
                otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
                put_page(virt_to_page(phys_to_virt(pa)));
        }

        /* Free CQEs to HW */
        otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
                     ((u64)cq->cq_idx << 32) | processed_cqe);
}

void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
        struct sk_buff *skb = NULL;
        struct otx2_snd_queue *sq;
        struct nix_cqe_tx_s *cqe;
        int processed_cqe = 0;
        struct sg_list *sg;

        sq = &pfvf->qset.sq[cq->cint_idx];

        while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) {
                sg = &sq->sg[cqe->comp.sqe_id];
                skb = (struct sk_buff *)sg->skb;
                if (skb) {
                        otx2_dma_unmap_skb_frags(pfvf, sg);
                        dev_kfree_skb_any(skb);
                        sg->skb = (u64)NULL;
                }
                processed_cqe++;
        }

        /* Free CQEs to HW */
        otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
                     ((u64)cq->cq_idx << 32) | processed_cqe);
}

int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
{
        struct msg_req *msg;
        int err;

        mutex_lock(&pfvf->mbox.lock);
        if (enable)
                msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
        else
                msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);

        if (!msg) {
                mutex_unlock(&pfvf->mbox.lock);
                return -ENOMEM;
        }

        err = otx2_sync_mbox_msg(&pfvf->mbox);
        mutex_unlock(&pfvf->mbox.lock);
        return err;
}