/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 */
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/bpf_trace.h>
36#include <net/udp_tunnel.h>
37#include <linux/ip.h>
38#include <net/ipv6.h>
39#include <net/tcp.h>
40#include <linux/if_ether.h>
41#include <linux/if_vlan.h>
42#include <net/ip6_checksum.h>
43#include "qede_ptp.h"
44
45#include <linux/qed/qed_if.h>
46#include "qede.h"
47
48
49
50
51int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
52{
53 struct sw_rx_data *sw_rx_data;
54 struct eth_rx_bd *rx_bd;
55 dma_addr_t mapping;
56 struct page *data;
57
	/* In case lazy-allocation is allowed, postpone allocation until the
	 * end of the NAPI run. We'd still need to make sure the Rx buffer
	 * isn't depleted.
	 */
62 if (allow_lazy && likely(rxq->filled_buffers > 12)) {
63 rxq->filled_buffers--;
64 return 0;
65 }
66
67 data = alloc_pages(GFP_ATOMIC, 0);
68 if (unlikely(!data))
69 return -ENOMEM;
70
	/* Map the entire page as it would be used
	 * for multiple RX buffer segment size mapping.
	 */
74 mapping = dma_map_page(rxq->dev, data, 0,
75 PAGE_SIZE, rxq->data_direction);
76 if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
77 __free_page(data);
78 return -ENOMEM;
79 }
80
81 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
82 sw_rx_data->page_offset = 0;
83 sw_rx_data->data = data;
84 sw_rx_data->mapping = mapping;
85
	/* Advance PROD and get BD pointer */
87 rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
88 WARN_ON(!rx_bd);
89 rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
90 rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
91 rxq->rx_headroom);
92
93 rxq->sw_rx_prod++;
94 rxq->filled_buffers++;
95
96 return 0;
97}

/* Unmap the data and free skb */
100int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
101{
102 u16 idx = txq->sw_tx_cons;
103 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
104 struct eth_tx_1st_bd *first_bd;
105 struct eth_tx_bd *tx_data_bd;
106 int bds_consumed = 0;
107 int nbds;
108 bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
109 int i, split_bd_len = 0;
110
111 if (unlikely(!skb)) {
112 DP_ERR(edev,
113 "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
114 idx, txq->sw_tx_cons, txq->sw_tx_prod);
115 return -1;
116 }
117
118 *len = skb->len;
119
120 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
121
122 bds_consumed++;
123
124 nbds = first_bd->data.nbds;
125
126 if (data_split) {
127 struct eth_tx_bd *split = (struct eth_tx_bd *)
128 qed_chain_consume(&txq->tx_pbl);
129 split_bd_len = BD_UNMAP_LEN(split);
130 bds_consumed++;
131 }
132 dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
133 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
134
	/* Unmap the data of the skb frags */
136 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
137 tx_data_bd = (struct eth_tx_bd *)
138 qed_chain_consume(&txq->tx_pbl);
139 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
140 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
141 }
142
143 while (bds_consumed++ < nbds)
144 qed_chain_consume(&txq->tx_pbl);
145
	/* Free skb */
147 dev_kfree_skb_any(skb);
148 txq->sw_tx_ring.skbs[idx].skb = NULL;
149 txq->sw_tx_ring.skbs[idx].flags = 0;
150
151 return 0;
152}

/* Unmap the data and free skb when mapping failed during start_xmit */
155static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
156 struct eth_tx_1st_bd *first_bd,
157 int nbd, bool data_split)
158{
159 u16 idx = txq->sw_tx_prod;
160 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
161 struct eth_tx_bd *tx_data_bd;
162 int i, split_bd_len = 0;
163
	/* Return prod to its position before this skb was handled */
165 qed_chain_set_prod(&txq->tx_pbl,
166 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
167
168 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
169
170 if (data_split) {
171 struct eth_tx_bd *split = (struct eth_tx_bd *)
172 qed_chain_produce(&txq->tx_pbl);
173 split_bd_len = BD_UNMAP_LEN(split);
174 nbd--;
175 }
176
177 dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
178 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
179
180
181 for (i = 0; i < nbd; i++) {
182 tx_data_bd = (struct eth_tx_bd *)
183 qed_chain_produce(&txq->tx_pbl);
184 if (tx_data_bd->nbytes)
185 dma_unmap_page(txq->dev,
186 BD_UNMAP_ADDR(tx_data_bd),
187 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
188 }
189
	/* Return again prod to its position before this skb was handled */
191 qed_chain_set_prod(&txq->tx_pbl,
192 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
193
	/* Free skb */
195 dev_kfree_skb_any(skb);
196 txq->sw_tx_ring.skbs[idx].skb = NULL;
197 txq->sw_tx_ring.skbs[idx].flags = 0;
198}
199
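/* Classify the skb for transmit-offload purposes and return a bitmask of
 * XMIT_* flags (plain, L4 checksum, LSO, tunnel encapsulation); packets that
 * need the IPv6 extension-header handling are flagged via *ipv6_ext.
 */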
200static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
201{
202 u32 rc = XMIT_L4_CSUM;
203 __be16 l3_proto;
204
205 if (skb->ip_summed != CHECKSUM_PARTIAL)
206 return XMIT_PLAIN;
207
208 l3_proto = vlan_get_protocol(skb);
209 if (l3_proto == htons(ETH_P_IPV6) &&
210 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
211 *ipv6_ext = 1;
212
213 if (skb->encapsulation) {
214 rc |= XMIT_ENC;
215 if (skb_is_gso(skb)) {
216 unsigned short gso_type = skb_shinfo(skb)->gso_type;
217
218 if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
219 (gso_type & SKB_GSO_GRE_CSUM))
220 rc |= XMIT_ENC_GSO_L4_CSUM;
221
222 rc |= XMIT_LSO;
223 return rc;
224 }
225 }
226
227 if (skb_is_gso(skb))
228 rc |= XMIT_LSO;
229
230 return rc;
231}
232
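/* The device cannot parse IPv6 extension headers on its own, so for such
 * packets the L4 header offset, pseudo-checksum mode and L4 protocol are
 * passed explicitly in the 2nd/3rd BDs.
 */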
233static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
234 struct eth_tx_2nd_bd *second_bd,
235 struct eth_tx_3rd_bd *third_bd)
236{
237 u8 l4_proto;
238 u16 bd2_bits1 = 0, bd2_bits2 = 0;
239
240 bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
241
242 bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
243 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
244 << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
245
246 bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
247 ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
248
249 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
250 l4_proto = ipv6_hdr(skb)->nexthdr;
251 else
252 l4_proto = ip_hdr(skb)->protocol;
253
254 if (l4_proto == IPPROTO_UDP)
255 bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
256
257 if (third_bd)
258 third_bd->data.bitfields |=
259 cpu_to_le16(((tcp_hdrlen(skb) / 4) &
260 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
261 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
262
263 second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
264 second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
265}
266
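/* DMA-map a single skb fragment and record the bus address and length in the
 * supplied Tx BD.
 */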
267static int map_frag_to_bd(struct qede_tx_queue *txq,
268 skb_frag_t *frag, struct eth_tx_bd *bd)
269{
270 dma_addr_t mapping;
271
272
273 mapping = skb_frag_dma_map(txq->dev, frag, 0,
274 skb_frag_size(frag), DMA_TO_DEVICE);
275 if (unlikely(dma_mapping_error(txq->dev, mapping)))
276 return -ENOMEM;
277
278
279 BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
280
281 return 0;
282}
283
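/* Return the total header length counted from skb->data, up to and including
 * the TCP header (the inner TCP header for encapsulated packets).
 */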
284static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
285{
286 if (is_encap_pkt)
287 return (skb_inner_transport_header(skb) +
288 inner_tcp_hdrlen(skb) - skb->data);
289 else
290 return (skb_transport_header(skb) +
291 tcp_hdrlen(skb) - skb->data);
292}
293
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
295#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
296static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
297{
298 int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
299
300 if (xmit_type & XMIT_LSO) {
301 int hlen;
302
303 hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
304
305
306 if (skb_headlen(skb) > hlen)
307 allowed_frags--;
308 }
309
310 return (skb_shinfo(skb)->nr_frags > allowed_frags);
311}
312#endif
313
314static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
315{
	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
319 wmb();
320 barrier();
321 writel(txq->tx_db.raw, txq->doorbell_addr);

	/* Fence required to flush the write combined buffer, since another
	 * CPU may write to the same doorbell address and data may be lost
	 * due to relaxed order nature of write combined bar.
	 */
329 mmiowb();
330}
331
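/* Transmit an XDP_TX frame on the per-queue XDP Tx ring. The Rx buffer's DMA
 * mapping is reused, so only a dma_sync back to the device is needed before
 * handing the buffer to the firmware.
 */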
332static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
333 struct sw_rx_data *metadata, u16 padding, u16 length)
334{
335 struct qede_tx_queue *txq = fp->xdp_tx;
336 struct eth_tx_1st_bd *first_bd;
337 u16 idx = txq->sw_tx_prod;
338 u16 val;
339
340 if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
341 txq->stopped_cnt++;
342 return -ENOMEM;
343 }
344
345 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
346
347 memset(first_bd, 0, sizeof(*first_bd));
348 first_bd->data.bd_flags.bitfields =
349 BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
350
351 val = (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
352 ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
353
354 first_bd->data.bitfields |= cpu_to_le16(val);
355 first_bd->data.nbds = 1;
356
357
358 BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
359
360
361
362
363 dma_sync_single_for_device(&edev->pdev->dev,
364 metadata->mapping + padding,
365 length, PCI_DMA_TODEVICE);
366
367 txq->sw_tx_ring.xdp[idx].page = metadata->data;
368 txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
369 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
370
371
372 fp->xdp_xmit = 1;
373
374 return 0;
375}
376
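/* Returns true if the hardware consumer index differs from the driver's,
 * i.e. there are Tx completions left to process.
 */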
377int qede_txq_has_work(struct qede_tx_queue *txq)
378{
379 u16 hw_bd_cons;
380
381
382 barrier();
383 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
384 if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
385 return 0;
386
387 return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
388}
389
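/* Reclaim completed XDP_TX buffers: unmap and free the pages and advance the
 * software consumer.
 */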
390static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
391{
392 u16 hw_bd_cons, idx;
393
394 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
395 barrier();
396
397 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
398 qed_chain_consume(&txq->tx_pbl);
399 idx = txq->sw_tx_cons;
400
401 dma_unmap_page(&edev->pdev->dev,
402 txq->sw_tx_ring.xdp[idx].mapping,
403 PAGE_SIZE, DMA_BIDIRECTIONAL);
404 __free_page(txq->sw_tx_ring.xdp[idx].page);
405
406 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
407 txq->xmit_pkts++;
408 }
409}
410
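/* Process Tx completions for a regular Tx queue: free the transmitted skbs
 * and re-wake the netdev queue if it was stopped and there is room again.
 */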
411static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
412{
413 struct netdev_queue *netdev_txq;
414 u16 hw_bd_cons;
415 unsigned int pkts_compl = 0, bytes_compl = 0;
416 int rc;
417
418 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
419
420 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
421 barrier();
422
423 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
424 int len = 0;
425
426 rc = qede_free_tx_pkt(edev, txq, &len);
427 if (rc) {
428 DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
429 hw_bd_cons,
430 qed_chain_get_cons_idx(&txq->tx_pbl));
431 break;
432 }
433
434 bytes_compl += len;
435 pkts_compl++;
436 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
437 txq->xmit_pkts++;
438 }
439
440 netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
441
	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
451 smp_mb();
452
453 if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
		/* Taking tx_lock is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in qede_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */
464 __netif_tx_lock(netdev_txq, smp_processor_id());
465
466 if ((netif_tx_queue_stopped(netdev_txq)) &&
467 (edev->state == QEDE_STATE_OPEN) &&
468 (qed_chain_get_elem_left(&txq->tx_pbl)
469 >= (MAX_SKB_FRAGS + 1))) {
470 netif_tx_wake_queue(netdev_txq);
471 DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
472 "Wake queue was called\n");
473 }
474
475 __netif_tx_unlock(netdev_txq);
476 }
477
478 return 0;
479}
480
481bool qede_has_rx_work(struct qede_rx_queue *rxq)
482{
483 u16 hw_comp_cons, sw_comp_cons;
484
485
486 barrier();
487
488 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
489 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
490
491 return hw_comp_cons != sw_comp_cons;
492}
493
494static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
495{
496 qed_chain_consume(&rxq->rx_bd_ring);
497 rxq->sw_rx_cons++;
498}
499
/* Re-post the current buffer on the Rx BD ring, reusing its existing DMA
 * mapping, so it can serve a future packet.
 */
503static inline void qede_reuse_page(struct qede_rx_queue *rxq,
504 struct sw_rx_data *curr_cons)
505{
506 struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
507 struct sw_rx_data *curr_prod;
508 dma_addr_t new_mapping;
509
510 curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
511 *curr_prod = *curr_cons;
512
513 new_mapping = curr_prod->mapping + curr_prod->page_offset;
514
515 rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
516 rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
517 rxq->rx_headroom);
518
519 rxq->sw_rx_prod++;
520 curr_cons->data = NULL;
521}
522
/* In case of allocation failures, reuse buffers from the consumer index
 * to produce buffers for the firmware.
 */
526void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
527{
528 struct sw_rx_data *curr_cons;
529
530 for (; count > 0; count--) {
531 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
532 qede_reuse_page(rxq, curr_cons);
533 qede_rx_bd_ring_consume(rxq);
534 }
535}
536
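/* Advance to the next buffer segment within the current page. Once the page
 * is exhausted, allocate a fresh replacement page and unmap the old one;
 * otherwise just bump the page refcount and re-post the same page.
 */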
537static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
538 struct sw_rx_data *curr_cons)
539{
	/* Move to the next segment in the page */
541 curr_cons->page_offset += rxq->rx_buf_seg_size;
542
543 if (curr_cons->page_offset == PAGE_SIZE) {
544 if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
			/* Since we failed to allocate a new buffer,
			 * the current buffer can be used again.
			 */
548 curr_cons->page_offset -= rxq->rx_buf_seg_size;
549
550 return -ENOMEM;
551 }
552
553 dma_unmap_page(rxq->dev, curr_cons->mapping,
554 PAGE_SIZE, rxq->data_direction);
555 } else {
		/* Increment refcount of the page, as we don't want the
		 * network stack to take ownership of a page which can be
		 * recycled multiple times by the driver.
		 */
560 page_ref_inc(curr_cons->data);
561 qede_reuse_page(rxq, curr_cons);
562 }
563
564 return 0;
565}
566
567void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
568{
569 u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
570 u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
571 struct eth_rx_prod_data rx_prods = {0};
572
573
574 rx_prods.bd_prod = cpu_to_le16(bd_prod);
575 rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
576
	/* Make sure that the BD and SGE data is updated before updating the
	 * producers, since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
581 wmb();
582
583 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
584 (u32 *)&rx_prods);
585
	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the napi lock is released and another qede_poll is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell.
	 */
592 mmiowb();
593}
594
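/* Extract the RSS hash reported in the CQE and set it on the skb, indicating
 * whether it covers L3 only or L3+L4.
 */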
595static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
596{
597 enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
598 enum rss_hash_type htype;
599 u32 hash = 0;
600
601 htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
602 if (htype) {
603 hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
604 (htype == RSS_HASH_TYPE_IPV6)) ?
605 PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
606 hash = le32_to_cpu(rss_hash);
607 }
608 skb_set_hash(skb, hash, hash_type);
609}
610
611static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
612{
613 skb_checksum_none_assert(skb);
614
615 if (csum_flag & QEDE_CSUM_UNNECESSARY)
616 skb->ip_summed = CHECKSUM_UNNECESSARY;
617
618 if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) {
619 skb->csum_level = 1;
620 skb->encapsulation = 1;
621 }
622}
623
624static inline void qede_skb_receive(struct qede_dev *edev,
625 struct qede_fastpath *fp,
626 struct qede_rx_queue *rxq,
627 struct sk_buff *skb, u16 vlan_tag)
628{
629 if (vlan_tag)
630 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
631
632 napi_gro_receive(&fp->napi, skb);
633}
634
635static void qede_set_gro_params(struct qede_dev *edev,
636 struct sk_buff *skb,
637 struct eth_fast_path_rx_tpa_start_cqe *cqe)
638{
639 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
640
641 if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
642 PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
643 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
644 else
645 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
646
647 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
648 cqe->header_len;
649}
650
651static int qede_fill_frag_skb(struct qede_dev *edev,
652 struct qede_rx_queue *rxq,
653 u8 tpa_agg_index, u16 len_on_bd)
654{
655 struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
656 NUM_RX_BDS_MAX];
657 struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
658 struct sk_buff *skb = tpa_info->skb;
659
660 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
661 goto out;
662
	/* Add one frag and update the appropriate fields in the skb */
664 skb_fill_page_desc(skb, tpa_info->frag_id++,
665 current_bd->data, current_bd->page_offset,
666 len_on_bd);
667
668 if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
		/* Incr page ref count to reuse on allocation failure
		 * so that it doesn't get freed while freeing SKB.
		 */
672 page_ref_inc(current_bd->data);
673 goto out;
674 }
675
676 qed_chain_consume(&rxq->rx_bd_ring);
677 rxq->sw_rx_cons++;
678
679 skb->data_len += len_on_bd;
680 skb->truesize += rxq->rx_buf_seg_size;
681 skb->len += len_on_bd;
682
683 return 0;
684
685out:
686 tpa_info->state = QEDE_AGG_STATE_ERROR;
687 qede_recycle_rx_bd_ring(rxq, 1);
688
689 return -ENOMEM;
690}
691
692static bool qede_tunn_exist(u16 flag)
693{
694 return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
695 PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
696}
697
698static u8 qede_check_tunn_csum(u16 flag)
699{
700 u16 csum_flag = 0;
701 u8 tcsum = 0;
702
703 if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
704 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
705 csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
706 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
707
708 if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
709 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
710 csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
711 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
712 tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
713 }
714
715 csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
716 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
717 PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
718 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
719
720 if (csum_flag & flag)
721 return QEDE_CSUM_ERROR;
722
723 return QEDE_CSUM_UNNECESSARY | tcsum;
724}
725
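/* Begin a hardware TPA (LRO-like) aggregation: swap the pre-allocated
 * replacement buffer into the Rx ring, allocate the aggregation skb and
 * record the per-aggregation state used by the _cont/_end handlers.
 */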
726static void qede_tpa_start(struct qede_dev *edev,
727 struct qede_rx_queue *rxq,
728 struct eth_fast_path_rx_tpa_start_cqe *cqe)
729{
730 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
731 struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
732 struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
733 struct sw_rx_data *replace_buf = &tpa_info->buffer;
734 dma_addr_t mapping = tpa_info->buffer_mapping;
735 struct sw_rx_data *sw_rx_data_cons;
736 struct sw_rx_data *sw_rx_data_prod;
737
738 sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
739 sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
740
	/* Use pre-allocated replacement buffer - we can't release the
	 * aggregation's buffer until it is over, and we don't want to risk
	 * allocation failing here, so re-allocate when aggregation ends.
	 */
745 sw_rx_data_prod->mapping = replace_buf->mapping;
746
747 sw_rx_data_prod->data = replace_buf->data;
748 rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
749 rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
750 sw_rx_data_prod->page_offset = replace_buf->page_offset;
751
752 rxq->sw_rx_prod++;
753
	/* Move the partial buffer from cons into the aggregation info
	 * (don't unmap yet) and save its mapping.
	 */
757 tpa_info->buffer = *sw_rx_data_cons;
758 mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
759 le32_to_cpu(rx_bd_cons->addr.lo));
760
761 tpa_info->buffer_mapping = mapping;
762 rxq->sw_rx_cons++;
763
	/* Set tpa state to start only if we are able to allocate an skb
	 * for this aggregation; otherwise mark as error and the aggregation
	 * will be dropped.
	 */
768 tpa_info->skb = netdev_alloc_skb(edev->ndev,
769 le16_to_cpu(cqe->len_on_first_bd));
770 if (unlikely(!tpa_info->skb)) {
771 DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
772 tpa_info->state = QEDE_AGG_STATE_ERROR;
773 goto cons_buf;
774 }
775
	/* Start filling in the aggregation info */
777 skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
778 tpa_info->frag_id = 0;
779 tpa_info->state = QEDE_AGG_STATE_START;
780
	/* Store some information from the first CQE */
782 tpa_info->start_cqe_placement_offset = cqe->placement_offset;
783 tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
784 if ((le16_to_cpu(cqe->pars_flags.flags) >>
785 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
786 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
787 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
788 else
789 tpa_info->vlan_tag = 0;
790
791 qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
792
793
794 qede_set_gro_params(edev, tpa_info->skb, cqe);
795
796cons_buf:
797 if (likely(cqe->ext_bd_len_list[0]))
798 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
799 le16_to_cpu(cqe->ext_bd_len_list[0]));
800
801 if (unlikely(cqe->ext_bd_len_list[1])) {
802 DP_ERR(edev,
803 "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
804 tpa_info->state = QEDE_AGG_STATE_ERROR;
805 }
806}
807
808#ifdef CONFIG_INET
809static void qede_gro_ip_csum(struct sk_buff *skb)
810{
811 const struct iphdr *iph = ip_hdr(skb);
812 struct tcphdr *th;
813
814 skb_set_transport_header(skb, sizeof(struct iphdr));
815 th = tcp_hdr(skb);
816
817 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
818 iph->saddr, iph->daddr, 0);
819
820 tcp_gro_complete(skb);
821}
822
823static void qede_gro_ipv6_csum(struct sk_buff *skb)
824{
825 struct ipv6hdr *iph = ipv6_hdr(skb);
826 struct tcphdr *th;
827
828 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
829 th = tcp_hdr(skb);
830
831 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
832 &iph->saddr, &iph->daddr, 0);
833 tcp_gro_complete(skb);
834}
835#endif
836
837static void qede_gro_receive(struct qede_dev *edev,
838 struct qede_fastpath *fp,
839 struct sk_buff *skb,
840 u16 vlan_tag)
841{
	/* FW can send a single MTU-sized packet from the GRO flow due to
	 * aggregation timeout/last segment etc., which is not expected to
	 * be a GRO packet. If the skb has zero frags, simply push it to the
	 * stack as a non-GSO skb.
	 */
847 if (unlikely(!skb->data_len)) {
848 skb_shinfo(skb)->gso_type = 0;
849 skb_shinfo(skb)->gso_size = 0;
850 goto send_skb;
851 }
852
853#ifdef CONFIG_INET
854 if (skb_shinfo(skb)->gso_size) {
855 skb_reset_network_header(skb);
856
857 switch (skb->protocol) {
858 case htons(ETH_P_IP):
859 qede_gro_ip_csum(skb);
860 break;
861 case htons(ETH_P_IPV6):
862 qede_gro_ipv6_csum(skb);
863 break;
864 default:
865 DP_ERR(edev,
866 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
867 ntohs(skb->protocol));
868 }
869 }
870#endif
871
872send_skb:
873 skb_record_rx_queue(skb, fp->rxq->rxq_id);
874 qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
875}
876
877static inline void qede_tpa_cont(struct qede_dev *edev,
878 struct qede_rx_queue *rxq,
879 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
880{
881 int i;
882
883 for (i = 0; cqe->len_list[i]; i++)
884 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
885 le16_to_cpu(cqe->len_list[i]));
886
887 if (unlikely(i > 1))
888 DP_ERR(edev,
889 "Strange - TPA cont with more than a single len_list entry\n");
890}
891
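/* Complete a TPA aggregation: attach the remaining length entries as frags,
 * copy the first-BD data into the skb's linear area, validate the totals and
 * hand the skb to GRO.
 */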
892static int qede_tpa_end(struct qede_dev *edev,
893 struct qede_fastpath *fp,
894 struct eth_fast_path_rx_tpa_end_cqe *cqe)
895{
896 struct qede_rx_queue *rxq = fp->rxq;
897 struct qede_agg_info *tpa_info;
898 struct sk_buff *skb;
899 int i;
900
901 tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
902 skb = tpa_info->skb;
903
904 for (i = 0; cqe->len_list[i]; i++)
905 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
906 le16_to_cpu(cqe->len_list[i]));
907 if (unlikely(i > 1))
908 DP_ERR(edev,
909 "Strange - TPA emd with more than a single len_list entry\n");
910
911 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
912 goto err;
913
914
915 if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
916 DP_ERR(edev,
917 "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
918 cqe->num_of_bds, tpa_info->frag_id);
919 if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
920 DP_ERR(edev,
921 "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
922 le16_to_cpu(cqe->total_packet_len), skb->len);
923
924 memcpy(skb->data,
925 page_address(tpa_info->buffer.data) +
926 tpa_info->start_cqe_placement_offset +
927 tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);
928
929
930 skb->protocol = eth_type_trans(skb, edev->ndev);
931 skb->ip_summed = CHECKSUM_UNNECESSARY;
932
933
	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
936 NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
937
938 qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
939
940 tpa_info->state = QEDE_AGG_STATE_NONE;
941
942 return 1;
943err:
944 tpa_info->state = QEDE_AGG_STATE_NONE;
945 dev_kfree_skb_any(tpa_info->skb);
946 tpa_info->skb = NULL;
947 return 0;
948}
949
950static u8 qede_check_notunn_csum(u16 flag)
951{
952 u16 csum_flag = 0;
953 u8 csum = 0;
954
955 if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
956 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
957 csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
958 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
959 csum = QEDE_CSUM_UNNECESSARY;
960 }
961
962 csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
963 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
964
965 if (csum_flag & flag)
966 return QEDE_CSUM_ERROR;
967
968 return csum;
969}
970
971static u8 qede_check_csum(u16 flag)
972{
973 if (!qede_tunn_exist(flag))
974 return qede_check_notunn_csum(flag);
975 else
976 return qede_check_tunn_csum(flag);
977}
978
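/* Returns true if the (possibly tunneled) packet is an IP fragment. Fragments
 * legitimately fail L4 checksum validation, so callers count them rather than
 * drop them.
 */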
979static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
980 u16 flag)
981{
982 u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
983
984 if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
985 ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
986 (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
987 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
988 return true;
989
990 return false;
991}
992
/* Return true iff packet is to be passed to stack */
994static bool qede_rx_xdp(struct qede_dev *edev,
995 struct qede_fastpath *fp,
996 struct qede_rx_queue *rxq,
997 struct bpf_prog *prog,
998 struct sw_rx_data *bd,
999 struct eth_fast_path_rx_reg_cqe *cqe,
1000 u16 *data_offset, u16 *len)
1001{
1002 struct xdp_buff xdp;
1003 enum xdp_action act;
1004
1005 xdp.data_hard_start = page_address(bd->data);
1006 xdp.data = xdp.data_hard_start + *data_offset;
1007 xdp_set_data_meta_invalid(&xdp);
1008 xdp.data_end = xdp.data + *len;
1009
	/* Queues always have a full reset currently, so for the time
	 * being until there's atomic program replace just mark read
	 * side for map helpers.
	 */
1014 rcu_read_lock();
1015 act = bpf_prog_run_xdp(prog, &xdp);
1016 rcu_read_unlock();
1017
1018
1019 *data_offset = xdp.data - xdp.data_hard_start;
1020 *len = xdp.data_end - xdp.data;
1021
1022 if (act == XDP_PASS)
1023 return true;
1024
1025
1026 rxq->xdp_no_pass++;
1027
1028 switch (act) {
1029 case XDP_TX:
1030
1031 if (qede_alloc_rx_buffer(rxq, true)) {
1032 qede_recycle_rx_bd_ring(rxq, 1);
1033 trace_xdp_exception(edev->ndev, prog, act);
1034 return false;
1035 }
1036
		/* Now if there's a transmission problem, we'd still have to
		 * throw the current buffer, as the replacement was already
		 * allocated.
		 */
1040 if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) {
1041 dma_unmap_page(rxq->dev, bd->mapping,
1042 PAGE_SIZE, DMA_BIDIRECTIONAL);
1043 __free_page(bd->data);
1044 trace_xdp_exception(edev->ndev, prog, act);
1045 }
1046
1047
1048 qede_rx_bd_ring_consume(rxq);
1049 return false;
1050
1051 default:
1052 bpf_warn_invalid_xdp_action(act);
1053 case XDP_ABORTED:
1054 trace_xdp_exception(edev->ndev, prog, act);
1055 case XDP_DROP:
1056 qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
1057 }
1058
1059 return false;
1060}
1061
1062static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
1063 struct qede_rx_queue *rxq,
1064 struct sw_rx_data *bd, u16 len,
1065 u16 pad)
1066{
1067 unsigned int offset = bd->page_offset + pad;
1068 struct skb_frag_struct *frag;
1069 struct page *page = bd->data;
1070 unsigned int pull_len;
1071 struct sk_buff *skb;
1072 unsigned char *va;
1073
1074
1075 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
1076 if (unlikely(!skb))
1077 return NULL;
1078
	/* Copy data into SKB - if it's small, we can simply copy it and
	 * re-use the already allocated & mapped memory.
	 */
1082 if (len + pad <= edev->rx_copybreak) {
1083 skb_put_data(skb, page_address(page) + offset, len);
1084 qede_reuse_page(rxq, bd);
1085 goto out;
1086 }
1087
1088 frag = &skb_shinfo(skb)->frags[0];
1089
1090 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1091 page, offset, len, rxq->rx_buf_seg_size);
1092
1093 va = skb_frag_address(frag);
1094 pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
1095
1096
1097 memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
1098
1099
1100 skb_frag_size_sub(frag, pull_len);
1101 frag->page_offset += pull_len;
1102 skb->data_len -= pull_len;
1103 skb->tail += pull_len;
1104
1105 if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
		/* Incr page ref count to reuse on allocation failure so
		 * that it doesn't get freed while freeing the SKB (as it is
		 * still mapped there).
		 */
1110 page_ref_inc(page);
1111 dev_kfree_skb_any(skb);
1112 return NULL;
1113 }
1114
1115out:
1116
1117 qede_rx_bd_ring_consume(rxq);
1118 return skb;
1119}
1120
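/* A jumbo packet spans multiple PAGE_SIZEd Rx buffers; attach each additional
 * BD's page as an skb frag. Returns the number of BDs that could not be
 * mapped (0 on success).
 */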
1121static int qede_rx_build_jumbo(struct qede_dev *edev,
1122 struct qede_rx_queue *rxq,
1123 struct sk_buff *skb,
1124 struct eth_fast_path_rx_reg_cqe *cqe,
1125 u16 first_bd_len)
1126{
1127 u16 pkt_len = le16_to_cpu(cqe->pkt_len);
1128 struct sw_rx_data *bd;
1129 u16 bd_cons_idx;
1130 u8 num_frags;
1131
1132 pkt_len -= first_bd_len;
1133
1134
1135 for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
1136 u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1137 pkt_len;
1138
1139 if (unlikely(!cur_size)) {
1140 DP_ERR(edev,
1141 "Still got %d BDs for mapping jumbo, but length became 0\n",
1142 num_frags);
1143 goto out;
1144 }
1145
1146
1147 if (unlikely(qede_alloc_rx_buffer(rxq, true)))
1148 goto out;
1149
		/* Now that we've allocated the replacement buffer,
		 * we can safely consume the next BD and map it to the SKB.
		 */
1153 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1154 bd = &rxq->sw_rx_ring[bd_cons_idx];
1155 qede_rx_bd_ring_consume(rxq);
1156
1157 dma_unmap_page(rxq->dev, bd->mapping,
1158 PAGE_SIZE, DMA_FROM_DEVICE);
1159
1160 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
1161 bd->data, 0, cur_size);
1162
1163 skb->truesize += PAGE_SIZE;
1164 skb->data_len += cur_size;
1165 skb->len += cur_size;
1166 pkt_len -= cur_size;
1167 }
1168
1169 if (unlikely(pkt_len))
1170 DP_ERR(edev,
1171 "Mapped all BDs of jumbo, but still have %d bytes\n",
1172 pkt_len);
1173
1174out:
1175 return num_frags;
1176}
1177
1178static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
1179 struct qede_fastpath *fp,
1180 struct qede_rx_queue *rxq,
1181 union eth_rx_cqe *cqe,
1182 enum eth_rx_cqe_type type)
1183{
1184 switch (type) {
1185 case ETH_RX_CQE_TYPE_TPA_START:
1186 qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
1187 return 0;
1188 case ETH_RX_CQE_TYPE_TPA_CONT:
1189 qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
1190 return 0;
1191 case ETH_RX_CQE_TYPE_TPA_END:
1192 return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
1193 default:
1194 return 0;
1195 }
1196}
1197
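/* Handle a single Rx completion: slow-path events, TPA CQEs and regular
 * packets (including the optional XDP pass). Returns the number of packets
 * delivered to the stack (0 or 1).
 */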
1198static int qede_rx_process_cqe(struct qede_dev *edev,
1199 struct qede_fastpath *fp,
1200 struct qede_rx_queue *rxq)
1201{
1202 struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
1203 struct eth_fast_path_rx_reg_cqe *fp_cqe;
1204 u16 len, pad, bd_cons_idx, parse_flag;
1205 enum eth_rx_cqe_type cqe_type;
1206 union eth_rx_cqe *cqe;
1207 struct sw_rx_data *bd;
1208 struct sk_buff *skb;
1209 __le16 flags;
1210 u8 csum_flag;
1211
1212
1213 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
1214 cqe_type = cqe->fast_path_regular.type;
1215
1216
1217 if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
1218 struct eth_slow_path_rx_cqe *sp_cqe;
1219
1220 sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
1221 edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
1222 return 0;
1223 }
1224
1225
1226 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
1227 return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
1228
	/* Get the data from the SW ring; consume it only after it's evident
	 * we wouldn't recycle it.
	 */
1232 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1233 bd = &rxq->sw_rx_ring[bd_cons_idx];
1234
1235 fp_cqe = &cqe->fast_path_regular;
1236 len = le16_to_cpu(fp_cqe->len_on_first_bd);
1237 pad = fp_cqe->placement_offset + rxq->rx_headroom;
1238
1239
1240 if (xdp_prog)
1241 if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
1242 &pad, &len))
1243 return 0;
1244
1245
1246 flags = cqe->fast_path_regular.pars_flags.flags;
1247 parse_flag = le16_to_cpu(flags);
1248
1249 csum_flag = qede_check_csum(parse_flag);
1250 if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
1251 if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) {
1252 rxq->rx_ip_frags++;
1253 } else {
1254 DP_NOTICE(edev,
1255 "CQE has error, flags = %x, dropping incoming packet\n",
1256 parse_flag);
1257 rxq->rx_hw_errors++;
1258 qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
1259 return 0;
1260 }
1261 }
1262
	/* Basic validation passed; need to prepare an SKB. This would also
	 * guarantee to finally consume the first BD upon success.
	 */
1266 skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
1267 if (!skb) {
1268 rxq->rx_alloc_errors++;
1269 qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
1270 return 0;
1271 }
1272
	/* In case of a jumbo packet, several PAGE_SIZEd buffers will be
	 * pointed to by a single CQE.
	 */
1276 if (fp_cqe->bd_num > 1) {
1277 u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
1278 fp_cqe, len);
1279
1280 if (unlikely(unmapped_frags > 0)) {
1281 qede_recycle_rx_bd_ring(rxq, unmapped_frags);
1282 dev_kfree_skb_any(skb);
1283 return 0;
1284 }
1285 }
1286
1287
1288 skb->protocol = eth_type_trans(skb, edev->ndev);
1289 qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
1290 qede_set_skb_csum(skb, csum_flag);
1291 skb_record_rx_queue(skb, rxq->rxq_id);
1292 qede_ptp_record_rx_ts(edev, cqe, skb);
1293
1294
1295 qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
1296
1297 return 1;
1298}
1299
1300static int qede_rx_int(struct qede_fastpath *fp, int budget)
1301{
1302 struct qede_rx_queue *rxq = fp->rxq;
1303 struct qede_dev *edev = fp->edev;
1304 int work_done = 0, rcv_pkts = 0;
1305 u16 hw_comp_cons, sw_comp_cons;
1306
1307 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
1308 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1309
1310
	/* Memory barrier to prevent the CPU from doing speculative reads of
	 * CQE/BD in the while-loop before reading hw_comp_cons. If the CQE is
	 * read before it is written by FW, then FW writes CQE and SB, and then
	 * the CPU reads hw_comp_cons, it will use an old CQE.
	 */
1315 rmb();
1316
1317
1318 while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
1319 rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
1320 qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1321 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1322 work_done++;
1323 }
1324
1325 rxq->rcv_pkts += rcv_pkts;
1326
1327
1328 while (rxq->num_rx_buffers - rxq->filled_buffers)
1329 if (qede_alloc_rx_buffer(rxq, false))
1330 break;
1331
1332
1333 qede_update_rx_prod(edev, rxq);
1334
1335 return work_done;
1336}
1337
1338static bool qede_poll_is_more_work(struct qede_fastpath *fp)
1339{
1340 qed_sb_update_sb_idx(fp->sb_info);
1341
	/* The *_has_*_work() checks below read the status block, so we must
	 * ensure the status block indices have actually been read
	 * (qed_sb_update_sb_idx above) prior to those checks; otherwise we
	 * could acknowledge a "newer" status block value to HW while missing
	 * work that was already posted.
	 */
1352 rmb();
1353
1354 if (likely(fp->type & QEDE_FASTPATH_RX))
1355 if (qede_has_rx_work(fp->rxq))
1356 return true;
1357
1358 if (fp->type & QEDE_FASTPATH_XDP)
1359 if (qede_txq_has_work(fp->xdp_tx))
1360 return true;
1361
1362 if (likely(fp->type & QEDE_FASTPATH_TX))
1363 if (qede_txq_has_work(fp->txq))
1364 return true;
1365
1366 return false;
1367}
1368
1369
1370
1371
1372int qede_poll(struct napi_struct *napi, int budget)
1373{
1374 struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
1375 napi);
1376 struct qede_dev *edev = fp->edev;
1377 int rx_work_done = 0;
1378
1379 if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
1380 qede_tx_int(edev, fp->txq);
1381
1382 if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
1383 qede_xdp_tx_int(edev, fp->xdp_tx);
1384
1385 rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
1386 qede_has_rx_work(fp->rxq)) ?
1387 qede_rx_int(fp, budget) : 0;
1388 if (rx_work_done < budget) {
1389 if (!qede_poll_is_more_work(fp)) {
1390 napi_complete_done(napi, rx_work_done);
1391
			/* Update and re-enable interrupts */
1393 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
1394 } else {
1395 rx_work_done = budget;
1396 }
1397 }
1398
1399 if (fp->xdp_xmit) {
1400 u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
1401
1402 fp->xdp_xmit = 0;
1403 fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
1404 qede_update_tx_producer(fp->xdp_tx);
1405 }
1406
1407 return rx_work_done;
1408}
1409
1410irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
1411{
1412 struct qede_fastpath *fp = fp_cookie;
1413
	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
1415
1416 napi_schedule_irqoff(&fp->napi);
1417 return IRQ_HANDLED;
1418}
1419
1420
1421netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1422{
1423 struct qede_dev *edev = netdev_priv(ndev);
1424 struct netdev_queue *netdev_txq;
1425 struct qede_tx_queue *txq;
1426 struct eth_tx_1st_bd *first_bd;
1427 struct eth_tx_2nd_bd *second_bd = NULL;
1428 struct eth_tx_3rd_bd *third_bd = NULL;
1429 struct eth_tx_bd *tx_data_bd = NULL;
1430 u16 txq_index, val = 0;
1431 u8 nbd = 0;
1432 dma_addr_t mapping;
1433 int rc, frag_idx = 0, ipv6_ext = 0;
1434 u8 xmit_type;
1435 u16 idx;
1436 u16 hlen;
1437 bool data_split = false;
1438
1439
1440 txq_index = skb_get_queue_mapping(skb);
1441 WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
1442 txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
1443 netdev_txq = netdev_get_tx_queue(ndev, txq_index);
1444
1445 WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
1446
1447 xmit_type = qede_xmit_type(skb, &ipv6_ext);
1448
1449#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
1450 if (qede_pkt_req_lin(skb, xmit_type)) {
1451 if (skb_linearize(skb)) {
1452 DP_NOTICE(edev,
1453 "SKB linearization failed - silently dropping this SKB\n");
1454 dev_kfree_skb_any(skb);
1455 return NETDEV_TX_OK;
1456 }
1457 }
1458#endif
1459
1460
1461 idx = txq->sw_tx_prod;
1462 txq->sw_tx_ring.skbs[idx].skb = skb;
1463 first_bd = (struct eth_tx_1st_bd *)
1464 qed_chain_produce(&txq->tx_pbl);
1465 memset(first_bd, 0, sizeof(*first_bd));
1466 first_bd->data.bd_flags.bitfields =
1467 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
1468
1469 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
1470 qede_ptp_tx_ts(edev, skb);
1471
1472
1473 mapping = dma_map_single(txq->dev, skb->data,
1474 skb_headlen(skb), DMA_TO_DEVICE);
1475 if (unlikely(dma_mapping_error(txq->dev, mapping))) {
1476 DP_NOTICE(edev, "SKB mapping failed\n");
1477 qede_free_failed_tx_pkt(txq, first_bd, 0, false);
1478 qede_update_tx_producer(txq);
1479 return NETDEV_TX_OK;
1480 }
1481 nbd++;
1482 BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
1483
	/* In case there is IPv6 with extension headers or LSO we need 2nd and
	 * 3rd BDs.
	 */
1487 if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
1488 second_bd = (struct eth_tx_2nd_bd *)
1489 qed_chain_produce(&txq->tx_pbl);
1490 memset(second_bd, 0, sizeof(*second_bd));
1491
1492 nbd++;
1493 third_bd = (struct eth_tx_3rd_bd *)
1494 qed_chain_produce(&txq->tx_pbl);
1495 memset(third_bd, 0, sizeof(*third_bd));
1496
1497 nbd++;
1498
1499 tx_data_bd = (struct eth_tx_bd *)second_bd;
1500 }
1501
1502 if (skb_vlan_tag_present(skb)) {
1503 first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
1504 first_bd->data.bd_flags.bitfields |=
1505 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
1506 }
1507
1508
1509 if (xmit_type & XMIT_L4_CSUM) {
		/* We don't re-calculate the IP checksum as it is already done
		 * by the upper stack.
		 */
1513 first_bd->data.bd_flags.bitfields |=
1514 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
1515
1516 if (xmit_type & XMIT_ENC) {
1517 first_bd->data.bd_flags.bitfields |=
1518 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1519
1520 val |= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
1521 }
1522
		/* Legacy FW had flipped behavior in regard to this bit -
		 * i.e., it needed to be set to prevent FW from touching
		 * encapsulated packets when it didn't need to.
		 */
1527 if (unlikely(txq->is_legacy))
1528 val ^= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
1529
		/* If the packet is IPv6 with extension headers, indicate that
		 * to FW and pass a few params, since the device parser doesn't
		 * support parsing IPv6 with extension header/s.
		 */
1534 if (unlikely(ipv6_ext))
1535 qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
1536 }
1537
1538 if (xmit_type & XMIT_LSO) {
1539 first_bd->data.bd_flags.bitfields |=
1540 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
1541 third_bd->data.lso_mss =
1542 cpu_to_le16(skb_shinfo(skb)->gso_size);
1543
1544 if (unlikely(xmit_type & XMIT_ENC)) {
1545 first_bd->data.bd_flags.bitfields |=
1546 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
1547
1548 if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
1549 u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
1550
1551 first_bd->data.bd_flags.bitfields |= 1 << tmp;
1552 }
1553 hlen = qede_get_skb_hlen(skb, true);
1554 } else {
1555 first_bd->data.bd_flags.bitfields |=
1556 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1557 hlen = qede_get_skb_hlen(skb, false);
1558 }
1559
1560
1561 third_bd->data.bitfields |=
1562 cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
1563
		/* FW can't handle header and data on the same BD for LSO.
		 * If we need to split, place the data in the second BD.
		 */
1567 if (unlikely(skb_headlen(skb) > hlen)) {
1568 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1569 "TSO split header size is %d (%x:%x)\n",
1570 first_bd->nbytes, first_bd->addr.hi,
1571 first_bd->addr.lo);
1572
1573 mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
1574 le32_to_cpu(first_bd->addr.lo)) +
1575 hlen;
1576
1577 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
1578 le16_to_cpu(first_bd->nbytes) -
1579 hlen);
1580
			/* This marks the BD as one that has no
			 * individual mapping.
			 */
1584 txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;
1585
1586 first_bd->nbytes = cpu_to_le16(hlen);
1587
1588 tx_data_bd = (struct eth_tx_bd *)third_bd;
1589 data_split = true;
1590 }
1591 } else {
1592 val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
1593 ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
1594 }
1595
1596 first_bd->data.bitfields = cpu_to_le16(val);
1597
1598
1599
1600 while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
1601 rc = map_frag_to_bd(txq,
1602 &skb_shinfo(skb)->frags[frag_idx],
1603 tx_data_bd);
1604 if (rc) {
1605 qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
1606 qede_update_tx_producer(txq);
1607 return NETDEV_TX_OK;
1608 }
1609
1610 if (tx_data_bd == (struct eth_tx_bd *)second_bd)
1611 tx_data_bd = (struct eth_tx_bd *)third_bd;
1612 else
1613 tx_data_bd = NULL;
1614
1615 frag_idx++;
1616 }
1617
1618
1619 for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
1620 tx_data_bd = (struct eth_tx_bd *)
1621 qed_chain_produce(&txq->tx_pbl);
1622
1623 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
1624
1625 rc = map_frag_to_bd(txq,
1626 &skb_shinfo(skb)->frags[frag_idx],
1627 tx_data_bd);
1628 if (rc) {
1629 qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
1630 qede_update_tx_producer(txq);
1631 return NETDEV_TX_OK;
1632 }
1633 }
1634
1635
1636 first_bd->data.nbds = nbd;
1637
1638 netdev_tx_sent_queue(netdev_txq, skb->len);
1639
1640 skb_tx_timestamp(skb);
1641
	/* Advance the packet producer only before sending the packet, since
	 * mapping of pages may fail.
	 */
1645 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
1646
1647
1648 txq->tx_db.data.bd_prod =
1649 cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
1650
1651 if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
1652 qede_update_tx_producer(txq);
1653
1654 if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
1655 < (MAX_SKB_FRAGS + 1))) {
1656 if (skb->xmit_more)
1657 qede_update_tx_producer(txq);
1658
1659 netif_tx_stop_queue(netdev_txq);
1660 txq->stopped_cnt++;
1661 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1662 "Stop queue was called\n");
1663
		/* The paired memory barrier is in qede_tx_int(); we have to
		 * keep ordering of the set_bit() in netif_tx_stop_queue() and
		 * the read of the Tx consumer.
		 */
1667 smp_mb();
1668
1669 if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
1670 (MAX_SKB_FRAGS + 1)) &&
1671 (edev->state == QEDE_STATE_OPEN)) {
1672 netif_tx_wake_queue(netdev_txq);
1673 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1674 "Wake queue was called\n");
1675 }
1676 }
1677
1678 return NETDEV_TX_OK;
1679}
1680
/* 8B udp header + 8B base tunnel header + 32B option length */
1682#define QEDE_MAX_TUN_HDR_LEN 48
1683
1684netdev_features_t qede_features_check(struct sk_buff *skb,
1685 struct net_device *dev,
1686 netdev_features_t features)
1687{
1688 if (skb->encapsulation) {
1689 u8 l4_proto = 0;
1690
1691 switch (vlan_get_protocol(skb)) {
1692 case htons(ETH_P_IP):
1693 l4_proto = ip_hdr(skb)->protocol;
1694 break;
1695 case htons(ETH_P_IPV6):
1696 l4_proto = ipv6_hdr(skb)->nexthdr;
1697 break;
1698 default:
1699 return features;
1700 }
1701
		/* Offload UDP-tunneled packets only if the tunnel header fits
		 * within QEDE_MAX_TUN_HDR_LEN and the destination port matches
		 * a configured VXLAN/GENEVE port; otherwise clear the
		 * checksum/GSO features for this skb.
		 */
1706 if (l4_proto == IPPROTO_UDP) {
1707 struct qede_dev *edev = netdev_priv(dev);
1708 u16 hdrlen, vxln_port, gnv_port;
1709
1710 hdrlen = QEDE_MAX_TUN_HDR_LEN;
1711 vxln_port = edev->vxlan_dst_port;
1712 gnv_port = edev->geneve_dst_port;
1713
1714 if ((skb_inner_mac_header(skb) -
1715 skb_transport_header(skb)) > hdrlen ||
1716 (ntohs(udp_hdr(skb)->dest) != vxln_port &&
1717 ntohs(udp_hdr(skb)->dest) != gnv_port))
1718 return features & ~(NETIF_F_CSUM_MASK |
1719 NETIF_F_GSO_MASK);
1720 }
1721 }
1722
1723 return features;
1724}
1725