#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bpf_trace.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include "qede_ptp.h"

#include <linux/qed/qed_if.h>
#include "qede.h"

int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
{
	struct sw_rx_data *sw_rx_data;
	struct eth_rx_bd *rx_bd;
	dma_addr_t mapping;
	struct page *data;

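	/* If lazy allocation is allowed and the ring still holds a
	 * comfortable number of filled buffers, postpone the actual
	 * allocation (it is done at the end of the NAPI run) and only
	 * account for the buffer being handed out.
	 */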
	if (allow_lazy && likely(rxq->filled_buffers > 12)) {
		rxq->filled_buffers--;
		return 0;
	}

	data = alloc_pages(GFP_ATOMIC, 0);
	if (unlikely(!data))
		return -ENOMEM;

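	/* Map the whole page; Rx buffers are carved out of it in
	 * rx_buf_seg_size chunks by advancing page_offset.
	 */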
	mapping = dma_map_page(rxq->dev, data, 0,
			       PAGE_SIZE, rxq->data_direction);
	if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
		__free_page(data);
		return -ENOMEM;
	}

	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	sw_rx_data->page_offset = 0;
	sw_rx_data->data = data;
	sw_rx_data->mapping = mapping;

	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
	WARN_ON(!rx_bd);
	rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
				     rxq->rx_headroom);

	rxq->sw_rx_prod++;
	rxq->filled_buffers++;

	return 0;
}

int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
{
	u16 idx = txq->sw_tx_cons;
	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_bd *tx_data_bd;
	int bds_consumed = 0;
	int nbds;
	bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
	int i, split_bd_len = 0;

	if (unlikely(!skb)) {
		DP_ERR(edev,
		       "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
		       idx, txq->sw_tx_cons, txq->sw_tx_prod);
		return -1;
	}

	*len = skb->len;

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);

	bds_consumed++;

	nbds = first_bd->data.nbds;

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		bds_consumed++;
	}
	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	while (bds_consumed++ < nbds)
		qed_chain_consume(&txq->tx_pbl);

	dev_kfree_skb_any(skb);
	txq->sw_tx_ring.skbs[idx].skb = NULL;
	txq->sw_tx_ring.skbs[idx].flags = 0;

	return 0;
}

static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
				    struct eth_tx_1st_bd *first_bd,
				    int nbd, bool data_split)
{
	u16 idx = txq->sw_tx_prod;
	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
	struct eth_tx_bd *tx_data_bd;
	int i, split_bd_len = 0;

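	/* Rewind the chain producer to the first BD of this packet so the
	 * BDs that were already produced can be walked and unmapped.
	 */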
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		nbd--;
	}

	dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	for (i = 0; i < nbd; i++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		if (tx_data_bd->nbytes)
			dma_unmap_page(txq->dev,
				       BD_UNMAP_ADDR(tx_data_bd),
				       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

	dev_kfree_skb_any(skb);
	txq->sw_tx_ring.skbs[idx].skb = NULL;
	txq->sw_tx_ring.skbs[idx].flags = 0;
}

static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
{
	u32 rc = XMIT_L4_CSUM;
	__be16 l3_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return XMIT_PLAIN;

	l3_proto = vlan_get_protocol(skb);
	if (l3_proto == htons(ETH_P_IPV6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*ipv6_ext = 1;

	if (skb->encapsulation) {
		rc |= XMIT_ENC;
		if (skb_is_gso(skb)) {
			unsigned short gso_type = skb_shinfo(skb)->gso_type;

			if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
			    (gso_type & SKB_GSO_GRE_CSUM))
				rc |= XMIT_ENC_GSO_L4_CSUM;

			rc |= XMIT_LSO;
			return rc;
		}
	}

	if (skb_is_gso(skb))
		rc |= XMIT_LSO;

	return rc;
}

static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
					 struct eth_tx_2nd_bd *second_bd,
					 struct eth_tx_3rd_bd *third_bd)
{
	u8 l4_proto;
	u16 bd2_bits1 = 0, bd2_bits2 = 0;

	bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);

	bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
		      ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
		     << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;

	bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
		      ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);

	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
		l4_proto = ipv6_hdr(skb)->nexthdr;
	else
		l4_proto = ip_hdr(skb)->protocol;

	if (l4_proto == IPPROTO_UDP)
		bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;

	if (third_bd)
		third_bd->data.bitfields |=
			cpu_to_le16(((tcp_hdrlen(skb) / 4) &
				     ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
				    ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);

	second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
	second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}

static int map_frag_to_bd(struct qede_tx_queue *txq,
			  skb_frag_t *frag, struct eth_tx_bd *bd)
{
	dma_addr_t mapping;

	mapping = skb_frag_dma_map(txq->dev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(txq->dev, mapping)))
		return -ENOMEM;

	BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));

	return 0;
}

static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
	if (is_encap_pkt)
		return (skb_inner_transport_header(skb) +
			inner_tcp_hdrlen(skb) - skb->data);
	else
		return (skb_transport_header(skb) +
			tcp_hdrlen(skb) - skb->data);
}

#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
{
	int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;

	if (xmit_type & XMIT_LSO) {
		int hlen;

		hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);

		if (skb_headlen(skb) > hlen)
			allowed_frags--;
	}

	return (skb_shinfo(skb)->nr_frags > allowed_frags);
}
#endif

static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
{
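	/* Make sure the BD data is written before ringing the doorbell.
	 * The second wmb() after the writel() flushes the (presumably
	 * write-combined) doorbell BAR so the update is not lost when
	 * another CPU writes the same doorbell.
	 */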
	wmb();
	barrier();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	wmb();
}

static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
			 struct sw_rx_data *metadata, u16 padding, u16 length)
{
	struct qede_tx_queue *txq = fp->xdp_tx;
	struct eth_tx_1st_bd *first_bd;
	u16 idx = txq->sw_tx_prod;
	u16 val;

	if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
		txq->stopped_cnt++;
		return -ENOMEM;
	}

	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);

	memset(first_bd, 0, sizeof(*first_bd));
	first_bd->data.bd_flags.bitfields =
		BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);

	val = (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
	       ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;

	first_bd->data.bitfields |= cpu_to_le16(val);
	first_bd->data.nbds = 1;

	BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);

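	/* The buffer is already DMA-mapped by the Rx path; just sync it for
	 * device access before transmitting it.
	 */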
	dma_sync_single_for_device(&edev->pdev->dev,
				   metadata->mapping + padding,
				   length, PCI_DMA_TODEVICE);

	txq->sw_tx_ring.xdp[idx].page = metadata->data;
	txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;

	fp->xdp_xmit = 1;

	return 0;
}

int qede_txq_has_work(struct qede_tx_queue *txq)
{
	u16 hw_bd_cons;

	barrier();
	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
		return 0;

	return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}

static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	u16 hw_bd_cons, idx;

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	barrier();

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		qed_chain_consume(&txq->tx_pbl);
		idx = txq->sw_tx_cons;

		dma_unmap_page(&edev->pdev->dev,
			       txq->sw_tx_ring.xdp[idx].mapping,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		__free_page(txq->sw_tx_ring.xdp[idx].page);

		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
		txq->xmit_pkts++;
	}
}

static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct netdev_queue *netdev_txq;
	u16 hw_bd_cons;
	int rc;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	barrier();

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		int len = 0;

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
				  hw_bd_cons,
				  qed_chain_get_cons_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
		txq->xmit_pkts++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);

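	/* Make sure the completed-packet accounting above is visible before
	 * checking whether the queue was stopped, so a wake-up is not missed
	 * when a concurrent qede_start_xmit() stops the queue.
	 */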
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
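		/* Re-check under the Tx queue lock: the queue may have been
		 * woken already, the device may no longer be running, or
		 * there may still not be enough free BDs.
		 */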
		__netif_tx_lock(netdev_txq, smp_processor_id());

		if ((netif_tx_queue_stopped(netdev_txq)) &&
		    (edev->state == QEDE_STATE_OPEN) &&
		    (qed_chain_get_elem_left(&txq->tx_pbl)
		     >= (MAX_SKB_FRAGS + 1))) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
				   "Wake queue was called\n");
		}

		__netif_tx_unlock(netdev_txq);
	}

	return 0;
}

bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
	u16 hw_comp_cons, sw_comp_cons;

	barrier();

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	return hw_comp_cons != sw_comp_cons;
}

static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
{
	qed_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;
}

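/* Repost the page of a consumed Rx buffer on the producer side of the BD
 * ring so the hardware can reuse it.
 */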
static inline void qede_reuse_page(struct qede_rx_queue *rxq,
				   struct sw_rx_data *curr_cons)
{
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *curr_prod;
	dma_addr_t new_mapping;

	curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	*curr_prod = *curr_cons;

	new_mapping = curr_prod->mapping + curr_prod->page_offset;

	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
					  rxq->rx_headroom);

	rxq->sw_rx_prod++;
	curr_cons->data = NULL;
}

void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
{
	struct sw_rx_data *curr_cons;

	for (; count > 0; count--) {
		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
		qede_reuse_page(rxq, curr_cons);
		qede_rx_bd_ring_consume(rxq);
	}
}

static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
					 struct sw_rx_data *curr_cons)
{
	curr_cons->page_offset += rxq->rx_buf_seg_size;

	if (curr_cons->page_offset == PAGE_SIZE) {
		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
			curr_cons->page_offset -= rxq->rx_buf_seg_size;
			return -ENOMEM;
		}

		dma_unmap_page(rxq->dev, curr_cons->mapping,
			       PAGE_SIZE, rxq->data_direction);
	} else {
		page_ref_inc(curr_cons->data);
		qede_reuse_page(rxq, curr_cons);
	}

	return 0;
}

void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
	struct eth_rx_prod_data rx_prods = {0};

	rx_prods.bd_prod = cpu_to_le16(bd_prod);
	rx_prods.cqe_prod = cpu_to_le16(cqe_prod);

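	/* Make sure the BD and CQE data is written before the producer
	 * updates are made visible to the device.
	 */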
	wmb();

	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(u32 *)&rx_prods);
}

static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
{
	enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
	enum rss_hash_type htype;
	u32 hash = 0;

	htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
	if (htype) {
		hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
			     (htype == RSS_HASH_TYPE_IPV6)) ?
			    PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
		hash = le32_to_cpu(rss_hash);
	}
	skb_set_hash(skb, hash, hash_type);
}

static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
{
	skb_checksum_none_assert(skb);

	if (csum_flag & QEDE_CSUM_UNNECESSARY)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) {
		skb->csum_level = 1;
		skb->encapsulation = 1;
	}
}

static inline void qede_skb_receive(struct qede_dev *edev,
				    struct qede_fastpath *fp,
				    struct qede_rx_queue *rxq,
				    struct sk_buff *skb, u16 vlan_tag)
{
	if (vlan_tag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&fp->napi, skb);
}

static void qede_set_gro_params(struct qede_dev *edev,
				struct sk_buff *skb,
				struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);

	if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
	     PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
				    cqe->header_len;
}

static int qede_fill_frag_skb(struct qede_dev *edev,
			      struct qede_rx_queue *rxq,
			      u8 tpa_agg_index, u16 len_on_bd)
{
	struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
							 NUM_RX_BDS_MAX];
	struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
	struct sk_buff *skb = tpa_info->skb;

	if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
		goto out;

	skb_fill_page_desc(skb, tpa_info->frag_id++,
			   current_bd->data,
			   current_bd->page_offset + rxq->rx_headroom,
			   len_on_bd);

	if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
		page_ref_inc(current_bd->data);
		goto out;
	}

	qede_rx_bd_ring_consume(rxq);

	skb->data_len += len_on_bd;
	skb->truesize += rxq->rx_buf_seg_size;
	skb->len += len_on_bd;

	return 0;

out:
	tpa_info->state = QEDE_AGG_STATE_ERROR;
	qede_recycle_rx_bd_ring(rxq, 1);

	return -ENOMEM;
}

static bool qede_tunn_exist(u16 flag)
{
	return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
			  PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
}

static u8 qede_check_tunn_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 tcsum = 0;

	if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;

	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return QEDE_CSUM_UNNECESSARY | tcsum;
}

static inline struct sk_buff *
qede_build_skb(struct qede_rx_queue *rxq,
	       struct sw_rx_data *bd, u16 len, u16 pad)
{
	struct sk_buff *skb;
	void *buf;

	buf = page_address(bd->data) + bd->page_offset;
	skb = build_skb(buf, rxq->rx_buf_seg_size);

	skb_reserve(skb, pad);
	skb_put(skb, len);

	return skb;
}

static struct sk_buff *
qede_tpa_rx_build_skb(struct qede_dev *edev,
		      struct qede_rx_queue *rxq,
		      struct sw_rx_data *bd, u16 len, u16 pad,
		      bool alloc_skb)
{
	struct sk_buff *skb;

	skb = qede_build_skb(rxq, bd, len, pad);
	bd->page_offset += rxq->rx_buf_seg_size;

	if (bd->page_offset == PAGE_SIZE) {
		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
			DP_NOTICE(edev,
				  "Failed to allocate RX buffer for tpa start\n");
			bd->page_offset -= rxq->rx_buf_seg_size;
			page_ref_inc(bd->data);
			dev_kfree_skb_any(skb);
			return NULL;
		}
	} else {
		page_ref_inc(bd->data);
		qede_reuse_page(rxq, bd);
	}

	qede_rx_bd_ring_consume(rxq);

	return skb;
}

static struct sk_buff *
qede_rx_build_skb(struct qede_dev *edev,
		  struct qede_rx_queue *rxq,
		  struct sw_rx_data *bd, u16 len, u16 pad)
{
	struct sk_buff *skb = NULL;

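	/* Small frames are copied into a freshly allocated skb and the page
	 * is reposted immediately; larger frames get a build_skb() around
	 * the existing buffer instead.
	 */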
	if (len + pad <= edev->rx_copybreak) {
		unsigned int offset = bd->page_offset + pad;

		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
		if (unlikely(!skb))
			return NULL;

		skb_reserve(skb, pad);
		memcpy(skb_put(skb, len),
		       page_address(bd->data) + offset, len);
		qede_reuse_page(rxq, bd);
		goto out;
	}

	skb = qede_build_skb(rxq, bd, len, pad);

	if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
		page_ref_inc(bd->data);
		dev_kfree_skb_any(skb);
		return NULL;
	}
out:
	qede_rx_bd_ring_consume(rxq);

	return skb;
}

static void qede_tpa_start(struct qede_dev *edev,
			   struct qede_rx_queue *rxq,
			   struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	struct sw_rx_data *sw_rx_data_cons;
	u16 pad;

	sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
	pad = cqe->placement_offset + rxq->rx_headroom;

	tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons,
					      le16_to_cpu(cqe->len_on_first_bd),
					      pad, false);
	tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset;
	tpa_info->buffer.mapping = sw_rx_data_cons->mapping;

	if (unlikely(!tpa_info->skb)) {
		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");

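		/* Consume the BD now without reposting a buffer; the saved
		 * tpa_info->buffer is reused by qede_tpa_end() once the
		 * aggregation terminates in the error state.
		 */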
		tpa_info->tpa_start_fail = true;
		qede_rx_bd_ring_consume(rxq);
		tpa_info->state = QEDE_AGG_STATE_ERROR;
		goto cons_buf;
	}

	tpa_info->frag_id = 0;
	tpa_info->state = QEDE_AGG_STATE_START;

	if ((le16_to_cpu(cqe->pars_flags.flags) >>
	     PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
	    PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
		tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	else
		tpa_info->vlan_tag = 0;

	qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);

	qede_set_gro_params(edev, tpa_info->skb, cqe);

cons_buf:
	if (likely(cqe->ext_bd_len_list[0]))
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->ext_bd_len_list[0]));

	if (unlikely(cqe->ext_bd_len_list[1])) {
		DP_ERR(edev,
		       "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
		tpa_info->state = QEDE_AGG_STATE_ERROR;
	}
}

#ifdef CONFIG_INET
static void qede_gro_ip_csum(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);

	tcp_gro_complete(skb);
}

static void qede_gro_ipv6_csum(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	tcp_gro_complete(skb);
}
#endif

static void qede_gro_receive(struct qede_dev *edev,
			     struct qede_fastpath *fp,
			     struct sk_buff *skb,
			     u16 vlan_tag)
{
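	/* The firmware may complete an aggregation that holds only a single
	 * segment (e.g. on aggregation timeout); such an skb has no frags,
	 * so clear the GSO hints and hand it to the stack as a regular
	 * packet.
	 */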
	if (unlikely(!skb->data_len)) {
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = 0;
		goto send_skb;
	}

#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		skb_reset_network_header(skb);

		switch (skb->protocol) {
		case htons(ETH_P_IP):
			qede_gro_ip_csum(skb);
			break;
		case htons(ETH_P_IPV6):
			qede_gro_ipv6_csum(skb);
			break;
		default:
			DP_ERR(edev,
			       "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
			       ntohs(skb->protocol));
		}
	}
#endif

send_skb:
	skb_record_rx_queue(skb, fp->rxq->rxq_id);
	qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
}

static inline void qede_tpa_cont(struct qede_dev *edev,
				 struct qede_rx_queue *rxq,
				 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
{
	int i;

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));

	if (unlikely(i > 1))
		DP_ERR(edev,
		       "Strange - TPA cont with more than a single len_list entry\n");
}

static int qede_tpa_end(struct qede_dev *edev,
			struct qede_fastpath *fp,
			struct eth_fast_path_rx_tpa_end_cqe *cqe)
{
	struct qede_rx_queue *rxq = fp->rxq;
	struct qede_agg_info *tpa_info;
	struct sk_buff *skb;
	int i;

	tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	skb = tpa_info->skb;

	if (tpa_info->buffer.page_offset == PAGE_SIZE)
		dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
			       PAGE_SIZE, rxq->data_direction);

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));
	if (unlikely(i > 1))
		DP_ERR(edev,
		       "Strange - TPA end with more than a single len_list entry\n");

	if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
		goto err;

	if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
		DP_ERR(edev,
		       "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
		       cqe->num_of_bds, tpa_info->frag_id);
	if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
		DP_ERR(edev,
		       "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
		       le16_to_cpu(cqe->total_packet_len), skb->len);

	skb->protocol = eth_type_trans(skb, edev->ndev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

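	/* Tell the GRO layer how many segments were coalesced so the
	 * completion is accounted correctly.
	 */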
	NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);

	qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);

	tpa_info->state = QEDE_AGG_STATE_NONE;

	return 1;
err:
	tpa_info->state = QEDE_AGG_STATE_NONE;

	if (tpa_info->tpa_start_fail) {
		qede_reuse_page(rxq, &tpa_info->buffer);
		tpa_info->tpa_start_fail = false;
	}

	dev_kfree_skb_any(tpa_info->skb);
	tpa_info->skb = NULL;
	return 0;
}

static u8 qede_check_notunn_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 csum = 0;

	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		csum = QEDE_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return csum;
}

static u8 qede_check_csum(u16 flag)
{
	if (!qede_tunn_exist(flag))
		return qede_check_notunn_csum(flag);
	else
		return qede_check_tunn_csum(flag);
}

static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
				      u16 flag)
{
	u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;

	if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
			     ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
	    (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
		     PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
		return true;

	return false;
}

static bool qede_rx_xdp(struct qede_dev *edev,
			struct qede_fastpath *fp,
			struct qede_rx_queue *rxq,
			struct bpf_prog *prog,
			struct sw_rx_data *bd,
			struct eth_fast_path_rx_reg_cqe *cqe,
			u16 *data_offset, u16 *len)
{
	struct xdp_buff xdp;
	enum xdp_action act;

	xdp.data_hard_start = page_address(bd->data);
	xdp.data = xdp.data_hard_start + *data_offset;
	xdp_set_data_meta_invalid(&xdp);
	xdp.data_end = xdp.data + *len;
	xdp.rxq = &rxq->xdp_rxq;

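	/* Run the XDP program; the RCU read-side lock keeps the program from
	 * being released while it executes.
	 */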
	rcu_read_lock();
	act = bpf_prog_run_xdp(prog, &xdp);
	rcu_read_unlock();

	*data_offset = xdp.data - xdp.data_hard_start;
	*len = xdp.data_end - xdp.data;

	if (act == XDP_PASS)
		return true;

	rxq->xdp_no_pass++;

	switch (act) {
	case XDP_TX:
		if (qede_alloc_rx_buffer(rxq, true)) {
			qede_recycle_rx_bd_ring(rxq, 1);
			trace_xdp_exception(edev->ndev, prog, act);
			return false;
		}

		if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) {
			dma_unmap_page(rxq->dev, bd->mapping,
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
			__free_page(bd->data);
			trace_xdp_exception(edev->ndev, prog, act);
		}

		qede_rx_bd_ring_consume(rxq);
		return false;

	default:
		bpf_warn_invalid_xdp_action(act);

	case XDP_ABORTED:
		trace_xdp_exception(edev->ndev, prog, act);

	case XDP_DROP:
		qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
	}

	return false;
}

static int qede_rx_build_jumbo(struct qede_dev *edev,
			       struct qede_rx_queue *rxq,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_reg_cqe *cqe,
			       u16 first_bd_len)
{
	u16 pkt_len = le16_to_cpu(cqe->pkt_len);
	struct sw_rx_data *bd;
	u16 bd_cons_idx;
	u8 num_frags;

	pkt_len -= first_bd_len;

	for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
		u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
							    pkt_len;

		if (unlikely(!cur_size)) {
			DP_ERR(edev,
			       "Still got %d BDs for mapping jumbo, but length became 0\n",
			       num_frags);
			goto out;
		}

		if (unlikely(qede_alloc_rx_buffer(rxq, true)))
			goto out;

		bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
		bd = &rxq->sw_rx_ring[bd_cons_idx];
		qede_rx_bd_ring_consume(rxq);

		dma_unmap_page(rxq->dev, bd->mapping,
			       PAGE_SIZE, DMA_FROM_DEVICE);

		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
				   bd->data, rxq->rx_headroom, cur_size);

		skb->truesize += PAGE_SIZE;
		skb->data_len += cur_size;
		skb->len += cur_size;
		pkt_len -= cur_size;
	}

	if (unlikely(pkt_len))
		DP_ERR(edev,
		       "Mapped all BDs of jumbo, but still have %d bytes\n",
		       pkt_len);

out:
	return num_frags;
}

static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
				   struct qede_fastpath *fp,
				   struct qede_rx_queue *rxq,
				   union eth_rx_cqe *cqe,
				   enum eth_rx_cqe_type type)
{
	switch (type) {
	case ETH_RX_CQE_TYPE_TPA_START:
		qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
		return 0;
	case ETH_RX_CQE_TYPE_TPA_CONT:
		qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
		return 0;
	case ETH_RX_CQE_TYPE_TPA_END:
		return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
	default:
		return 0;
	}
}

static int qede_rx_process_cqe(struct qede_dev *edev,
			       struct qede_fastpath *fp,
			       struct qede_rx_queue *rxq)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
	struct eth_fast_path_rx_reg_cqe *fp_cqe;
	u16 len, pad, bd_cons_idx, parse_flag;
	enum eth_rx_cqe_type cqe_type;
	union eth_rx_cqe *cqe;
	struct sw_rx_data *bd;
	struct sk_buff *skb;
	__le16 flags;
	u8 csum_flag;

	cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
	cqe_type = cqe->fast_path_regular.type;

	if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
		struct eth_slow_path_rx_cqe *sp_cqe;

		sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
		edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
		return 0;
	}

	if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
		return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);

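	/* Get the data buffer (BD) this regular completion refers to */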
	bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
	bd = &rxq->sw_rx_ring[bd_cons_idx];

	fp_cqe = &cqe->fast_path_regular;
	len = le16_to_cpu(fp_cqe->len_on_first_bd);
	pad = fp_cqe->placement_offset + rxq->rx_headroom;

	if (xdp_prog)
		if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
				 &pad, &len))
			return 0;

	flags = cqe->fast_path_regular.pars_flags.flags;
	parse_flag = le16_to_cpu(flags);

	csum_flag = qede_check_csum(parse_flag);
	if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
		if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag))
			rxq->rx_ip_frags++;
		else
			rxq->rx_hw_errors++;
	}

	skb = qede_rx_build_skb(edev, rxq, bd, len, pad);
	if (!skb) {
		rxq->rx_alloc_errors++;
		qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
		return 0;
	}

	if (fp_cqe->bd_num > 1) {
		u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
							 fp_cqe, len);

		if (unlikely(unmapped_frags > 0)) {
			qede_recycle_rx_bd_ring(rxq, unmapped_frags);
			dev_kfree_skb_any(skb);
			return 0;
		}
	}

	skb->protocol = eth_type_trans(skb, edev->ndev);
	qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
	qede_set_skb_csum(skb, csum_flag);
	skb_record_rx_queue(skb, rxq->rxq_id);
	qede_ptp_record_rx_ts(edev, cqe, skb);

	qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));

	return 1;
}

static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
	struct qede_rx_queue *rxq = fp->rxq;
	struct qede_dev *edev = fp->edev;
	int work_done = 0, rcv_pkts = 0;
	u16 hw_comp_cons, sw_comp_cons;

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

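	/* Prevent speculative reads of the CQE/BD data from being reordered
	 * before the hw_comp_cons read above.
	 */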
	rmb();

	while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
		rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
		work_done++;
	}

	rxq->rcv_pkts += rcv_pkts;

	while (rxq->num_rx_buffers - rxq->filled_buffers)
		if (qede_alloc_rx_buffer(rxq, false))
			break;

	qede_update_rx_prod(edev, rxq);

	return work_done;
}

static bool qede_poll_is_more_work(struct qede_fastpath *fp)
{
	qed_sb_update_sb_idx(fp->sb_info);

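	/* Make sure the status-block index update is read before checking
	 * the rings for additional work.
	 */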
	rmb();

	if (likely(fp->type & QEDE_FASTPATH_RX))
		if (qede_has_rx_work(fp->rxq))
			return true;

	if (fp->type & QEDE_FASTPATH_XDP)
		if (qede_txq_has_work(fp->xdp_tx))
			return true;

	if (likely(fp->type & QEDE_FASTPATH_TX)) {
		int cos;

		for_each_cos_in_txq(fp->edev, cos) {
			if (qede_txq_has_work(&fp->txq[cos]))
				return true;
		}
	}

	return false;
}

int qede_poll(struct napi_struct *napi, int budget)
{
	struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
						napi);
	struct qede_dev *edev = fp->edev;
	int rx_work_done = 0;

	if (likely(fp->type & QEDE_FASTPATH_TX)) {
		int cos;

		for_each_cos_in_txq(fp->edev, cos) {
			if (qede_txq_has_work(&fp->txq[cos]))
				qede_tx_int(edev, &fp->txq[cos]);
		}
	}

	if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
		qede_xdp_tx_int(edev, fp->xdp_tx);

	rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
			qede_has_rx_work(fp->rxq)) ?
			qede_rx_int(fp, budget) : 0;
	if (rx_work_done < budget) {
		if (!qede_poll_is_more_work(fp)) {
			napi_complete_done(napi, rx_work_done);

			qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
		} else {
			rx_work_done = budget;
		}
	}

	if (fp->xdp_xmit) {
		u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);

		fp->xdp_xmit = 0;
		fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
		qede_update_tx_producer(fp->xdp_tx);
	}

	return rx_work_done;
}

irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
{
	struct qede_fastpath *fp = fp_cookie;

	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);

	napi_schedule_irqoff(&fp->napi);
	return IRQ_HANDLED;
}

netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct netdev_queue *netdev_txq;
	struct qede_tx_queue *txq;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_2nd_bd *second_bd = NULL;
	struct eth_tx_3rd_bd *third_bd = NULL;
	struct eth_tx_bd *tx_data_bd = NULL;
	u16 txq_index, val = 0;
	u8 nbd = 0;
	dma_addr_t mapping;
	int rc, frag_idx = 0, ipv6_ext = 0;
	u8 xmit_type;
	u16 idx;
	u16 hlen;
	bool data_split = false;

	txq_index = skb_get_queue_mapping(skb);
	WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc);
	txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index);
	netdev_txq = netdev_get_tx_queue(ndev, txq_index);

	WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));

	xmit_type = qede_xmit_type(skb, &ipv6_ext);

#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
	if (qede_pkt_req_lin(skb, xmit_type)) {
		if (skb_linearize(skb)) {
			txq->tx_mem_alloc_err++;

			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	idx = txq->sw_tx_prod;
	txq->sw_tx_ring.skbs[idx].skb = skb;
	first_bd = (struct eth_tx_1st_bd *)
		   qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	first_bd->data.bd_flags.bitfields =
		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		qede_ptp_tx_ts(edev, skb);

	mapping = dma_map_single(txq->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(txq->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		qede_free_failed_tx_pkt(txq, first_bd, 0, false);
		qede_update_tx_producer(txq);
		return NETDEV_TX_OK;
	}
	nbd++;
	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

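	/* LSO and IPv6-with-extension-header packets need the 2nd and 3rd
	 * BDs to carry the additional parsing/offload parameters.
	 */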
	if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
		second_bd = (struct eth_tx_2nd_bd *)
			    qed_chain_produce(&txq->tx_pbl);
		memset(second_bd, 0, sizeof(*second_bd));

		nbd++;
		third_bd = (struct eth_tx_3rd_bd *)
			   qed_chain_produce(&txq->tx_pbl);
		memset(third_bd, 0, sizeof(*third_bd));

		nbd++;

		tx_data_bd = (struct eth_tx_bd *)second_bd;
	}

	if (skb_vlan_tag_present(skb)) {
		first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
	}

	if (xmit_type & XMIT_L4_CSUM) {
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;

		if (xmit_type & XMIT_ENC) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;

			val |= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
		}

		if (unlikely(txq->is_legacy))
			val ^= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);

		if (unlikely(ipv6_ext))
			qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
	}

	if (xmit_type & XMIT_LSO) {
		first_bd->data.bd_flags.bitfields |=
			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
		third_bd->data.lso_mss =
			cpu_to_le16(skb_shinfo(skb)->gso_size);

		if (unlikely(xmit_type & XMIT_ENC)) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;

			if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
				u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;

				first_bd->data.bd_flags.bitfields |= 1 << tmp;
			}
			hlen = qede_get_skb_hlen(skb, true);
		} else {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
			hlen = qede_get_skb_hlen(skb, false);
		}

		third_bd->data.bitfields |=
			cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);

		if (unlikely(skb_headlen(skb) > hlen)) {
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "TSO split header size is %d (%x:%x)\n",
				   first_bd->nbytes, first_bd->addr.hi,
				   first_bd->addr.lo);

			mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
					   le32_to_cpu(first_bd->addr.lo)) +
					   hlen;

			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
					      le16_to_cpu(first_bd->nbytes) -
					      hlen);

			txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;

			first_bd->nbytes = cpu_to_le16(hlen);

			tx_data_bd = (struct eth_tx_bd *)third_bd;
			data_split = true;
		}
	} else {
		val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
			ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
	}

	first_bd->data.bitfields = cpu_to_le16(val);

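	/* Map the skb fragments onto the BDs that were already produced
	 * (2nd/3rd), then produce additional BDs for any remaining frags.
	 */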
	while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
		rc = map_frag_to_bd(txq,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;
		}

		if (tx_data_bd == (struct eth_tx_bd *)second_bd)
			tx_data_bd = (struct eth_tx_bd *)third_bd;
		else
			tx_data_bd = NULL;

		frag_idx++;
	}

	for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
		tx_data_bd = (struct eth_tx_bd *)
			     qed_chain_produce(&txq->tx_pbl);

		memset(tx_data_bd, 0, sizeof(*tx_data_bd));

		rc = map_frag_to_bd(txq,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;
		}
	}

	first_bd->data.nbds = nbd;

	netdev_tx_sent_queue(netdev_txq, skb->len);

	skb_tx_timestamp(skb);

	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;

	txq->tx_db.data.bd_prod =
		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));

	if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
		qede_update_tx_producer(txq);

	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
		     < (MAX_SKB_FRAGS + 1))) {
		if (netdev_xmit_more())
			qede_update_tx_producer(txq);

		netif_tx_stop_queue(netdev_txq);
		txq->stopped_cnt++;
		DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
			   "Stop queue was called\n");

		smp_mb();

		if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
		     (MAX_SKB_FRAGS + 1)) &&
		    (edev->state == QEDE_STATE_OPEN)) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "Wake queue was called\n");
		}
	}

	return NETDEV_TX_OK;
}

u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
		      struct net_device *sb_dev)
{
	struct qede_dev *edev = netdev_priv(dev);
	int total_txq;

	total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;

	return QEDE_TSS_COUNT(edev) ?
		netdev_pick_tx(dev, skb, NULL) % total_txq : 0;
}

#define QEDE_MAX_TUN_HDR_LEN 48

netdev_features_t qede_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	if (skb->encapsulation) {
		u8 l4_proto = 0;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			l4_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			l4_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return features;
		}

		if (l4_proto == IPPROTO_UDP) {
			struct qede_dev *edev = netdev_priv(dev);
			u16 hdrlen, vxln_port, gnv_port;

			hdrlen = QEDE_MAX_TUN_HDR_LEN;
			vxln_port = edev->vxlan_dst_port;
			gnv_port = edev->geneve_dst_port;

			if ((skb_inner_mac_header(skb) -
			     skb_transport_header(skb)) > hdrlen ||
			    (ntohs(udp_hdr(skb)->dest) != vxln_port &&
			     ntohs(udp_hdr(skb)->dest) != gnv_port))
				return features & ~(NETIF_F_CSUM_MASK |
						    NETIF_F_GSO_MASK);
		}
	}

	return features;
}