// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_dev.h"
#include "hinic_tx.h"

#define TX_IRQ_NO_PENDING 0
#define TX_IRQ_NO_COALESC 0
#define TX_IRQ_NO_LLI_TIMER 0
#define TX_IRQ_NO_CREDIT 0
#define TX_IRQ_NO_RESEND_TIMER 0

#define CI_UPDATE_NO_PENDING 0
#define CI_UPDATE_NO_COALESC 0

#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))

#define MIN_SKB_LEN 17

#define MAX_PAYLOAD_OFFSET 221
#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data))

union hinic_l3 {
	struct iphdr *v4;
	struct ipv6hdr *v6;
	unsigned char *hdr;
};

union hinic_l4 {
	struct tcphdr *tcp;
	struct udphdr *udp;
	unsigned char *hdr;
};

enum hinic_offload_type {
	TX_OFFLOAD_TSO = BIT(0),
	TX_OFFLOAD_CSUM = BIT(1),
	TX_OFFLOAD_VLAN = BIT(2),
	TX_OFFLOAD_INVALID = BIT(3),
};

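/**
 * hinic_txq_clean_stats - Clean the statistics of the specific queue
 * @txq: Logical Tx Queue
 **/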
void hinic_txq_clean_stats(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_update_begin(&txq_stats->syncp);
	txq_stats->pkts = 0;
	txq_stats->bytes = 0;
	txq_stats->tx_busy = 0;
	txq_stats->tx_wake = 0;
	txq_stats->tx_dropped = 0;
	u64_stats_update_end(&txq_stats->syncp);
}
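/**
 * hinic_txq_get_stats - get statistics of Tx Queue
 * @txq: Logical Tx Queue
 * @stats: return updated stats here
 **/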
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;
	unsigned int start;

	u64_stats_update_begin(&stats->syncp);
	do {
		start = u64_stats_fetch_begin(&txq_stats->syncp);
		stats->pkts = txq_stats->pkts;
		stats->bytes = txq_stats->bytes;
		stats->tx_busy = txq_stats->tx_busy;
		stats->tx_wake = txq_stats->tx_wake;
		stats->tx_dropped = txq_stats->tx_dropped;
	} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
	u64_stats_update_end(&stats->syncp);
}
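/**
 * txq_stats_init - Initialize the statistics of the specific queue
 * @txq: Logical Tx Queue
 **/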
static void txq_stats_init(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_init(&txq_stats->syncp);
	hinic_txq_clean_stats(txq);
}
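/**
 * tx_map_skb - dma mapping for skb and return sges
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: returned sges
 *
 * Return 0 - Success, negative - Failure
 **/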
static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
		      struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct skb_frag_struct *frag;
	dma_addr_t dma_addr;
	int i, j;

	dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma_addr)) {
		dev_err(&pdev->dev, "Failed to map Tx skb data\n");
		return -EFAULT;
	}

	hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, dma_addr)) {
			dev_err(&pdev->dev, "Failed to map Tx skb frag\n");
			goto err_tx_map;
		}

		hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag));
	}

	return 0;

err_tx_map:
	for (j = 0; j < i; j++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]),
			       sges[j + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
	return -EFAULT;
}
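/**
 * tx_unmap_skb - unmap the dma address of the skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/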
static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			 struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
			       sges[i + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
}

static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
				 union hinic_l4 *l4,
				 enum hinic_offload_type offload_type,
				 enum hinic_l3_offload_type *l3_type,
				 u8 *l4_proto)
{
	u8 *exthdr;

	if (ip->v4->version == 4) {
		*l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
			   IPV4_PKT_NO_CHKSUM_OFFLOAD :
			   IPV4_PKT_WITH_CHKSUM_OFFLOAD;
		*l4_proto = ip->v4->protocol;
	} else if (ip->v4->version == 6) {
		*l3_type = IPV6_PKT;
		exthdr = ip->hdr + sizeof(*ip->v6);
		*l4_proto = ip->v6->nexthdr;
		if (exthdr != l4->hdr) {
			int start = exthdr - skb->data;
			__be16 frag_off;

			ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
		}
	} else {
		*l3_type = L3TYPE_UNKNOWN;
		*l4_proto = 0;
	}
}

static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
			      enum hinic_offload_type offload_type, u8 l4_proto,
			      enum hinic_l4_offload_type *l4_offload,
			      u32 *l4_len, u32 *offset)
{
	*l4_offload = OFFLOAD_DISABLE;
	*offset = 0;
	*l4_len = 0;

	switch (l4_proto) {
	case IPPROTO_TCP:
		*l4_offload = TCP_OFFLOAD_ENABLE;
		/* doff is in units of 4 bytes */
		*l4_len = l4->tcp->doff * 4;
		*offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
		break;

	case IPPROTO_UDP:
		*l4_offload = UDP_OFFLOAD_ENABLE;
		*l4_len = sizeof(struct udphdr);
		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
		break;

	case IPPROTO_SCTP:
		/* only csum offload supports sctp */
		if (offload_type != TX_OFFLOAD_CSUM)
			break;

		*l4_offload = SCTP_OFFLOAD_ENABLE;
		*l4_len = sizeof(struct sctphdr);
		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
		break;

	default:
		break;
	}
}

static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto)
{
	return (ip->v4->version == 4) ?
	       csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
	       csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
}

static int offload_tso(struct hinic_sq_task *task, u32 *queue_info,
		       struct sk_buff *skb)
{
	u32 offset, l4_len, ip_identify, network_hdr_len;
	enum hinic_l3_offload_type l3_offload;
	enum hinic_l4_offload_type l4_offload;
	union hinic_l3 ip;
	union hinic_l4 l4;
	u8 l4_proto;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_cow_head(skb, 0) < 0)
		return -EPROTONOSUPPORT;

	if (skb->encapsulation) {
		u32 gso_type = skb_shinfo(skb)->gso_type;
		u32 tunnel_type = 0;
		u32 l4_tunnel_len;

		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_inner_network_header_len(skb);

		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
		} else if (ip.v4->version == 6) {
			l3_offload = IPV6_PKT;
		} else {
			l3_offload = 0;
		}

		hinic_task_set_outter_l3(task, l3_offload,
					 skb_network_header_len(skb));

		if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
			l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
			tunnel_type = TUNNEL_UDP_CSUM;
		} else if (gso_type & SKB_GSO_UDP_TUNNEL) {
			tunnel_type = TUNNEL_UDP_NO_CSUM;
		}

		l4_tunnel_len = skb_inner_network_offset(skb) -
				skb_transport_offset(skb);
		hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);

		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_network_header_len(skb);
	}

	/* initialize inner IP header fields */
	if (ip.v4->version == 4)
		ip.v4->tot_len = 0;
	else
		ip.v6->payload_len = 0;

	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload,
			     &l4_proto);

	hinic_task_set_inner_l3(task, l3_offload, network_hdr_len);

	ip_identify = 0;
	if (l4_proto == IPPROTO_TCP)
		l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);

	get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload,
			  &l4_len, &offset);

	hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset,
			       ip_identify, skb_shinfo(skb)->gso_size);

	return 1;
}

static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
			struct sk_buff *skb)
{
	enum hinic_l4_offload_type l4_offload;
	u32 offset, l4_len, network_hdr_len;
	enum hinic_l3_offload_type l3_type;
	union hinic_l3 ip;
	union hinic_l4 l4;
	u8 l4_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->encapsulation) {
		u32 l4_tunnel_len;

		ip.hdr = skb_network_header(skb);

		if (ip.v4->version == 4)
			l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
		else if (ip.v4->version == 6)
			l3_type = IPV6_PKT;
		else
			l3_type = L3TYPE_UNKNOWN;

		hinic_task_set_outter_l3(task, l3_type,
					 skb_network_header_len(skb));

		l4_tunnel_len = skb_inner_network_offset(skb) -
				skb_transport_offset(skb);

		hinic_task_set_tunnel_l4(task, TUNNEL_UDP_NO_CSUM,
					 l4_tunnel_len);

		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);
		network_hdr_len = skb_inner_network_header_len(skb);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_network_header_len(skb);
	}

	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type,
			     &l4_proto);

	hinic_task_set_inner_l3(task, l3_type, network_hdr_len);

	get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload,
			  &l4_len, &offset);

	hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);

	return 1;
}

static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
			    u32 *queue_info)
{
	enum hinic_offload_type offload = 0;
	int enabled;

	enabled = offload_tso(task, queue_info, skb);
	if (enabled > 0) {
		offload |= TX_OFFLOAD_TSO;
	} else if (enabled == 0) {
		enabled = offload_csum(task, queue_info, skb);
		if (enabled)
			offload |= TX_OFFLOAD_CSUM;
	} else {
		return -EPROTONOSUPPORT;
	}

	if (offload)
		hinic_task_set_l2hdr(task, skb_network_offset(skb));

	/* payload offset must not exceed MAX_PAYLOAD_OFFSET */
	if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) >
	    MAX_PAYLOAD_OFFSET) {
		return -EPROTONOSUPPORT;
	}

	/* mss should not be less than HINIC_MSS_MIN */
	if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) {
		*queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
		*queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS);
	}

	return 0;
}

netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 prod_idx, q_id = skb->queue_mapping;
	struct netdev_queue *netdev_txq;
	int nr_sges, err = NETDEV_TX_OK;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct hinic_txq *txq;
	struct hinic_qp *qp;

	txq = &nic_dev->txqs[q_id];
	qp = container_of(txq->sq, struct hinic_qp, sq);

	if (skb->len < MIN_SKB_LEN) {
		if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
			netdev_err(netdev, "Failed to pad skb\n");
			goto update_error_stats;
		}

		skb->len = MIN_SKB_LEN;
	}

	nr_sges = skb_shinfo(skb)->nr_frags + 1;
	if (nr_sges > txq->max_sges) {
		netdev_err(netdev, "Too many Tx sges\n");
		goto skb_error;
	}

	err = tx_map_skb(nic_dev, skb, txq->sges);
	if (err)
		goto skb_error;

	wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

	sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
	if (!sq_wqe) {
		netif_stop_subqueue(netdev, qp->q_id);

		/* Check for the case free_tx_poll is called in another cpu
		 * and we stopped the subqueue after free_tx_poll check.
		 */
		sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
		if (sq_wqe) {
			netif_wake_subqueue(nic_dev->netdev, qp->q_id);
			goto process_sq_wqe;
		}

		tx_unmap_skb(nic_dev, skb, txq->sges);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_busy++;
		u64_stats_update_end(&txq->txq_stats.syncp);
		err = NETDEV_TX_BUSY;
		wqe_size = 0;
		goto flush_skbs;
	}

process_sq_wqe:
	hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);

	err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
	if (err)
		goto offload_error;

	hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
	netdev_txq = netdev_get_tx_queue(netdev, q_id);
	if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
		hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

	return err;

offload_error:
	hinic_sq_return_wqe(txq->sq, wqe_size);
	tx_unmap_skb(nic_dev, skb, txq->sges);

skb_error:
	dev_kfree_skb_any(skb);

update_error_stats:
	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.tx_dropped++;
	u64_stats_update_end(&txq->txq_stats.syncp);

	return NETDEV_TX_OK;
}
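/**
 * tx_free_skb - unmap and free skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/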
static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			struct hinic_sge *sges)
{
	tx_unmap_skb(nic_dev, skb, sges);

	dev_kfree_skb_any(skb);
}
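/**
 * free_all_tx_skbs - free all skbs in tx queue
 * @txq: tx queue
 **/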
static void free_all_tx_skbs(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_sq *sq = txq->sq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct sk_buff *skb;
	int nr_sges;
	u16 ci;

	while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
		sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
		if (!sq_wqe)
			break;

		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	}
}
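/**
 * free_tx_poll - free the tx skbs for the specified tx queue
 * @napi: napi
 * @budget: number of tx wqes to free
 *
 * Return number of tx wqes freed
 **/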
static int free_tx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi);
	struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct netdev_queue *netdev_txq;
	struct hinic_sq *sq = txq->sq;
	struct hinic_wq *wq = sq->wq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	int nr_sges, pkts = 0;
	struct sk_buff *skb;
	u64 tx_bytes = 0;
	u16 hw_ci, sw_ci;

	do {
		hw_ci = HW_CONS_IDX(sq) & wq->mask;

		/* Read a WQEBB to get the real WQE size and consumer index. */
		sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
		if ((!sq_wqe) ||
		    (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
			break;

		/* If this WQE has multiple WQEBBs, we will read again to get
		 * the full size WQE.
		 */
		if (wqe_size > wq->wqebb_size) {
			sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
			if (unlikely(!sq_wqe))
				break;
		}

		tx_bytes += skb->len;
		pkts++;

		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	} while (pkts < budget);

	if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) &&
	    hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
		netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);

		__netif_tx_lock(netdev_txq, smp_processor_id());

		netif_wake_subqueue(nic_dev->netdev, qp->q_id);

		__netif_tx_unlock(netdev_txq);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_wake++;
		u64_stats_update_end(&txq->txq_stats.syncp);
	}

	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.bytes += tx_bytes;
	txq->txq_stats.pkts += pkts;
	u64_stats_update_end(&txq->txq_stats.syncp);

	if (pkts < budget) {
		napi_complete(napi);
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   sq->msix_entry,
					   HINIC_MSIX_ENABLE);
		return pkts;
	}

	return budget;
}

static void tx_napi_add(struct hinic_txq *txq, int weight)
{
	netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight);
	napi_enable(&txq->napi);
}

static void tx_napi_del(struct hinic_txq *txq)
{
	napi_disable(&txq->napi);
	netif_napi_del(&txq->napi);
}

static irqreturn_t tx_irq(int irq, void *data)
{
	struct hinic_txq *txq = data;
	struct hinic_dev *nic_dev;

	nic_dev = netdev_priv(txq->netdev);

	/* Disable the interrupt until napi will be completed */
	hinic_hwdev_set_msix_state(nic_dev->hwdev,
				   txq->sq->msix_entry,
				   HINIC_MSIX_DISABLE);

	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);

	napi_schedule(&txq->napi);
	return IRQ_HANDLED;
}

static int tx_request_irq(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_sq *sq = txq->sq;
	int err;

	tx_napi_add(txq, nic_dev->tx_weight);

	hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
			     TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
			     TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
			     TX_IRQ_NO_RESEND_TIMER);

	err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
	if (err) {
		dev_err(&pdev->dev, "Failed to request Tx irq\n");
		tx_napi_del(txq);
		return err;
	}

	return 0;
}

static void tx_free_irq(struct hinic_txq *txq)
{
	struct hinic_sq *sq = txq->sq;

	free_irq(sq->irq, txq);
	tx_napi_del(txq);
}
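/**
 * hinic_init_txq - Initialize the Tx Queue
 * @txq: Logical Tx Queue
 * @sq: Hardware Tx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/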
int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	int err, irqname_len;
	size_t sges_size;

	txq->netdev = netdev;
	txq->sq = sq;

	txq_stats_init(txq);

	txq->max_sges = HINIC_MAX_SQ_BUFDESCS;

	sges_size = txq->max_sges * sizeof(*txq->sges);
	txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
	if (!txq->sges)
		return -ENOMEM;

	sges_size = txq->max_sges * sizeof(*txq->free_sges);
	txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
	if (!txq->free_sges) {
		err = -ENOMEM;
		goto err_alloc_free_sges;
	}

	irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1;
	txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
	if (!txq->irq_name) {
		err = -ENOMEM;
		goto err_alloc_irqname;
	}

	sprintf(txq->irq_name, "hinic_txq%d", qp->q_id);

	err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
					 CI_UPDATE_NO_COALESC);
	if (err)
		goto err_hw_ci;

	err = tx_request_irq(txq);
	if (err) {
		netdev_err(netdev, "Failed to request Tx irq\n");
		goto err_req_tx_irq;
	}

	return 0;

err_req_tx_irq:
err_hw_ci:
	devm_kfree(&netdev->dev, txq->irq_name);

err_alloc_irqname:
	devm_kfree(&netdev->dev, txq->free_sges);

err_alloc_free_sges:
	devm_kfree(&netdev->dev, txq->sges);
	return err;
}
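/**
 * hinic_clean_txq - Clean the Tx Queue
 * @txq: Logical Tx Queue
 **/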
void hinic_clean_txq(struct hinic_txq *txq)
{
	struct net_device *netdev = txq->netdev;

	tx_free_irq(txq);

	free_all_tx_skbs(txq);

	devm_kfree(&netdev->dev, txq->irq_name);
	devm_kfree(&netdev->dev, txq->free_sges);
	devm_kfree(&netdev->dev, txq->sges);
}