/* Intel wireless (iwlwifi) PCIe transport - Tx queue and host command handling */

#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*
 * DMA queue services: theory of operation
 *
 * A Tx queue resides in host DRAM and consists of a circular buffer of
 * buffer descriptors (TFDs), each of which points to one or more data
 * buffers for the device to read. Driver and device exchange the state
 * of each queue via "read" and "write" indexes; the driver always keeps
 * at least one slot unused so that an empty queue (read == write) can be
 * distinguished from a completely full one.
 */

int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than max_tfd_queue_size elements in the queue.
	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
		max = q->n_window;
	else
		max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;

	/*
	 * max_tfd_queue_size is a power of 2, so the following is equivalent
	 * to modulo by max_tfd_queue_size and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}
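
/*
 * Worked example (illustrative values only): with max_tfd_queue_size = 256
 * and n_window = 256, max is 255, so a full queue never looks identical to
 * an empty one. If write_ptr = 10 and read_ptr = 250, then
 * used = (10 - 250) & 255 = 16, and 255 - 16 = 239 slots remain free.
 */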

/*
 * iwl_queue_init - Initialize queue's high/low-water marks and
 * read/write indexes
 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num)
{
	q->n_window = slots_num;

	/* slots_num must be power-of-two size, otherwise
	 * iwl_pcie_get_cmd_index is broken.
	 */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}
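
/*
 * Illustration (assumed values): with slots_num = 256 the marks come out as
 * low_mark = 64 and high_mark = 32; the clamping above only matters for very
 * small windows, e.g. slots_num = 8 yields low_mark = 4 and high_mark = 2.
 */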

int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
{
	struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
	struct iwl_trans *trans = txq->trans;

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->read_ptr == txq->write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	iwl_trans_pcie_log_scd_error(trans, txq);

	iwl_force_nmi(trans);
}

/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt,
					     int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->write_ptr;
	int txq_id = txq->id;
	u8 sec_ctl = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	u8 sta_id = tx_cmd->sta_id;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}
	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
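
/*
 * Note (explanatory, assumption about HW behavior): the scheduler reads the
 * byte-count table with a window that can run past the end of the ring, so
 * entries for the first TFD_QUEUE_SIZE_BC_DUP slots are mirrored after the
 * TFD_QUEUE_SIZE_MAX boundary above - both copies must be kept in sync.
 */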

static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->id;
	int read_ptr = txq->read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->txqs.cmd.q_id)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    txq_id != trans->txqs.cmd.q_id &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->write_ptr | (txq_id << 8));
}

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	int i;

	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans->txqs.txq[i];

		if (!test_bit(i, trans->txqs.queue_used))
			continue;

		spin_lock_bh(&txq->lock);
		if (txq->need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			txq->need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
						  void *_tfd, u8 idx)
{
	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return (dma_addr_t)(le64_to_cpu(tb->addr));
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];
		dma_addr_t addr = get_unaligned_le32(&tb->lo);
		dma_addr_t hi_len;

		if (sizeof(dma_addr_t) <= sizeof(u32))
			return addr;

		hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;

		/*
		 * shift by 16 twice to avoid warnings on 32-bit
		 * (where this code never runs anyway due to the
		 * if statement above)
		 */
		return addr | ((hi_len << 16) << 16);
	}
}
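
/*
 * Example (illustrative): for the 36-bit DMA address 0x8_1234_5678, tb->lo
 * holds 0x12345678 and the low nibble of hi_n_len holds 0x8, so the
 * reassembly above yields 0x812345678 on 64-bit systems.
 */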

static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
				       u8 idx, dma_addr_t addr, u16 len)
{
	struct iwl_tfd *tfd_fh = (void *)tfd;
	struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];

	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	hi_n_len |= iwl_get_dma_hi_addr(addr);

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd_fh->num_tbs = idx + 1;
}

static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
{
	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;

		return le16_to_cpu(tfd->num_tbs) & 0x1f;
	} else {
		struct iwl_tfd *tfd = _tfd;

		return tfd->num_tbs & 0x1f;
	}
}

static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_cmd_meta *meta,
			       struct iwl_txq *txq, int index)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i, num_tbs;
	void *tfd = iwl_pcie_get_tfd(trans, txq, index);

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);

	if (num_tbs > trans_pcie->max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */

	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       iwl_pcie_tfd_tb_get_addr(trans, tfd, i),
				       iwl_pcie_tfd_tb_get_len(trans, tfd, i),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 iwl_pcie_tfd_tb_get_addr(trans, tfd,
								  i),
					 iwl_pcie_tfd_tb_get_len(trans, tfd,
								 i),
					 DMA_TO_DEVICE);
	}

	meta->tbs = 0;

	if (trans->trans_cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	} else {
		struct iwl_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	}
}

/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->read_ptr]
 * @trans: transport private data
 * @txq: tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes.
 * Does NOT free the TFD itself (which is within the circular buffer).
 */
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->read_ptr;
	int idx = iwl_pcie_get_cmd_index(txq, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX tfds
	 */
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	void *tfd;
	u32 num_tbs;

	tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr;

	if (reset)
		memset(tfd, 0, trans_pcie->tfd_size);

	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);

	/* Each TFD can point to a maximum max_tbs Tx buffers */
	if (num_tbs >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans_pcie->max_tbs);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);

	return num_tbs;
}
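
/*
 * Note (added for clarity): on success the return value is the index at
 * which the new TB was placed; callers such as iwl_fill_data_tbs() use it
 * to record page-mapped TBs in out_meta->tbs for later unmapping.
 */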

int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
		       int slots_num, bool cmd_queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = trans_pcie->tfd_size *
		trans->trans_cfg->base_params->max_tfd_queue_size;
	size_t tb0_buf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	if (trans->trans_cfg->use_tfh)
		tfd_sz = trans_pcie->tfd_size * slots_num;

	timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
	txq->trans = trans;

	txq->n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device
	 */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs));

	tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;

	txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
						&txq->first_tb_dma,
						GFP_KERNEL);
	if (!txq->first_tb_bufs)
		goto err_free_tfds;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
error:
	if (txq->entries && cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}

int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue)
{
	int ret;
	u32 tfd_queue_max_size =
		trans->trans_cfg->base_params->max_tfd_queue_size;

	txq->need_update = false;

	/* max_tfd_queue_size must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken.
	 */
	if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
		      "Max tfd queue size must be a power of two, but is %d",
		      tfd_queue_max_size))
		return -EINVAL;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(txq, slots_num);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	if (cmd_queue) {
		static struct lock_class_key iwl_pcie_cmd_queue_lock_class;

		lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
	}

	__skb_queue_head_init(&txq->overflow_q);

	return 0;
}

void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb)
{
	struct page **page_ptr;
	struct page *next;

	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
	next = *page_ptr;
	*page_ptr = NULL;

	while (next) {
		struct page *tmp = next;

		next = *(void **)(page_address(next) + PAGE_SIZE -
				  sizeof(void *));
		__free_page(tmp);
	}
}

static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
		return;
	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
		return;

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
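
/*
 * Note (added for clarity): this is the counterpart of
 * iwl_pcie_set_cmd_in_flight() further below - the MAC_ACCESS_REQ reference
 * taken there to keep the NIC awake for a host command is dropped here once
 * the command queue drains.
 */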

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_pcie_free_tso_page(trans_pcie, skb);
		}
		iwl_pcie_txq_free_tfd(trans, txq);
		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id == trans->txqs.cmd.q_id)
				iwl_pcie_clear_cmd_in_flight(trans);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq_id: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans_pcie->tfd_size *
				  trans->trans_cfg->base_params->max_tfd_queue_size,
				  txq->tfds, txq->dma_addr);
		txq->dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->trans_cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->trans_cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
				trans->txqs.cmd.fifo,
				trans->txqs.cmd.wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channels */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * we should never get here in gen2 trans mode - return early to
	 * avoid having invalid accesses
	 */
	if (WARN_ON_ONCE(trans->trans_cfg->gen2))
		return;

	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = trans->txqs.txq[txq_id];

		if (trans->trans_cfg->use_tfh)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->read_ptr = 0;
		txq->write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have been reset
	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}

static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ch, ret;
	u32 mask = 0;

	spin_lock(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans, &flags))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans, &flags);

out:
	spin_unlock(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq_memory)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Tx queues */
	if (trans_pcie->txq_memory) {
		for (txq_id = 0;
		     txq_id < trans->trans_cfg->base_params->num_of_queues;
		     txq_id++) {
			iwl_pcie_txq_free(trans, txq_id);
			trans->txqs.txq[txq_id] = NULL;
		}
	}

	kfree(trans_pcie->txq_memory);
	trans_pcie->txq_memory = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;

	if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
		return -EINVAL;

	bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail.
	 */
	if (WARN_ON(trans_pcie->txq_memory)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				     bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq_memory =
		kcalloc(trans->trans_cfg->base_params->num_of_queues,
			sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq_memory) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
		ret = iwl_pcie_txq_alloc(trans, trans->txqs.txq[txq_id],
					 slots_num, cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
		trans->txqs.txq[txq_id]->id = txq_id;
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq_memory) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		ret = iwl_pcie_txq_init(trans, trans->txqs.txq[txq_id],
					slots_num, cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}

		/*
		 * Tell nic where to find circular buffer of TFDs for a
		 * given Tx queue, and enable the DMA channel used for that
		 * queue.
		 * Circular buffer (TFD queue in DRAM) physical base address
		 */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   trans->txqs.txq[txq_id]->dma_addr >> 8);
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->trans_cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	if (!txq->wd_timeout)
		return;

	/*
	 * station is asleep and we send data - that must
	 * be uAPSD or PS-Poll. Don't rearm the timer.
	 */
	if (txq->frozen)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->read_ptr == txq->write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
	int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
	int last_to_free;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
		return;

	spin_lock_bh(&txq->lock);

	if (!test_bit(txq_id, trans->txqs.queue_used)) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->read_ptr, tfd_num, ssn);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used.
	 */
	last_to_free = iwl_queue_dec_wrap(trans, tfd_num);

	if (!iwl_queue_used(txq, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free,
			trans->trans_cfg->base_params->max_tfd_queue_size,
			txq->write_ptr, txq->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     read_ptr != tfd_num;
	     txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr),
	     read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) {
		struct sk_buff *skb = txq->entries[read_ptr].skb;

		if (WARN_ON_ONCE(!skb))
			continue;

		iwl_pcie_free_tso_page(trans_pcie, skb);

		__skb_queue_tail(skbs, skb);

		txq->entries[read_ptr].skb = NULL;

		if (!trans->trans_cfg->use_tfh)
			iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(txq);

	if (iwl_queue_space(trans, txq) > txq->low_mark &&
	    test_bit(txq_id, trans->txqs.queue_stopped)) {
		struct sk_buff_head overflow_skbs;

		__skb_queue_head_init(&overflow_skbs);
		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);

		/*
		 * We are going to transmit from the overflow queue.
		 * Remember this state so that wait_for_txq_empty will know we
		 * are adding more packets to the TFD queue. It cannot rely on
		 * the state of &txq->overflow_q, as we just emptied it, but
		 * haven't TXed the content yet.
		 */
		txq->overflow_tx = true;

		/*
		 * This is tricky: we are in reclaim path which is non
		 * re-entrant, so noone will try to access the txq data
		 * from that path. We stopped tx, so we can't have tx as
		 * well. Bottom line, we can unlock and re-lock later.
		 */
		spin_unlock_bh(&txq->lock);

		while (!skb_queue_empty(&overflow_skbs)) {
			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
			struct iwl_device_tx_cmd *dev_cmd_ptr;

			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
						 trans_pcie->dev_cmd_offs);

			/*
			 * Note that we can very well be overflowing again.
			 * In that case, iwl_queue_space will be small again
			 * and we won't wake mac80211's queue.
			 */
			iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
		}

		if (iwl_queue_space(trans, txq) > txq->low_mark)
			iwl_wake_queue(trans, txq);

		spin_lock_bh(&txq->lock);
		txq->overflow_tx = false;
	}

out:
	spin_unlock_bh(&txq->lock);
}

/* Set wr_ptr of specific device and txq  */
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);

	txq->write_ptr = ptr;
	txq->read_ptr = txq->write_ptr;

	spin_unlock_bh(&txq->lock);
}

static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
	    !trans_pcie->cmd_hold_nic_awake) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
		trans_pcie->cmd_hold_nic_awake = true;
	}

	return 0;
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	unsigned long flags;
	int nfreed = 0;
	u16 r;

	lockdep_assert_held(&txq->lock);

	idx = iwl_pcie_get_cmd_index(txq, idx);
	r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);

	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
	    (!iwl_queue_used(txq, idx))) {
		WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, idx,
			  trans->trans_cfg->base_params->max_tfd_queue_size,
			  txq->write_ptr, txq->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(trans, idx); r != idx;
	     r = iwl_queue_inc_wrap(trans, r)) {
		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, txq->write_ptr, r);
			iwl_force_nmi(trans);
		}
	}

	if (txq->read_ptr == txq->write_ptr) {
		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
		iwl_pcie_clear_cmd_in_flight(trans);
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	}

	iwl_pcie_txq_progress(txq);
}

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				      u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
		      SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler
 */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
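/* Example (illustrative): sta_id 5, tid 2 -> ra_tid 0x52 */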

bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int fifo = -1;
	bool scd_bug = false;

	if (test_and_set_bit(txq_id, trans->txqs.queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior configuring the cmd queue */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans->txqs.cmd.q_id)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			txq->ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = txq->read_ptr;
		}
	} else {
		/*
		 * If we need to move the SCD write pointer by steps of
		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
		 * the op_mode know by returning true later.
		 * Do this only in case cfg is NULL since this trick can
		 * be done only if we have DQA enabled which is true for mvm
		 * only. And mvm never sets a cfg pointer.
		 * This is really ugly, but this is the easiest way out for
		 * this sad hardware issue.
		 * This bug has been fixed on devices 9000 and up.
		 */
		scd_bug = !trans->trans_cfg->mq_rx_supported &&
			!((ssn - txq->write_ptr) & 0x3f) &&
			(ssn != txq->write_ptr);
		if (scd_bug)
			ssn++;
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF)
	 */
	txq->read_ptr = (ssn & 0xff);
	txq->write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				      SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
			trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
			SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
	}

	return scd_bug;
}

void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	txq->ampdu = !shared_mode;
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
	trans->txqs.txq[txq_id]->frozen = false;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	iwl_pcie_txq_unmap(trans, txq_id);
	trans->txqs.txq[txq_id]->ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*
 * HOST COMMAND QUEUE FUNCTIONS
 *
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	if (WARN(!trans->wide_cmd_header &&
		 group_id > IWL_ALWAYS_LONG_GROUP,
		 "unsupported wide command %#x\n", cmd->id))
		return -EINVAL;

	if (group_id != 0) {
		copy_size = sizeof(struct iwl_cmd_header_wide);
		cmd_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		copy_size = sizeof(struct iwl_cmd_header);
		cmd_size = sizeof(struct iwl_cmd_header);
	}

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id),
		 cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	if (group_id != 0) {
		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr_wide.group_id = group_id;
		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
		out_cmd->hdr_wide.length =
			cpu_to_le16(cmd_size -
				    sizeof(struct iwl_cmd_header_wide));
		out_cmd->hdr_wide.reserved = 0;
		out_cmd->hdr_wide.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));

		cmd_pos = sizeof(struct iwl_cmd_header_wide);
		copy_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));
		out_cmd->hdr.group_id = 0;

		cmd_pos = sizeof(struct iwl_cmd_header);
		copy_size = sizeof(struct iwl_cmd_header);
	}

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id),
		     group_id, out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_pcie_get_first_tb_dma(txq, idx),
			       tb0_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta, txq,
					   txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - tb0_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta, txq,
					   txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree_sensitive(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
	if (ret < 0) {
		idx = ret;
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		goto out;
	}

	/* Increment and update queue's write index */
	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

out:
	spin_unlock_bh(&txq->lock);
free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}
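
/*
 * Sketch of the resulting host-command TFD layout (explanatory note):
 *
 *   TB0  - first IWL_FIRST_TB_SIZE bytes of the command, copied into the
 *          queue's bi-directional first_tb_bufs area
 *   TB1  - remainder of the copied (non-NOCOPY/DUP) command data, if any
 *   TB2+ - one TB per NOCOPY/DUP chunk, mapped in place (or from dup_buf)
 */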

/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	u8 group_id;
	u32 cmd_id;
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then there a command routing bug has been introduced
	 * in the queue management code.
	 */
	if (WARN(txq_id != trans->txqs.cmd.q_id,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
		 txq->write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = iwl_pcie_get_cmd_index(txq, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
	group_id = cmd->hdr.group_id;
	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);

	iwl_pcie_tfd_unmap(trans, meta, txq, index);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
	}

	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
		iwl_op_mode_async_cb(trans->op_mode, cmd);

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 iwl_get_cmd_string(trans, cmd_id));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd_id));
		wake_up(&trans_pcie->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}

#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_get_cmd_string(trans, cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n",
		 iwl_get_cmd_string(trans, cmd->id)))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_get_cmd_string(trans, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			iwl_get_cmd_string(trans, cmd->id),
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd->id));
		ret = -ETIMEDOUT;

		iwl_trans_pcie_sync_nmi(trans);
		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		iwl_trans_pcie_dump_regs(trans);
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			iwl_get_cmd_string(trans, cmd->id));
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			iwl_get_cmd_string(trans, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC)
		return iwl_pcie_send_hcmd_async(trans, cmd);

	/* We still can fail on RFKILL that can be asserted while we wait */
	return iwl_pcie_send_hcmd_sync(trans, cmd);
}

static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
			     struct iwl_cmd_meta *out_meta)
{
	u16 head_tb_len;
	int i;

	/*
	 * Set up a TB to point directly to the remainder
	 * of the skb's head, if any
	 */
	head_tb_len = skb_headlen(skb) - hdr_len;

	if (head_tb_len > 0) {
		dma_addr_t tb_phys = dma_map_single(trans->dev,
						    skb->data + hdr_len,
						    head_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
					tb_phys, head_tb_len);
		iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
					tb_phys, skb_frag_size(frag));
		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
						skb_frag_size(frag), false);
		if (tb_idx < 0)
			return tb_idx;

		out_meta->tbs |= BIT(tb_idx);
	}

	return 0;
}
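
/*
 * Note (added for clarity): for a regular frame the TFD thus ends up as
 * TB0 (fixed first-TB buffer), TB1 (Tx command + 802.11 header), followed
 * by the TBs built above: one for the skb head past the header and one
 * per page fragment.
 */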

#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
	struct page **page_ptr;

	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);

	if (WARN_ON(*page_ptr))
		return NULL;

	if (!p->page)
		goto alloc;

	/*
	 * Check if there's enough room on this page
	 *
	 * Note that we put a page chaining pointer *last* in the
	 * page - we need it somewhere, and if it's there then we
	 * avoid DMA mapping the last bits of the page which may
	 * trigger the 32-bit boundary hardware bug.
	 */
	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
			   sizeof(void *))
		goto out;

	/* We don't have enough room on this page, get a new one. */
	__free_page(p->page);

alloc:
	p->page = alloc_page(GFP_ATOMIC);
	if (!p->page)
		return NULL;
	p->pos = page_address(p->page);
	/* set the chaining pointer to NULL */
	*(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
out:
	*page_ptr = p->page;
	get_page(p->page);
	return p;
}

static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
					bool ipv6, unsigned int len)
{
	if (ipv6) {
		struct ipv6hdr *iphv6 = iph;

		tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr,
					       len + tcph->doff * 4,
					       IPPROTO_TCP, 0);
	} else {
		struct iphdr *iphv4 = iph;

		ip_send_check(iphv4);
		tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr,
						 len + tcph->doff * 4,
						 IPPROTO_TCP, 0);
	}
}
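
/*
 * Note (added for clarity): the helper above seeds tcph->check with the
 * *complement* of the pseudo-header checksum, so that checksumming the TCP
 * header and payload over it and folding the result (csum_fold() in the
 * A-MSDU path below) produces the final TCP checksum.
 */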

static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(txq->trans);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
			     trans_pcie->tfd_size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Allocate room for the subframe headers */
	hdr_page = get_page_hdr(trans, hdr_room, skb);
	if (!hdr_page)
		return -ENOMEM;

	start_hdr = hdr_page->pos;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left =
			min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int hdr_tb_len;
		dma_addr_t hdr_tb_phys;
		struct tcphdr *tcph;
		u8 *iph, *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
		iph = hdr_page->pos + 8;
		tcph = (void *)(iph + ip_hdrlen);

		/* software checksum support - for testing only */
		if (trans_pcie->sw_csum_tx) {
			csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
					     GFP_ATOMIC);
			if (!csum_skb)
				return -ENOMEM;

			iwl_compute_pseudo_hdr_csum(iph, tcph,
						    skb->protocol ==
						    htons(ETH_P_IPV6),
						    data_left);

			skb_put_data(csum_skb, tcph, tcp_hdrlen(skb));
			skb_reset_transport_header(csum_skb);
			csum_skb->csum_start =
				(unsigned char *)tcp_hdr(csum_skb) -
				csum_skb->head;
		}

		hdr_page->pos += snap_ip_tcp_hdrlen;

		hdr_tb_len = hdr_page->pos - start_hdr;
		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
					     hdr_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
			dev_kfree_skb(csum_skb);
			return -EINVAL;
		}
		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
				       hdr_tb_len, false);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					hdr_tb_phys, hdr_tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			unsigned int size = min_t(unsigned int, tso.size,
						  data_left);
			dma_addr_t tb_phys;

			if (trans_pcie->sw_csum_tx)
				skb_put_data(csum_skb, tso.data, size);

			tb_phys = dma_map_single(trans->dev, tso.data,
						 size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				return -EINVAL;
			}

			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
					       size, false);
			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
						tb_phys, size);

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		/* fix up the TCP checksum if checksumming was done in SW */
		if (trans_pcie->sw_csum_tx) {
			__wsum csum;

			csum = skb_checksum(csum_skb,
					    skb_checksum_start_offset(csum_skb),
					    csum_skb->len -
					    skb_checksum_start_offset(csum_skb),
					    0);
			dev_kfree_skb(csum_skb);
			dma_sync_single_for_cpu(trans->dev, hdr_tb_phys,
						hdr_tb_len, DMA_TO_DEVICE);
			tcph->check = csum_fold(csum);
			dma_sync_single_for_device(trans->dev, hdr_tb_phys,
						   hdr_tb_len, DMA_TO_DEVICE);
		}
	}

	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;
}
#else /* CONFIG_INET */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	/* No A-MSDU without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */
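
/*
 * Overview of iwl_trans_pcie_tx() below (summary note): compute a software
 * checksum if offload is emulated, reserve a ring entry (or park the frame
 * on overflow_q), build TB0/TB1 from the Tx command and 802.11 header,
 * attach the payload TBs (A-MSDU or linear/fragments), update the scheduler
 * byte-count table and finally bump the write pointer.
 */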

int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	void *tfd;
	u16 len, tb1_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	bool amsdu;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (unlikely(trans_pcie->sw_csum_tx &&
		     skb->ip_summed == CHECKSUM_PARTIAL)) {
		int offs = skb_checksum_start_offset(skb);
		int csum_offs = offs + skb->csum_offset;
		__wsum csum;

		if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
			return -1;

		csum = skb_checksum(skb, offs, skb->len - offs, 0);
		*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

	if (iwl_queue_space(trans, txq) < txq->high_mark) {
		iwl_stop_queue(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_queue_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans_pcie->dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);

			spin_unlock(&txq->lock);
			return 0;
		}
	}

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != txq->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	out_meta->flags = 0;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	/* do not align A-MSDU to dword as the subframe header aligns it */
	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	if (trans_pcie->sw_csum_tx || !amsdu) {
		tb1_len = ALIGN(len, 4);
		/* Tell NIC about any 2-byte padding after MAC header */
		if (tb1_len != len)
			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
	} else {
		tb1_len = len;
	}

	/*
	 * The first TB points to bi-directional DMA data, we'll
	 * memcpy the data into it later.
	 */
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_pcie_get_tfd(trans, txq,
					      txq->write_ptr),
			     trans_pcie->tfd_size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     hdr_len);

	/*
	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
	 * (adding subframes, etc.).
	 * This can happen in some testing flows when the amsdu was already
	 * pre-built, and we just need to send the resulting skb.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else {
		struct sk_buff *frag;

		if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					       out_meta)))
			goto out_err;

		skb_walk_frags(skb, frag) {
			if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
						       out_meta)))
				goto out_err;
		}
	}

	/* building the A-MSDU might have changed this data, so memcpy it now */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);

	tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
					 iwl_pcie_tfd_get_num_tbs(trans, tfd));

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
		/*
		 * If the TXQ is active, then set the timer, if not,
		 * set the timer in remainder so that the timer will
		 * be armed with the right value when the station will
		 * wake up.
		 */
		if (!txq->frozen)
			mod_timer(&txq->stuck_timer,
				  jiffies + txq->wd_timeout);
		else
			txq->frozen_expiry_remainder = txq->wd_timeout;
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
	spin_unlock(&txq->lock);
	return -1;
}