#include <linux/pm_runtime.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"

/*
 * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
 */
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
		if (!trans_pcie->txq[txq_id])
			continue;
		iwl_pcie_gen2_txq_unmap(trans, txq_id);
	}
}

/*
 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
					  int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	u8 filled_tfd_size, num_fetch_chunks;
	u16 len = byte_cnt;
	__le16 bc_ent;

	len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
		return;

	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwl_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM - 0 for one chunk, 1 for two chunks, and so on.
	 * If, for example, the TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched.
	 */
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

	bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
	scd_bc_tbl->tfd_offset[idx] = bc_ent;
}
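
/*
 * Worked example (illustrative, not from the original source): for
 * byte_cnt = 1200 and num_tbs = 3, len becomes DIV_ROUND_UP(1200, 4) =
 * 300 dwords. filled_tfd_size is the TFD header plus three TB entries,
 * well under 64 bytes, so num_fetch_chunks = 0 and the byte-count entry
 * is cpu_to_le16(300 | (0 << 12)).
 */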

/*
 * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
					 struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}

static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
				    struct iwl_tfh_tfd *tfd)
{
	return le16_to_cpu(tfd->num_tbs) & 0x1f;
}

static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
				    struct iwl_cmd_meta *meta,
				    struct iwl_tfh_tfd *tfd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i, num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);

	if (num_tbs >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */
	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       le64_to_cpu(tfd->tbs[i].addr),
				       le16_to_cpu(tfd->tbs[i].tb_len),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 le64_to_cpu(tfd->tbs[i].addr),
					 le16_to_cpu(tfd->tbs[i].tb_len),
					 DMA_TO_DEVICE);
	}

	tfd->num_tbs = 0;
}

static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);

	lockdep_assert_held(&txq->lock);

	iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
				iwl_pcie_get_tfd(trans_pcie, txq, idx));

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
				struct iwl_tfh_tfd *tfd, dma_addr_t addr,
				u16 len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
	struct iwl_tfh_tb *tb = &tfd->tbs[idx];

	/* Each TFD can point to a maximum max_tbs Tx buffers */
	if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Error: cannot send more than %d chunks\n",
			trans_pcie->max_tbs);
		return -EINVAL;
	}

	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);

	tfd->num_tbs = cpu_to_le16(idx + 1);

	return idx;
}

static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
				     struct sk_buff *skb,
				     struct iwl_tfh_tfd *tfd, int start_len,
				     u8 hdr_len, struct iwl_device_cmd *dev_cmd)
{
#ifdef CONFIG_INET
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct page **page_ptr;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
			     &dev_cmd->hdr, start_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room);
	if (!hdr_page)
		return -ENOMEM;

	get_page(hdr_page->page);
	start_hdr = hdr_page->pos;
	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
	*page_ptr = hdr_page->page;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left = min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int tb_len;
		dma_addr_t tb_phys;
		struct tcphdr *tcph;
		u8 *iph, *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
		iph = hdr_page->pos + 8;
		tcph = (void *)(iph + ip_hdrlen);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		tb_len = hdr_page->pos - start_hdr;
		tb_phys = dma_map_single(trans->dev, start_hdr,
					 tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
			dev_kfree_skb(csum_skb);
			goto out_err;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			tb_len = min_t(unsigned int, tso.size, data_left);
			tb_phys = dma_map_single(trans->dev, tso.data,
						 tb_len, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				goto out_err;
			}
			iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
			trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
						       tb_len);

			data_left -= tb_len;
			tso_build_data(skb, &tso, tb_len);
		}
	}

	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;

out_err:
#endif
	return -EINVAL;
}
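
/*
 * Illustrative layout sketch (not from the original source): each
 * A-MSDU subframe built above lands in the header page as
 *
 *   | pad (0-3) | DA (6) | SA (6) | length (2) | SNAP/IP/TCP hdrs |
 *
 * and is mapped as one TB, followed by one TB per TSO payload chunk,
 * so a single TFD carries every subframe of the A-MSDU.
 */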

static
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
					    struct iwl_txq *txq,
					    struct iwl_device_cmd *dev_cmd,
					    struct sk_buff *skb,
					    struct iwl_cmd_meta *out_meta)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans_pcie, txq, idx);
	dma_addr_t tb_phys;
	bool amsdu;
	int i, len, tb1_len, tb2_len, hdr_len;
	void *tb1_addr;

	memset(tfd, 0, sizeof(*tfd));

	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);

	/* The first TB points to bi-directional DMA data */
	if (!amsdu)
		memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
		       IWL_FIRST_TB_SIZE);

	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
	      ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;

	/* do not align A-MSDU to dword as the subframe header aligns it */
	if (amsdu)
		tb1_len = len;
	else
		tb1_len = ALIGN(len, 4);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (amsdu) {
		if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
					      tb1_len + IWL_FIRST_TB_SIZE,
					      hdr_len, dev_cmd))
			goto out_err;

		/*
		 * building the A-MSDU might have changed this data, so memcpy
		 * it now
		 */
		memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
		       IWL_FIRST_TB_SIZE);
		return tfd;
	}

	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			goto out_err;
		tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
					      skb_frag_size(frag));

		out_meta->tbs |= BIT(tb_idx);
	}

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);

	return tfd;

out_err:
	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}
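
/*
 * Illustrative summary (not from the original source): for a regular
 * (non-A-MSDU) frame the TFD built above ends up as
 *
 *   TB0: first IWL_FIRST_TB_SIZE bytes of the command (bi-directional DMA)
 *   TB1: remainder of the TX command plus the 802.11 header
 *   TB2: rest of the skb's linear head, if any
 *   TB3+: one TB per paged fragment
 */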

int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	int idx;
	void *tfd;

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	spin_lock(&txq->lock);

	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[idx].skb = skb;
	txq->entries[idx].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(idx)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[idx].meta;
	out_meta->flags = 0;

	tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
	if (!tfd) {
		spin_unlock(&txq->lock);
		return -1;
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len),
				      iwl_pcie_gen2_get_num_tbs(trans, tfd));

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr) {
		if (txq->wd_timeout)
			mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
		iwl_trans_ref(trans);
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
	if (iwl_queue_space(txq) < txq->high_mark)
		iwl_stop_queue(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/*
 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
 * @trans: transport layer data
 * @cmd: a pointer to the ucode command structure
 *
 * Returns a negative value on failure; on success, the index (>= 0)
 * of the command in the command queue.
 */
static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
				      struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int i, cmd_pos, idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_tfh_tfd *tfd =
		iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);

	memset(tfd, 0, sizeof(*tfd));

	copy_size = sizeof(struct iwl_cmd_header_wide);
	cmd_size = sizeof(struct iwl_cmd_header_wide);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than the
	 * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
	 * separate TFDs, then we will need to increase the size of the buffers
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	/* re-initialize to NULL */
	memset(out_meta, 0, sizeof(*out_meta));
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
	out_cmd->hdr_wide.group_id = group_id;
	out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
	out_cmd->hdr_wide.length =
		cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
	out_cmd->hdr_wide.reserved = 0;
	out_cmd->hdr_wide.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
			    INDEX_TO_SEQ(txq->write_ptr));

	cmd_pos = sizeof(struct iwl_cmd_header_wide);
	copy_size = sizeof(struct iwl_cmd_header_wide);

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id), group_id,
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
			     tb0_size);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
				     copy_size - tb0_size);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			idx = -ENOMEM;
			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
			goto out;
		}
		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kzfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
	    !trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = true;
		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
		iwl_trans_ref(trans);
	}

	/* Increment and update queue's write index */
	txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

out:
	spin_unlock_bh(&txq->lock);
free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}
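
/*
 * Hedged usage sketch (not from the original source): a caller that
 * wants the firmware response would build something like
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = cmd_id,			// e.g. WIDE_ID(group, opcode)
 *		.len = { sizeof(payload), },
 *		.data = { &payload, },
 *		.flags = CMD_WANT_SKB,
 *	};
 *	ret = iwl_trans_pcie_gen2_send_hcmd(trans, &hcmd);
 *
 * Large buffers can be tagged IWL_HCMD_DFL_NOCOPY in .dataflags[] so
 * they are DMA-mapped in place instead of being copied into the command.
 */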

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
					struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n", cmd_str))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);

	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
				 pm_runtime_active(&trans_pcie->pci_dev->dev),
				 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
		if (!ret) {
			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
			return -ETIMEDOUT;
		}
	}

	cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
			cmd_str, ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: timed out after %dms.\n",
			cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       cmd_str);
		ret = -ETIMEDOUT;

		iwl_force_nmi(trans);
		iwl_trans_fw_error(trans);

		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd)
{
	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC) {
		int ret;

		/* An asynchronous command can not expect an SKB to be set. */
		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
			return -EINVAL;

		ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
		if (ret < 0) {
			IWL_ERR(trans,
				"Error sending %s: enqueue_hcmd failed: %d\n",
				iwl_get_cmd_string(trans, cmd->id), ret);
			return ret;
		}
		return 0;
	}

	return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
}

/*
 * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans_pcie->cmd_queue) {
			int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
			struct sk_buff *skb = txq->entries[idx].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_pcie_free_tso_page(trans_pcie, skb);
		}
		iwl_pcie_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id != trans_pcie->cmd_queue) {
				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
					      txq->id);
				iwl_trans_unref(trans);
			} else if (trans_pcie->ref_cmd_in_flight) {
				trans_pcie->ref_cmd_in_flight = false;
				IWL_DEBUG_RPM(trans,
					      "clear ref_cmd_in_flight\n");
				iwl_trans_unref(trans);
			}
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}
	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
					  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
				  txq->tfds, txq->dma_addr);
		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
	kfree(txq);
}

/*
 * iwl_pcie_gen2_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_gen2_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->n_window; i++) {
			kzfree(txq->entries[i].cmd);
			kzfree(txq->entries[i].free_buf);
		}
	del_timer_sync(&txq->stuck_timer);

	iwl_pcie_gen2_txq_free_memory(trans, txq);

	trans_pcie->txq[txq_id] = NULL;

	clear_bit(txq_id, trans_pcie->queue_used);
}

int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 struct iwl_tx_queue_cfg_cmd *cmd,
				 int cmd_id,
				 unsigned int timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue_cfg_rsp *rsp;
	struct iwl_txq *txq;
	struct iwl_host_cmd hcmd = {
		.id = cmd_id,
		.len = { sizeof(*cmd) },
		.data = { cmd, },
		.flags = CMD_WANT_SKB,
	};
	int ret, qid;
	u32 wr_ptr;

	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
	if (!txq)
		return -ENOMEM;
	ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
				     sizeof(struct iwlagn_scd_bc_tbl));
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		kfree(txq);
		return -ENOMEM;
	}

	ret = iwl_pcie_txq_alloc(trans, txq, TFD_TX_CMD_SLOTS, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue alloc failed\n");
		goto error;
	}
	ret = iwl_pcie_txq_init(trans, txq, TFD_TX_CMD_SLOTS, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue init failed\n");
		goto error;
	}

	txq->wd_timeout = msecs_to_jiffies(timeout);

	cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
	cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
	cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_TX_CMD_SLOTS));

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		goto error;

	if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
		ret = -EINVAL;
		goto error_free_resp;
	}

	rsp = (void *)hcmd.resp_pkt->data;
	qid = le16_to_cpu(rsp->queue_number);
	wr_ptr = le16_to_cpu(rsp->write_pointer);

	if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
		WARN_ONCE(1, "queue index %d unsupported", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (test_and_set_bit(qid, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d already used", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	txq->id = qid;
	trans_pcie->txq[qid] = txq;
	wr_ptr &= (TFD_QUEUE_SIZE_MAX - 1);

	/* Place first TFD at index corresponding to start sequence number */
	txq->read_ptr = wr_ptr;
	txq->write_ptr = wr_ptr;
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (txq->write_ptr) | (qid << 16));
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

	iwl_free_resp(&hcmd);
	return qid;

error_free_resp:
	iwl_free_resp(&hcmd);
error:
	iwl_pcie_gen2_txq_free_memory(trans, txq);
	return ret;
}
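
/*
 * Hedged usage sketch (not from the original source): the op mode fills
 * a struct iwl_tx_queue_cfg_cmd for the station/TID it wants a queue
 * for (the exact fields set are firmware-API specific) and calls
 *
 *	qid = iwl_trans_pcie_dyn_txq_alloc(trans, &cmd, cmd_id, wdg_timeout);
 *	if (qid < 0)
 *		return qid;	// allocation or firmware response failed
 *
 * and later releases the queue with
 * iwl_trans_pcie_dyn_txq_free(trans, qid).
 */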

void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", queue);
		return;
	}

	iwl_pcie_gen2_txq_unmap(trans, queue);

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}

void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* Free all TX queues */
	for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
		if (!trans_pcie->txq[i])
			continue;

		iwl_pcie_gen2_txq_free(trans, i);
	}
}

int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *cmd_queue;
	int txq_id = trans_pcie->cmd_queue, ret;

	/* alloc and init the command queue */
	if (!trans_pcie->txq[txq_id]) {
		cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
		if (!cmd_queue) {
			IWL_ERR(trans, "Not enough memory for command queue\n");
			return -ENOMEM;
		}
		trans_pcie->txq[txq_id] = cmd_queue;
		ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	} else {
		cmd_queue = trans_pcie->txq[txq_id];
	}

	ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true);
	if (ret) {
		IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
		goto error;
	}
	trans_pcie->txq[txq_id]->id = txq_id;
	set_bit(txq_id, trans_pcie->queue_used);

	return 0;

error:
	iwl_pcie_gen2_tx_free(trans);
	return ret;
}