#include <net/mac80211.h>

#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"
#include "mvm.h"
#include "fw/api/rs.h"

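/*
 * Will return 0 even if the cmd failed when RFKILL is asserted unless
 * CMD_WANT_SKB is set in cmd->flags.
 */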
int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
{
	int ret;

#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
	if (WARN_ON(mvm->d3_test_active))
		return -EIO;
#endif

	/*
	 * Synchronous commands from this op-mode must hold
	 * the mutex, this ensures we don't try to send two
	 * (or more) synchronous commands at a time.
	 */
	if (!(cmd->flags & CMD_ASYNC)) {
		lockdep_assert_held(&mvm->mutex);
		if (!(cmd->flags & CMD_SEND_IN_IDLE))
			iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD);
	}

	ret = iwl_trans_send_cmd(mvm->trans, cmd);

	if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE)))
		iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD);

	/*
	 * If the caller wants the SKB, then don't hide any problems, the
	 * caller might access the response buffer which will be NULL if
	 * the command failed.
	 */
	if (cmd->flags & CMD_WANT_SKB)
		return ret;

	/* Silently ignore failures if RFKILL is asserted */
	if (!ret || ret == -ERFKILL)
		return 0;
	return ret;
}

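/* Build a host command from a single flat data buffer and send it */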
int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
			 u32 flags, u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_mvm_send_cmd(mvm, &cmd);
}

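/*
 * Send a synchronous command, wait for the response and extract the
 * u32 status word from it; the response packet is freed here.
 */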
int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
			    u32 *status)
{
	struct iwl_rx_packet *pkt;
	struct iwl_cmd_response *resp;
	int ret, resp_len;

	lockdep_assert_held(&mvm->mutex);

#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
	if (WARN_ON(mvm->d3_test_active))
		return -EIO;
#endif

	/*
	 * Only synchronous commands can wait for status, and the caller
	 * must not ask for the SKB - this function handles the response
	 * packet itself.
	 */
	if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
		      "cmd flags %x", cmd->flags))
		return -EINVAL;

	cmd->flags |= CMD_WANT_SKB;

	ret = iwl_trans_send_cmd(mvm->trans, cmd);
	if (ret == -ERFKILL) {
		/*
		 * The command failed because of RFKILL; return success
		 * and rely on the status the caller already set.
		 */
		return 0;
	} else if (ret) {
		return ret;
	}

	pkt = cmd->resp_pkt;

	resp_len = iwl_rx_packet_payload_len(pkt);
	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		ret = -EIO;
		goto out_free_resp;
	}

	resp = (void *)pkt->data;
	*status = le32_to_cpu(resp->status);
 out_free_resp:
	iwl_free_resp(cmd);
	return ret;
}

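/*
 * We assume that the caller set the status to the success value
 */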
int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
				const void *data, u32 *status)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
	};

	return iwl_mvm_send_cmd_status(mvm, &cmd, status);
}

#define IWL_DECLARE_RATE_INFO(r) \
	[IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP

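/*
 * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
 */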
static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
	IWL_DECLARE_RATE_INFO(1),
	IWL_DECLARE_RATE_INFO(2),
	IWL_DECLARE_RATE_INFO(5),
	IWL_DECLARE_RATE_INFO(11),
	IWL_DECLARE_RATE_INFO(6),
	IWL_DECLARE_RATE_INFO(9),
	IWL_DECLARE_RATE_INFO(12),
	IWL_DECLARE_RATE_INFO(18),
	IWL_DECLARE_RATE_INFO(24),
	IWL_DECLARE_RATE_INFO(36),
	IWL_DECLARE_RATE_INFO(48),
	IWL_DECLARE_RATE_INFO(54),
};

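/*
 * Translate a legacy firmware rate to a mac80211 rate index; on 5 GHz
 * the CCK rates don't exist, so the index is offset by IWL_FIRST_OFDM_RATE.
 */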
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
					enum nl80211_band band)
{
	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
	int idx;
	int band_offset = 0;

	/* Legacy rate format, search for match in table */
	if (band == NL80211_BAND_5GHZ)
		band_offset = IWL_FIRST_OFDM_RATE;
	for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
		if (fw_rate_idx_to_plcp[idx] == rate)
			return idx - band_offset;

	return -1;
}

u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
{
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	return fw_rate_idx_to_plcp[rate_idx];
}

u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
{
	static const u8 mac80211_ac_to_ucode_ac[] = {
		AC_VO,
		AC_VI,
		AC_BE,
		AC_BK
	};

	return mac80211_ac_to_ucode_ac[ac];
}

void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_error_resp *err_resp = (void *)pkt->data;

	IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
		le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
	IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
		le16_to_cpu(err_resp->bad_cmd_seq_num),
		le32_to_cpu(err_resp->error_service));
	IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
		le64_to_cpu(err_resp->timestamp));
}

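/*
 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
 * The parameter should be set to the combination of antennas that are
 * available.
 */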
u8 first_antenna(u8 mask)
{
	BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
	if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
		return BIT(0);
	return BIT(ffs(mask) - 1);
}

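/*
 * Toggles between TX antennas. Receives the bitmask of valid TX antennas
 * and the *index* used for the last TX, and returns the next valid *index*
 * to use, wrapping around if needed.
 */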
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
{
	u8 ind = last_idx;
	int i;

	for (i = 0; i < MAX_ANT_NUM; i++) {
		ind = (ind + 1) % MAX_ANT_NUM;
		if (valid & BIT(ind))
			return ind;
	}

	WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
	return last_idx;
}

#define FW_SYSASSERT_CPU_MASK 0xf0000000
static const struct {
	const char *name;
	u8 num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "BAD_COMMAND", 0x39 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};

static const char *desc_lookup(u32 num)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
		if (advanced_lookup[i].num == (num & ~FW_SYSASSERT_CPU_MASK))
			return advanced_lookup[i].name;

	/* No entry matches 'num', use the last one: ADVANCED_SYSASSERT */
	return advanced_lookup[i].name;
}

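/*
 * Error event table layout as read from LMAC SRAM - old (v1) layout,
 * used by firmware that doesn't report trm_hw_status and version info.
 */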
struct iwl_error_event_table_v1 {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 pc;			/* program counter */
	u32 blink1;		/* branch link */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 bcon_time;		/* beacon timer */
	u32 tsf_low;		/* network timestamp function timer */
	u32 tsf_hi;		/* network timestamp function timer */
	u32 gp1;		/* GP1 timer register */
	u32 gp2;		/* GP2 timer register */
	u32 gp3;		/* GP3 timer register */
	u32 ucode_ver;		/* uCode version */
	u32 hw_ver;		/* HW Silicon version */
	u32 brd_ver;		/* HW board version */
	u32 log_pc;		/* log program counter */
	u32 frame_ptr;		/* frame pointer */
	u32 stack_ptr;		/* stack pointer */
	u32 hcmd;		/* last host command header */
	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	u32 isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	u32 wait_event;		/* wait event() caller address */
	u32 l2p_control;	/* L2pControlField */
	u32 l2p_duration;	/* L2pDurationField */
	u32 l2p_mhvalid;	/* L2pMhValidBits */
	u32 l2p_addr_match;	/* L2pAddrMatchStat */
	u32 lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	u32 u_timestamp;	/* indicate the date and time of the
				 * compilation */
	u32 flow_handler;	/* FH read/write pointers, RX credit */
} __packed;

struct iwl_error_event_table {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 trm_hw_status0;	/* TRM HW status */
	u32 trm_hw_status1;	/* TRM HW status */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 bcon_time;		/* beacon timer */
	u32 tsf_low;		/* network timestamp function timer */
	u32 tsf_hi;		/* network timestamp function timer */
	u32 gp1;		/* GP1 timer register */
	u32 gp2;		/* GP2 timer register */
	u32 fw_rev_type;	/* firmware revision type */
	u32 major;		/* uCode version major */
	u32 minor;		/* uCode version minor */
	u32 hw_ver;		/* HW Silicon version */
	u32 brd_ver;		/* HW board version */
	u32 log_pc;		/* log program counter */
	u32 frame_ptr;		/* frame pointer */
	u32 stack_ptr;		/* stack pointer */
	u32 hcmd;		/* last host command header */
	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	u32 last_cmd_id;	/* last HCMD id handled by the firmware */
	u32 wait_event;		/* wait event() caller address */
	u32 l2p_control;	/* L2pControlField */
	u32 l2p_duration;	/* L2pDurationField */
	u32 l2p_mhvalid;	/* L2pMhValidBits */
	u32 l2p_addr_match;	/* L2pAddrMatchStat */
	u32 lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	u32 u_timestamp;	/* indicate the date and time of the
				 * compilation */
	u32 flow_handler;	/* FH read/write pointers, RX credit */
} __packed;

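/*
 * UMAC error struct - relevant starting from family 8000 chip.
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */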
struct iwl_umac_error_event_table {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
	u32 blink1;		/* branch link */
	u32 blink2;		/* branch link */
	u32 ilink1;		/* interrupt link */
	u32 ilink2;		/* interrupt link */
	u32 data1;		/* error-specific data */
	u32 data2;		/* error-specific data */
	u32 data3;		/* error-specific data */
	u32 umac_major;
	u32 umac_minor;
	u32 frame_pointer;	/* core register 27 */
	u32 stack_pointer;	/* core register 28 */
	u32 cmd_header;		/* latest host cmd sent to UMAC */
	u32 nic_isr_pref;	/* ISR status register */
} __packed;

#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))

static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_umac_error_event_table table;
	u32 base = mvm->trans->dbg.umac_error_event_table;

	if (!mvm->support_umac_log &&
	    !(mvm->trans->dbg.error_event_table_tlv_status &
	      IWL_ERROR_EVENT_TABLE_UMAC))
		return;

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	if (table.valid)
		mvm->fwrt.dump.umac_err_id = table.error_id;

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
	IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
	IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
	IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
	IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
	IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
	IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
}

static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_error_event_table table;
	u32 val, base = mvm->trans->dbg.lmac_error_event_table[lmac_num];

	if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
		if (!base)
			base = mvm->fw->init_errlog_ptr;
	} else {
		if (!base)
			base = mvm->fw->inst_errlog_ptr;
	}

	if (base < 0x400000) {
		IWL_ERR(mvm,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
			? "Init" : "RT");
		return;
	}

	/* check if there is a HW error */
	val = iwl_trans_read_mem32(trans, base);
	if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
		int err;

		IWL_ERR(trans, "HW error, resetting before reading\n");

		/* reset the device */
		iwl_trans_sw_reset(trans);

		err = iwl_finish_nic_init(trans);
		if (err)
			return;
	}

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	if (table.valid)
		mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id;

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	/* Do not change this output - scripts rely on it */
	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);

	IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
	IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
	IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
	IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
	IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
	IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
	IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
	IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type);
	IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
	IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
	IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
	IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
	IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
	IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
	IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
	IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
	IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
	IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
	IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id);
	IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
	IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
	IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
	IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
	IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
}

void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
	if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
		IWL_ERR(mvm,
			"DEVICE_ENABLED bit is not set. Aborting dump.\n");
		return;
	}

	iwl_mvm_dump_lmac_error_log(mvm, 0);

	if (mvm->trans->dbg.lmac_error_event_table[1])
		iwl_mvm_dump_lmac_error_log(mvm, 1);

	iwl_mvm_dump_umac_error_log(mvm);

	iwl_fw_error_print_fseq_regs(&mvm->fwrt);
}

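/*
 * Reconfigure an already-enabled SCD queue; only valid on devices
 * that don't use the new TX API, where the firmware owns the queues.
 */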
int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
			 int tid, int frame_limit, u16 ssn)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = frame_limit,
		.sta_id = sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = fifo,
		.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
		.tid = tid,
	};
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
		 "Trying to reconfig unallocated queue %d\n", queue))
		return -ENXIO;

	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
		  queue, fifo, ret);

	return ret;
}

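/**
 * iwl_mvm_send_lq_cmd - send the LQ command to update link quality parameters
 *
 * The command is sent asynchronously. It is only valid when rate scaling
 * is done in the driver, i.e. when the firmware doesn't do TLC offload.
 */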
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
{
	struct iwl_host_cmd cmd = {
		.id = LQ_CMD,
		.len = { sizeof(struct iwl_lq_cmd), },
		.flags = CMD_ASYNC,
		.data = { lq, },
	};

	if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
		    iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	return iwl_mvm_send_cmd(mvm, &cmd);
}

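/**
 * iwl_mvm_update_smps - Get a request to change the SMPS mode
 * @req_type: The part of the driver that asks for the change.
 * @smps_request: The request to change the SMPS mode.
 *
 * Records the request and computes the resulting SMPS mode from all
 * other pending requests in the driver.
 */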
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 enum iwl_mvm_smps_type_request req_type,
			 enum ieee80211_smps_mode smps_request)
{
	struct iwl_mvm_vif *mvmvif;
	enum ieee80211_smps_mode smps_mode;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */
	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
		return;

	if (vif->type == NL80211_IFTYPE_AP)
		smps_mode = IEEE80211_SMPS_OFF;
	else
		smps_mode = IEEE80211_SMPS_AUTOMATIC;

	mvmvif = iwl_mvm_vif_from_mac80211(vif);
	mvmvif->smps_requests[req_type] = smps_request;
	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) {
			smps_mode = IEEE80211_SMPS_STATIC;
			break;
		}
		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
			smps_mode = IEEE80211_SMPS_DYNAMIC;
	}

	ieee80211_request_smps(vif, smps_mode);
}

int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
{
	struct iwl_statistics_cmd scmd = {
		.flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
	};
	struct iwl_host_cmd cmd = {
		.id = STATISTICS_CMD,
		.len[0] = sizeof(scmd),
		.data[0] = &scmd,
		.flags = CMD_WANT_SKB,
	};
	int ret;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;

	iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
	iwl_free_resp(&cmd);

	if (clear)
		iwl_mvm_accu_radio_stats(mvm);

	return 0;
}

void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
{
	mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
	mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
	mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
	mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
}

static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
				   struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	bool *result = _data;
	int i;

	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
		    mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
			*result = false;
	}
}

bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
{
	bool result = true;

	lockdep_assert_held(&mvm->mutex);

	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
		return false;

	if (mvm->cfg->rx_with_siso_diversity)
		return false;

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_diversity_iter, &result);

	return result;
}

void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
				  bool low_latency, u16 mac_id)
{
	struct iwl_mac_low_latency_cmd cmd = {
		.mac_id = cpu_to_le32(mac_id)
	};

	if (!fw_has_capa(&mvm->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
		return;

	if (low_latency) {
		/* currently we don't care about the direction */
		cmd.low_latency_rx = 1;
		cmd.low_latency_tx = 1;
	}

	if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD,
						 MAC_CONF_GROUP, 0),
				 0, sizeof(cmd), &cmd))
		IWL_ERR(mvm, "Failed to send low latency command\n");
}

int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       bool low_latency,
			       enum iwl_mvm_low_latency_cause cause)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int res;
	bool prev;

	lockdep_assert_held(&mvm->mutex);

	prev = iwl_mvm_vif_low_latency(mvmvif);
	iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause);

	low_latency = iwl_mvm_vif_low_latency(mvmvif);

	if (low_latency == prev)
		return 0;

	iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);

	res = iwl_mvm_update_quotas(mvm, false, NULL);
	if (res)
		return res;

	iwl_mvm_bt_coex_vif_change(mvm);

	return iwl_mvm_power_update_mac(mvm);
}

struct iwl_mvm_low_latency_iter {
	bool result;
	bool result_per_band[NUM_NL80211_BANDS];
};

static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
	struct iwl_mvm_low_latency_iter *result = _data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	enum nl80211_band band;

	if (iwl_mvm_vif_low_latency(mvmvif)) {
		result->result = true;

		if (!mvmvif->phy_ctxt)
			return;

		band = mvmvif->phy_ctxt->channel->band;
		result->result_per_band[band] = true;
	}
}

bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
{
	struct iwl_mvm_low_latency_iter data = {};

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_ll_iter, &data);

	return data.result;
}

bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band)
{
	struct iwl_mvm_low_latency_iter data = {};

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_ll_iter, &data);

	return data.result_per_band[band];
}

struct iwl_bss_iter_data {
	struct ieee80211_vif *vif;
	bool error;
};

static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_bss_iter_data *data = _data;

	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
		return;

	if (data->vif) {
		data->error = true;
		return;
	}

	data->vif = vif;
}

struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
{
	struct iwl_bss_iter_data bss_iter_data = {};

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_bss_iface_iterator, &bss_iter_data);

	if (bss_iter_data.error) {
		IWL_ERR(mvm, "More than one managed interface active!\n");
		return ERR_PTR(-EINVAL);
	}

	return bss_iter_data.vif;
}

struct iwl_sta_iter_data {
	bool assoc;
};

static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	struct iwl_sta_iter_data *data = _data;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	if (vif->bss_conf.assoc)
		data->assoc = true;
}

bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
{
	struct iwl_sta_iter_data data = {
		.assoc = false,
	};

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_sta_iface_iterator,
						   &data);
	return data.assoc;
}

unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
				    struct ieee80211_vif *vif,
				    bool tdls, bool cmd_q)
{
	struct iwl_fw_dbg_trigger_tlv *trigger;
	struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
	unsigned int default_timeout =
		cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
		/*
		 * We can't know when the station is asleep or awake, so we
		 * must disable the queue hang detection.
		 */
		if (fw_has_capa(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
		    vif && vif->type == NL80211_IFTYPE_AP)
			return IWL_WATCHDOG_DISABLED;
		return iwlmvm_mod_params.tfd_q_hang_detect ?
			default_timeout : IWL_WATCHDOG_DISABLED;
	}

	trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
	txq_timer = (void *)trigger->data;

	if (tdls)
		return le32_to_cpu(txq_timer->tdls);

	if (cmd_q)
		return le32_to_cpu(txq_timer->command_queue);

	if (WARN_ON(!vif))
		return default_timeout;

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_ADHOC:
		return le32_to_cpu(txq_timer->ibss);
	case NL80211_IFTYPE_STATION:
		return le32_to_cpu(txq_timer->bss);
	case NL80211_IFTYPE_AP:
		return le32_to_cpu(txq_timer->softap);
	case NL80211_IFTYPE_P2P_CLIENT:
		return le32_to_cpu(txq_timer->p2p_client);
	case NL80211_IFTYPE_P2P_GO:
		return le32_to_cpu(txq_timer->p2p_go);
	case NL80211_IFTYPE_P2P_DEVICE:
		return le32_to_cpu(txq_timer->p2p_device);
	case NL80211_IFTYPE_MONITOR:
		return default_timeout;
	default:
		WARN_ON(1);
		return mvm->cfg->base_params->wd_timeout;
	}
}

void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     const char *errmsg)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_mlme *trig_mlme;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
				     FW_DBG_TRIGGER_MLME);
	if (!trig)
		goto out;

	trig_mlme = (void *)trig->data;

	if (trig_mlme->stop_connection_loss &&
	    --trig_mlme->stop_connection_loss)
		goto out;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);

out:
	ieee80211_connection_loss(vif);
}

void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  const struct ieee80211_sta *sta,
					  u16 tid)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
				     FW_DBG_TRIGGER_BA);
	if (!trig)
		return;

	ba_trig = (void *)trig->data;

	if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
		return;

	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
				"Frame from %pM timed out, tid %d",
				sta->addr, tid);
}

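/* convert accumulated airtime (usec) over the elapsed time (msec) to percent */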
u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
{
	if (!elapsed)
		return 0;

	return (100 * airtime / elapsed) / USEC_PER_MSEC;
}

static enum iwl_mvm_traffic_load
iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
{
	u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);

	if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
		return IWL_MVM_TRAFFIC_HIGH;
	if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
		return IWL_MVM_TRAFFIC_MEDIUM;

	return IWL_MVM_TRAFFIC_LOW;
}

struct iwl_mvm_tcm_iter_data {
	struct iwl_mvm *mvm;
	bool any_sent;
};

static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
	struct iwl_mvm_tcm_iter_data *data = _data;
	struct iwl_mvm *mvm = data->mvm;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;

	if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
		return;

	low_latency = mvm->tcm.result.low_latency[mvmvif->id];

	if (!mvm->tcm.result.change[mvmvif->id] &&
	    prev == low_latency) {
		iwl_mvm_update_quotas(mvm, false, NULL);
		return;
	}

	if (prev != low_latency) {
		/* this sends traffic load and updates quota as well */
		iwl_mvm_update_low_latency(mvm, vif, low_latency,
					   LOW_LATENCY_TRAFFIC);
	} else {
		iwl_mvm_update_quotas(mvm, false, NULL);
	}

	data->any_sent = true;
}

static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
{
	struct iwl_mvm_tcm_iter_data data = {
		.mvm = mvm,
		.any_sent = false,
	};

	mutex_lock(&mvm->mutex);

	ieee80211_iterate_active_interfaces(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_tcm_iter, &data);

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
		iwl_mvm_config_scan(mvm);

	mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm;
	struct iwl_mvm_vif *mvmvif;
	struct ieee80211_vif *vif;

	mvmvif = container_of(wk, struct iwl_mvm_vif,
			      uapsd_nonagg_detected_wk.work);
	vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
	mvm = mvmvif->mvm;

	if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions)
		return;

	/* remember that this AP is broken */
	memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
	       vif->bss_conf.bssid, ETH_ALEN);
	mvm->uapsd_noagg_bssid_write_idx++;
	if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
		mvm->uapsd_noagg_bssid_write_idx = 0;

	iwl_mvm_connection_loss(mvm, vif,
				"AP isn't using AMPDU with uAPSD enabled");
}

static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	if (!vif->bss_conf.assoc)
		return;

	if (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
	    !mvmvif->queue_params[IEEE80211_AC_VI].uapsd &&
	    !mvmvif->queue_params[IEEE80211_AC_BE].uapsd &&
	    !mvmvif->queue_params[IEEE80211_AC_BK].uapsd)
		return;

	if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected)
		return;

	mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true;
	IWL_INFO(mvm,
		 "detected AP should do aggregation but isn't, likely due to U-APSD\n");
	schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ);
}

static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
						 unsigned int elapsed,
						 int mac)
{
	u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
	u64 tpt;
	unsigned long rate;
	struct ieee80211_vif *vif;

	rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);

	if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions ||
	    mvm->tcm.data[mac].uapsd_nonagg_detect.detected)
		return;

	if (iwl_mvm_has_new_rx_api(mvm)) {
		tpt = 8 * bytes; /* kbps */
		do_div(tpt, elapsed);
		rate *= 1000; /* kbps */
		if (tpt < 22 * rate / 100)
			return;
	} else {
		/*
		 * the rate here is actually the threshold, in 100Kbps units,
		 * so do the needed conversion from bytes to 100Kbps:
		 * 100kb = bits / (100 * 1000),
		 * 100kbps = 100kb / (msecs / 1000) ==
		 *           (bytes * 8 * 1000) / (msecs * 100 * 1000) ==
		 *           bytes * 8 / (100 * msecs)
		 */
		tpt = (8 * bytes);
		do_div(tpt, elapsed * 100);
		if (tpt < rate)
			return;
	}

	rcu_read_lock();
	vif = rcu_dereference(mvm->vif_id_to_mac[mac]);
	if (vif)
		iwl_mvm_uapsd_agg_disconnect(mvm, vif);
	rcu_read_unlock();
}

static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
				 struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 *band = _data;

	if (!mvmvif->phy_ctxt)
		return;

	band[mvmvif->id] = mvmvif->phy_ctxt->channel->band;
}

static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
					    unsigned long ts,
					    bool handle_uapsd)
{
	unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
	unsigned int uapsd_elapsed =
		jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts);
	u32 total_airtime = 0;
	u32 band_airtime[NUM_NL80211_BANDS] = {0};
	u32 band[NUM_MAC_INDEX_DRIVER] = {0};
	int ac, mac, i;
	bool low_latency = false;
	enum iwl_mvm_traffic_load load, band_load;
	bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);

	if (handle_ll)
		mvm->tcm.ll_ts = ts;
	if (handle_uapsd)
		mvm->tcm.uapsd_nonagg_ts = ts;

	mvm->tcm.result.elapsed = elapsed;

	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mvm_tcm_iterator,
						   &band);

	for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
		u32 vo_vi_pkts = 0;
		u32 airtime = mdata->rx.airtime + mdata->tx.airtime;

		total_airtime += airtime;
		band_airtime[band[mac]] += airtime;

		load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
		mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
		mvm->tcm.result.load[mac] = load;
		mvm->tcm.result.airtime[mac] = airtime;

		for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
			vo_vi_pkts += mdata->rx.pkts[ac] +
				      mdata->tx.pkts[ac];

		/* enable immediately with enough packets but defer disabling */
		if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
			mvm->tcm.result.low_latency[mac] = true;
		else if (handle_ll)
			mvm->tcm.result.low_latency[mac] = false;

		if (handle_ll) {
			/* clear old data */
			memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
			memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
		}
		low_latency |= mvm->tcm.result.low_latency[mac];

		if (!mvm->tcm.result.low_latency[mac] && handle_uapsd)
			iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed,
							     mac);

		/* clear old data */
		if (handle_uapsd)
			mdata->uapsd_nonagg_detect.rx_bytes = 0;
		memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
		memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
	}

	load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
	mvm->tcm.result.global_change = load != mvm->tcm.result.global_load;
	mvm->tcm.result.global_load = load;

	for (i = 0; i < NUM_NL80211_BANDS; i++) {
		band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed);
		mvm->tcm.result.band_load[i] = band_load;
	}

	/*
	 * If the current load isn't low, we need to force re-evaluation
	 * in the TCM period, so that we can return to low load if there
	 * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
	 * triggered by traffic).
	 */
	if (load != IWL_MVM_TRAFFIC_LOW)
		return MVM_TCM_PERIOD;

	/*
	 * If low-latency is active we need to force re-evaluation after
	 * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
	 * when there's no traffic at all.
	 */
	if (low_latency)
		return MVM_LL_PERIOD;

	/*
	 * Otherwise, we don't need to run the work struct because we're
	 * in the default "idle" state - traffic indication is low (which
	 * also covers the "no traffic" case) and low-latency is disabled
	 * so there's no state that may need to be disabled when there's
	 * no traffic at all.
	 *
	 * Note that this has no impact on the regular scheduling of the
	 * updates triggered by traffic - those happen whenever one of the
	 * two timeouts expires (if there's traffic at all.)
	 */
	return 0;
}

void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
{
	unsigned long ts = jiffies;
	bool handle_uapsd =
		time_after(ts, mvm->tcm.uapsd_nonagg_ts +
			       msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD));

	spin_lock(&mvm->tcm.lock);
	if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
		spin_unlock(&mvm->tcm.lock);
		return;
	}
	spin_unlock(&mvm->tcm.lock);

	if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
		mutex_lock(&mvm->mutex);
		if (iwl_mvm_request_statistics(mvm, true))
			handle_uapsd = false;
		mutex_unlock(&mvm->mutex);
	}

	spin_lock(&mvm->tcm.lock);
	/* re-check if somebody else won the recheck race */
	if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
		/* calculate statistics */
		unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
								  handle_uapsd);

		/* the memset needs to be visible before the timestamp */
		smp_mb();
		mvm->tcm.ts = ts;
		if (work_delay)
			schedule_delayed_work(&mvm->tcm.work, work_delay);
	}
	spin_unlock(&mvm->tcm.lock);

	iwl_mvm_tcm_results(mvm);
}

void iwl_mvm_tcm_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
					   tcm.work);

	iwl_mvm_recalc_tcm(mvm);
}

void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
{
	spin_lock_bh(&mvm->tcm.lock);
	mvm->tcm.paused = true;
	spin_unlock_bh(&mvm->tcm.lock);
	if (with_cancel)
		cancel_delayed_work_sync(&mvm->tcm.work);
}

void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
{
	int mac;
	bool low_latency = false;

	spin_lock_bh(&mvm->tcm.lock);
	mvm->tcm.ts = jiffies;
	mvm->tcm.ll_ts = jiffies;
	for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];

		memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
		memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
		memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
		memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));

		if (mvm->tcm.result.low_latency[mac])
			low_latency = true;
	}
	/* The TCM data needs to be reset before "paused" flag changes */
	smp_mb();
	mvm->tcm.paused = false;

	/*
	 * if the current load is not low or low latency is active, force
	 * re-evaluation to cover the case of no traffic.
	 */
	if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW)
		schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD);
	else if (low_latency)
		schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD);

	spin_unlock_bh(&mvm->tcm.lock);
}

void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk,
			  iwl_mvm_tcm_uapsd_nonagg_detected_wk);
}

void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
}

u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
{
	u32 reg_addr = DEVICE_SYSTEM_TIME_REG;

	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
	    mvm->trans->cfg->gp2_reg_addr)
		reg_addr = mvm->trans->cfg->gp2_reg_addr;

	return iwl_read_prph(mvm->trans, reg_addr);
}

void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
{
	bool ps_disabled;

	lockdep_assert_held(&mvm->mutex);

	/* Disable power save when reading GP2 */
	ps_disabled = mvm->ps_disabled;
	if (!ps_disabled) {
		mvm->ps_disabled = true;
		iwl_mvm_power_update_device(mvm);
	}

	*gp2 = iwl_mvm_get_systime(mvm);
	*boottime = ktime_get_boottime_ns();

	if (!ps_disabled) {
		mvm->ps_disabled = ps_disabled;
		iwl_mvm_power_update_device(mvm);
	}
}