1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64#ifndef __iwl_trans_h__
65#define __iwl_trans_h__
66
67#include <linux/ieee80211.h>
68#include <linux/mm.h>
69#include <linux/lockdep.h>
70#include <linux/kernel.h>
71
72#include "iwl-debug.h"
73#include "iwl-config.h"
74#include "fw/img.h"
75#include "iwl-op-mode.h"
76#include "fw/api/cmdhdr.h"
77#include "fw/api/txq.h"
78#include "fw/api/dbg-tlv.h"
79#include "iwl-dbg-tlv.h"
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
/*
 * Layout of the RX frame status word (iwl_rx_packet.len_n_flags),
 * as consumed by iwl_rx_packet_len() and the RX path.
 */
#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF	/* bits 0-13: frame length */
#define FH_RSCSR_FRAME_INVALID 0x55550000	/* marker for an invalid frame */
#define FH_RSCSR_FRAME_ALIGN 0x40		/* frame alignment (64 bytes) */
#define FH_RSCSR_RPA_EN BIT(25)			/* RPA enabled for this frame */
#define FH_RSCSR_RADA_EN BIT(26)		/* RADA enabled for this frame */
#define FH_RSCSR_RXQ_POS 16			/* bits 16-21: RX queue number */
#define FH_RSCSR_RXQ_MASK 0x3F0000
122
/**
 * struct iwl_rx_packet - packet as received from the device
 * @len_n_flags: frame length in bits 0-13 (see FH_RSCSR_FRAME_SIZE_MSK),
 *	RX queue number and RPA/RADA flags in the upper bits
 * @hdr: command/notification header
 * @data: variable-length payload following the header
 *
 * NOTE(review): the length in @len_n_flags covers @hdr plus @data but not
 * @len_n_flags itself — see iwl_rx_packet_payload_len().
 */
struct iwl_rx_packet {
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;
145
146static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
147{
148 return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
149}
150
151static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
152{
153 return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
154}
155
156
157
158
159
160
161
162
163
164
/**
 * enum CMD_MODE - how to send the host commands ?
 * @CMD_ASYNC: return right away and don't wait for the response
 * @CMD_WANT_SKB: the response is wanted; the caller must free the response
 *	buffer with iwl_free_resp() after use
 * @CMD_SEND_IN_RFKILL: allow the command to be sent while RF-kill is asserted
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after the command completes
 */
enum CMD_MODE {
	CMD_ASYNC = BIT(0),
	CMD_WANT_SKB = BIT(1),
	CMD_SEND_IN_RFKILL = BIT(2),
	CMD_WANT_ASYNC_CALLBACK = BIT(3),
};
171
/* default payload size a host command can carry in-line */
#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd - a device command going through the transport
 *
 * The two union members overlay the same bytes: the wide-header form trades
 * part of the payload space for the larger header, so the total size of
 * both variants is identical (header + DEF_CMD_PAYLOAD_SIZE).
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

/* maximum number of bytes a TFD can carry for a command */
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
197
198
199
200
201
/* number of data chunks (TBs) that can be attached to a host command */
#define IWL_MAX_CMD_TBS_PER_TFD 2

/**
 * enum iwl_hcmd_dataflag - per-chunk flag for each data[] entry of a
 *	host command
 * @IWL_HCMD_DFL_NOCOPY: the data chunk must not be copied into the
 *	command buffer; it is mapped for DMA directly, so the caller must
 *	keep it accessible until the command completes
 * @IWL_HCMD_DFL_DUP: duplicate the data chunk into a DMA-able buffer
 *	instead of copying it into the command itself
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY = BIT(0),
	IWL_HCMD_DFL_DUP = BIT(1),
};

/*
 * Which firmware error event tables have been advertised via TLVs;
 * stored in iwl_trans_debug.error_event_table_tlv_status.
 */
enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
};
229
230
231
232
233
234
235
236
237
238
239
240
241
242
/**
 * struct iwl_host_cmd - Host command to the uCode
 * @data: array of chunks that composes the actual command payload
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set in @flags
 * @_rx_page_addr: (internal) page address of the response
 * @_rx_page_order: (internal) page order of the response, used by
 *	iwl_free_resp()
 * @flags: CMD_* flags (see enum CMD_MODE)
 * @id: command id
 * @len: length of each chunk in @data
 * @dataflags: IWL_HCMD_DFL_* flag for each chunk in @data
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};
254
255static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
256{
257 free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
258}
259
/*
 * struct iwl_rx_cmd_buffer - page-backed RX buffer handed to the op_mode.
 * Fields prefixed with '_' are internal to the transport; access the data
 * through rxb_addr()/rxb_offset() and steal/free it with rxb_steal_page()/
 * iwl_free_rxb().
 */
struct iwl_rx_cmd_buffer {
	struct page *_page;		/* backing page */
	int _offset;			/* data offset within the page */
	bool _page_stolen;		/* set by rxb_steal_page() */
	u32 _rx_page_order;		/* allocation order of _page */
	unsigned int truesize;		/* true buffer size for accounting */
};
267
268static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
269{
270 return (void *)((unsigned long)page_address(r->_page) + r->_offset);
271}
272
/* Offset of the RX data within the backing page. */
static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}
277
278static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
279{
280 r->_page_stolen = true;
281 get_page(r->_page);
282 return r->_page;
283}
284
/* Free the pages backing an RX buffer. */
static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
289
/* maximum number of no-reclaim commands the op_mode may register */
#define MAX_NO_RECLAIM_CMDS 6

/* build a contiguous bitmask covering bits lo..hi inclusive */
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer supports
 * (TVQM queues on newer, 22000-family style hardware).
 */
#define IWL_MAX_HW_QUEUES 32
#define IWL_MAX_TVQM_QUEUES 512

#define IWL_MAX_TID_COUNT 8
#define IWL_MGMT_TID 15
#define IWL_FRAME_LIMIT 64
#define IWL_MAX_RX_HW_QUEUES 16
305
306
307
308
309
310
/**
 * enum iwl_d3_status - device status after D3 (suspend) resume
 * @IWL_D3_STATUS_ALIVE: firmware is still running, state was preserved
 * @IWL_D3_STATUS_RESET: device was reset while suspended; firmware must
 *	be reloaded
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
/**
 * enum iwl_trans_status - bit numbers for iwl_trans.status
 * @STATUS_SYNC_HCMD_ACTIVE: a synchronous host command is in flight
 * @STATUS_DEVICE_ENABLED: the device is on
 * @STATUS_TPOWER_PMI: the device is in power-save (PMI) mode
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the hardware RF-kill switch is asserted
 * @STATUS_RFKILL_OPMODE: RF-kill as reported to the op_mode
 * @STATUS_FW_ERROR: the firmware hit an error; blocks TX and sync commands
 * @STATUS_TRANS_GOING_IDLE: transport is transitioning to idle
 * @STATUS_TRANS_IDLE: transport is idle
 * @STATUS_TRANS_DEAD: transport is dead, no further commands accepted
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};
342
343static inline int
344iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
345{
346 switch (rb_size) {
347 case IWL_AMSDU_2K:
348 return get_order(2 * 1024);
349 case IWL_AMSDU_4K:
350 return get_order(4 * 1024);
351 case IWL_AMSDU_8K:
352 return get_order(8 * 1024);
353 case IWL_AMSDU_12K:
354 return get_order(12 * 1024);
355 default:
356 WARN_ON(1);
357 return -1;
358 }
359}
360
/**
 * struct iwl_hcmd_names - mapping of a command id to its printable name
 * @cmd_id: the command id
 * @cmd_name: the name, normally the stringified identifier (see HCMD_NAME)
 */
struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

/* build an iwl_hcmd_names entry from the command identifier itself */
#define HCMD_NAME(x) \
	{ .cmd_id = x, .cmd_name = #x }

/* one group of command names; arrays of these are sorted by group id */
struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

/* build an iwl_hcmd_arr entry from a static array of iwl_hcmd_names */
#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
/**
 * struct iwl_trans_config - transport configuration, set by the op_mode
 * @op_mode: pointer to the upper layer (op_mode)
 * @cmd_queue: the index of the command queue
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue
 * @no_reclaim_cmds: some devices erroneously notify about commands that must
 *	not be reclaimed; list them here
 * @n_no_reclaim_cmds: number of entries in @no_reclaim_cmds
 * @rx_buf_size: RX buffer size needed for A-MSDUs
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: transport should compute the TCP checksum
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; the name is the command ID
 * @command_groups_size: number of command groups, to check it fits in the array
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
};
420
/* debug dump blob produced by the transport's dump_data op */
struct iwl_trans_dump_data {
	u32 len;	/* number of valid bytes in @data */
	u8 data[];	/* the dump itself */
};

struct iwl_trans;

/* scheduler configuration for enabling a TX queue (pre-22000 style) */
struct iwl_trans_txq_scd_cfg {
	u8 fifo;		/* fifo to attach the queue to */
	u8 sta_id;		/* station id */
	u8 tid;			/* traffic identifier */
	bool aggregate;		/* is this an aggregation queue? */
	int frame_limit;	/* scheduler window size */
};
435
436
437
438
439
440
441
442
/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data reported to the op_mode
 * @fr_bd_cb: DMA address of free BD cyclic buffer
 * @fr_bd_wid: initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
/**
 * struct iwl_trans_ops - transport-specific operations
 *
 * All the handlers MUST be implemented unless noted otherwise by the
 * corresponding iwl_trans_* wrapper below (wrappers that NULL-check an op
 * make it optional).  Callbacks that the wrappers annotate with
 * might_sleep() may sleep.
 */
struct iwl_trans_ops {

	/* device lifetime: bring up HW, leave, load firmware, stop */
	int (*start_hw)(struct iwl_trans *iwl_trans);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans);

	/* platform power management (suspend/resume to/from D3) */
	int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	/* data path: TX and frame reclaim */
	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);

	/* TX queue management (scheduler-configured and dynamic variants) */
	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);

	int (*txq_alloc)(struct iwl_trans *trans,
			 __le16 flags, u8 sta_id, u8 tid,
			 int cmd_id, int size,
			 unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);
	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
			    struct iwl_trans_rxq_dma_data *data);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	/* low-level register / memory access */
	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	void (*sw_reset)(struct iwl_trans *trans);
	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
	void (*release_nic_access)(struct iwl_trans *trans,
				   unsigned long *flags);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
	int (*suspend)(struct iwl_trans *trans);
	void (*resume)(struct iwl_trans *trans);

	/* debugging support */
	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask);
	void (*debugfs_cleanup)(struct iwl_trans *trans);
	void (*sync_nmi)(struct iwl_trans *trans);
};
602
603
604
605
606
607
608
/**
 * enum iwl_trans_state - state of the transport layer
 * @IWL_TRANS_NO_FW: no firmware is running on the device
 * @IWL_TRANS_FW_ALIVE: firmware is loaded and alive
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW = 0,
	IWL_TRANS_FW_ALIVE = 1,
};
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
/**
 * enum iwl_plat_pm_mode - platform power management mode
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled; the device is
 *	reset on suspend
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode, preserving state
 *	across suspend (i.e. WoWLAN)
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};
652
653
654
655
656
657
658
659
/**
 * enum iwl_ini_cfg_state - state of an ini debug configuration
 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug configuration was loaded
 * @IWL_INI_CFG_STATE_LOADED: a valid configuration was loaded
 * @IWL_INI_CFG_STATE_CORRUPTED: the configuration was found corrupted and
 *	is ignored
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

/* maximum time (in jiffies) to wait for an NMI-triggered sync to complete */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)
668
669
670
671
672
673
674
/**
 * struct iwl_dram_data - DRAM block descriptor
 * @physical: DMA address of the block
 * @block: CPU-accessible pointer to the block
 * @size: size of the block in bytes
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_self_init_dram - DRAM data for devices whose firmware
 *	self-initializes (context-info based)
 * @fw: firmware section blocks
 * @fw_cnt: number of entries in @fw
 * @paging: paging blocks
 * @paging_cnt: number of entries in @paging
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
/**
 * struct iwl_trans_debug - transport debug-related data
 * @n_dest_reg: number of registers in the destination TLV
 * @rec_on: whether debug recording is currently enabled
 * @dest_tlv: debug destination TLV (points into the firmware image)
 * @conf_tlv: per-configuration debug TLVs
 * @trigger_tlv: debug trigger TLVs
 * @lmac_error_event_table: addresses of the LMAC error tables
 * @umac_error_event_table: address of the UMAC error table
 * @error_event_table_tlv_status: which tables were advertised via TLV
 *	(bitmap of enum iwl_error_event_table_status)
 * @internal_ini_cfg: state of the internal ini debug configuration
 * @external_ini_cfg: state of the external ini debug configuration
 * @num_blocks: number of valid entries in @fw_mon
 * @fw_mon: firmware monitor DRAM allocations
 * @hw_error: equipment experienced a hardware error
 * @ini_dest: debug monitor destination as configured by ini TLVs
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	int num_blocks;
	struct iwl_dram_data fw_mon[IWL_FW_INI_ALLOCATION_NUM];

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;
};
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770struct iwl_trans {
771 const struct iwl_trans_ops *ops;
772 struct iwl_op_mode *op_mode;
773 const struct iwl_cfg_trans_params *trans_cfg;
774 const struct iwl_cfg *cfg;
775 struct iwl_drv *drv;
776 enum iwl_trans_state state;
777 unsigned long status;
778
779 struct device *dev;
780 u32 max_skb_frags;
781 u32 hw_rev;
782 u32 hw_rf_id;
783 u32 hw_id;
784 char hw_id_str[52];
785
786 u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
787
788 bool pm_support;
789 bool ltr_enabled;
790
791 const struct iwl_hcmd_arr *command_groups;
792 int command_groups_size;
793 bool wide_cmd_header;
794
795 u8 num_rx_queues;
796
797 size_t iml_len;
798 u8 *iml;
799
800
801 struct kmem_cache *dev_cmd_pool;
802 char dev_cmd_pool_name[50];
803
804 struct dentry *dbgfs_dir;
805
806#ifdef CONFIG_LOCKDEP
807 struct lockdep_map sync_cmd_lockdep_map;
808#endif
809
810 struct iwl_trans_debug dbg;
811 struct iwl_self_init_dram init_dram;
812
813 enum iwl_plat_pm_mode system_pm_mode;
814
815
816
817 char trans_specific[0] __aligned(sizeof(void *));
818};
819
820const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
821int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
822
823static inline void iwl_trans_configure(struct iwl_trans *trans,
824 const struct iwl_trans_config *trans_cfg)
825{
826 trans->op_mode = trans_cfg->op_mode;
827
828 trans->ops->configure(trans, trans_cfg);
829 WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
830}
831
832static inline int iwl_trans_start_hw(struct iwl_trans *trans)
833{
834 might_sleep();
835
836 return trans->ops->start_hw(trans);
837}
838
839static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
840{
841 might_sleep();
842
843 if (trans->ops->op_mode_leave)
844 trans->ops->op_mode_leave(trans);
845
846 trans->op_mode = NULL;
847
848 trans->state = IWL_TRANS_NO_FW;
849}
850
851static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
852{
853 might_sleep();
854
855 trans->state = IWL_TRANS_FW_ALIVE;
856
857 trans->ops->fw_alive(trans, scd_addr);
858}
859
860static inline int iwl_trans_start_fw(struct iwl_trans *trans,
861 const struct fw_img *fw,
862 bool run_in_rfkill)
863{
864 might_sleep();
865
866 WARN_ON_ONCE(!trans->rx_mpdu_cmd);
867
868 clear_bit(STATUS_FW_ERROR, &trans->status);
869 return trans->ops->start_fw(trans, fw, run_in_rfkill);
870}
871
872static inline void iwl_trans_stop_device(struct iwl_trans *trans)
873{
874 might_sleep();
875
876 trans->ops->stop_device(trans);
877
878 trans->state = IWL_TRANS_NO_FW;
879}
880
881static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
882 bool reset)
883{
884 might_sleep();
885 if (!trans->ops->d3_suspend)
886 return 0;
887
888 return trans->ops->d3_suspend(trans, test, reset);
889}
890
891static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
892 enum iwl_d3_status *status,
893 bool test, bool reset)
894{
895 might_sleep();
896 if (!trans->ops->d3_resume)
897 return 0;
898
899 return trans->ops->d3_resume(trans, status, test, reset);
900}
901
902static inline int iwl_trans_suspend(struct iwl_trans *trans)
903{
904 if (!trans->ops->suspend)
905 return 0;
906
907 return trans->ops->suspend(trans);
908}
909
910static inline void iwl_trans_resume(struct iwl_trans *trans)
911{
912 if (trans->ops->resume)
913 trans->ops->resume(trans);
914}
915
916static inline struct iwl_trans_dump_data *
917iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
918{
919 if (!trans->ops->dump_data)
920 return NULL;
921 return trans->ops->dump_data(trans, dump_mask);
922}
923
/*
 * Allocate a TX device command from the transport's slab pool.
 * GFP_ATOMIC: callable from the TX path.  Returns NULL on failure.
 */
static inline struct iwl_device_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

/* Return a TX device command to the transport's slab pool. */
static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}
937
938static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
939 struct iwl_device_cmd *dev_cmd, int queue)
940{
941 if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
942 return -EIO;
943
944 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
945 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
946 return -EIO;
947 }
948
949 return trans->ops->tx(trans, skb, dev_cmd, queue);
950}
951
952static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
953 int ssn, struct sk_buff_head *skbs)
954{
955 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
956 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
957 return;
958 }
959
960 trans->ops->reclaim(trans, queue, ssn, skbs);
961}
962
963static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
964 int ptr)
965{
966 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
967 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
968 return;
969 }
970
971 trans->ops->set_q_ptrs(trans, queue, ptr);
972}
973
/* Disable a TX queue; optionally de-configure the scheduler for it. */
static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}
979
980static inline bool
981iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
982 const struct iwl_trans_txq_scd_cfg *cfg,
983 unsigned int queue_wdg_timeout)
984{
985 might_sleep();
986
987 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
988 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
989 return false;
990 }
991
992 return trans->ops->txq_enable(trans, queue, ssn,
993 cfg, queue_wdg_timeout);
994}
995
996static inline int
997iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
998 struct iwl_trans_rxq_dma_data *data)
999{
1000 if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
1001 return -ENOTSUPP;
1002
1003 return trans->ops->rxq_dma_data(trans, queue, data);
1004}
1005
1006static inline void
1007iwl_trans_txq_free(struct iwl_trans *trans, int queue)
1008{
1009 if (WARN_ON_ONCE(!trans->ops->txq_free))
1010 return;
1011
1012 trans->ops->txq_free(trans, queue);
1013}
1014
1015static inline int
1016iwl_trans_txq_alloc(struct iwl_trans *trans,
1017 __le16 flags, u8 sta_id, u8 tid,
1018 int cmd_id, int size,
1019 unsigned int wdg_timeout)
1020{
1021 might_sleep();
1022
1023 if (WARN_ON_ONCE(!trans->ops->txq_alloc))
1024 return -ENOTSUPP;
1025
1026 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1027 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1028 return -EIO;
1029 }
1030
1031 return trans->ops->txq_alloc(trans, flags, sta_id, tid,
1032 cmd_id, size, wdg_timeout);
1033}
1034
/* Mark a TX queue shared/unshared (optional op; no-op if unimplemented). */
static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
						 int queue, bool shared_mode)
{
	if (trans->ops->txq_set_shared_mode)
		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}
1041
1042static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
1043 int fifo, int sta_id, int tid,
1044 int frame_limit, u16 ssn,
1045 unsigned int queue_wdg_timeout)
1046{
1047 struct iwl_trans_txq_scd_cfg cfg = {
1048 .fifo = fifo,
1049 .sta_id = sta_id,
1050 .tid = tid,
1051 .frame_limit = frame_limit,
1052 .aggregate = sta_id >= 0,
1053 };
1054
1055 iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
1056}
1057
1058static inline
1059void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
1060 unsigned int queue_wdg_timeout)
1061{
1062 struct iwl_trans_txq_scd_cfg cfg = {
1063 .fifo = fifo,
1064 .sta_id = -1,
1065 .tid = IWL_MAX_TID_COUNT,
1066 .frame_limit = IWL_FRAME_LIMIT,
1067 .aggregate = false,
1068 };
1069
1070 iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
1071}
1072
1073static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
1074 unsigned long txqs,
1075 bool freeze)
1076{
1077 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1078 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1079 return;
1080 }
1081
1082 if (trans->ops->freeze_txq_timer)
1083 trans->ops->freeze_txq_timer(trans, txqs, freeze);
1084}
1085
1086static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
1087 bool block)
1088{
1089 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1090 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1091 return;
1092 }
1093
1094 if (trans->ops->block_txq_ptrs)
1095 trans->ops->block_txq_ptrs(trans, block);
1096}
1097
1098static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
1099 u32 txqs)
1100{
1101 if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
1102 return -ENOTSUPP;
1103
1104 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1105 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1106 return -EIO;
1107 }
1108
1109 return trans->ops->wait_tx_queues_empty(trans, txqs);
1110}
1111
1112static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
1113{
1114 if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
1115 return -ENOTSUPP;
1116
1117 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1118 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1119 return -EIO;
1120 }
1121
1122 return trans->ops->wait_txq_empty(trans, queue);
1123}
1124
/* Direct register/memory accessors — thin dispatch to the transport ops. */

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

/* Read a periphery register. */
static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

/* Write a periphery register. */
static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	return trans->ops->write_prph(trans, ofs, val);
}

/* Read @dwords 32-bit words of device memory at @addr into @buf. */
static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}
1156
/*
 * Byte-count convenience wrapper around iwl_trans_read_mem().
 * The buffer size must be a whole number of 32-bit words; this is
 * enforced at compile time when the size is a constant.
 */
#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)	\
	do {							\
		if (__builtin_constant_p(bufsize))		\
			BUILD_BUG_ON((bufsize) % sizeof(u32));	\
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)
1163
1164static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
1165{
1166 u32 value;
1167
1168 if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
1169 return 0xa5a5a5a5;
1170
1171 return value;
1172}
1173
/* Write @dwords 32-bit words from @buf to device memory at @addr. */
static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

/* Write a single 32-bit word to device memory. */
static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}
1185
/* Signal power-save state to the device (optional op). */
static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

/* Trigger a software reset of the device (optional op). */
static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
{
	if (trans->ops->sw_reset)
		trans->ops->sw_reset(trans);
}
1197
/* Read-modify-write @reg: set the bits of @mask selected by @value. */
static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}
1203
/*
 * Acquire direct NIC access; evaluates to true on success.  The sparse
 * __cond_lock/__release annotations pair grab/release for static analysis.
 */
#define iwl_trans_grab_nic_access(trans, flags)	\
	__cond_lock(nic_access,			\
		    likely((trans)->ops->grab_nic_access(trans, flags)))

/* Release NIC access previously obtained via iwl_trans_grab_nic_access(). */
static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}
1214
1215static inline void iwl_trans_fw_error(struct iwl_trans *trans)
1216{
1217 if (WARN_ON_ONCE(!trans->op_mode))
1218 return;
1219
1220
1221 if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
1222 iwl_op_mode_nic_error(trans->op_mode);
1223}
1224
/* Trigger an NMI and wait for completion (optional op). */
static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	if (trans->ops->sync_nmi)
		trans->ops->sync_nmi(trans);
}
1230
1231static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
1232{
1233 return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
1234 trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
1235}
1236
1237
1238
1239
1240struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
1241 struct device *dev,
1242 const struct iwl_trans_ops *ops);
1243void iwl_trans_free(struct iwl_trans *trans);
1244
1245
1246
1247
1248int __must_check iwl_pci_register_driver(void);
1249void iwl_pci_unregister_driver(void);
1250
1251#endif
1252