#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h>
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include <linux/firmware.h>
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"

/**
 * DOC: Transport layer - what is it ?
 *
 * The transport layer is the layer that deals with the HW directly. It
 * provides an abstraction of the underlying HW to the upper layer, which
 * doesn't include any policy, algorithm or anything of this kind, but only
 * the mechanisms to make the HW do something. The transport layer is
 * started by the op_mode, which configures it, gives it the firmware to
 * load, and then uses it to send host commands and transmit frames; the
 * op_mode stops and frees it in the reverse order when it is done.
 */

#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

#define FH_RSCSR_FRAME_SIZE_MSK	0x00003FFF
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40
#define FH_RSCSR_RPA_EN		BIT(25)
#define FH_RSCSR_RADA_EN		BIT(26)
#define FH_RSCSR_RXQ_POS		16
#define FH_RSCSR_RXQ_MASK		0x3F0000

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-27: Reserved
	 * 26:    RADA enabled
	 * 25:    Offload enabled
	 * 24:    RPF enabled
	 * 23:    RSS enabled
	 * 22:    Checksum enabled
	 * 21-16: RX queue
	 * 15-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
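
/*
 * Illustrative sketch only (not part of this header's API): how an op_mode
 * notification handler typically extracts its payload from an RX packet.
 * The notification struct and handler name are hypothetical placeholders.
 *
 *	static void iwl_handle_foo_notif(struct iwl_rx_packet *pkt)
 *	{
 *		const struct iwl_foo_notif *notif = (const void *)pkt->data;
 *
 *		if (iwl_rx_packet_payload_len(pkt) < sizeof(*notif))
 *			return;
 *		// ... consume the fields of *notif ...
 *	}
 */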

/**
 * enum CMD_MODE - how to send the host commands ?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes. Valid only with CMD_ASYNC.
 * @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to
 *	SUSPEND and RESUME commands. We are in D3 mode when we set
 *	trans->system_pm_mode to IWL_PLAT_PM_MODE_D3.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_WANT_ASYNC_CALLBACK	= BIT(3),
	CMD_SEND_IN_D3		= BIT(4),
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

/**
 * struct iwl_device_tx_cmd - buffer for TX command
 * @hdr: the header
 * @payload: the payload placeholder
 *
 * The actual structure is sized dynamically according to need.
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};

enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
	IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3),
	IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4),
	IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5),
	IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
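
/*
 * Illustrative sketch only: a typical synchronous host command that asks
 * for the response buffer. The command id used here (WIDE_ID()/ECHO_CMD
 * from the firmware API headers) is just a placeholder.
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = WIDE_ID(LONG_GROUP, ECHO_CMD),
 *		.flags = CMD_WANT_SKB,
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &cmd);
 *
 *	if (!ret) {
 *		struct iwl_rx_packet *pkt = cmd.resp_pkt;
 *
 *		// inspect pkt, e.g. iwl_rx_packet_payload_len(pkt)
 *		iwl_free_resp(&cmd);
 *	}
 */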

struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}

#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT	64
#define IWL_MAX_RX_HW_QUEUES	16
#define IWL_9000_MAX_RX_HW_QUEUES	6

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended and
 *	a new image was loaded
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
 *	e.g. for testing
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
	STATUS_SUPPRESS_CMD_ERROR_ONCE,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return get_order(2 * 1024);
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(16 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

static inline int
iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return 2 * 1024;
	case IWL_AMSDU_4K:
		return 4 * 1024;
	case IWL_AMSDU_8K:
		return 8 * 1024;
	case IWL_AMSDU_12K:
		return 16 * 1024;
	default:
		WARN_ON(1);
		return 0;
	}
}

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
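
/*
 * Illustrative sketch only: how an op_mode typically builds the command
 * name tables consumed by iwl_get_cmd_string(). The command/group names
 * below come from the firmware API headers and serve as placeholders;
 * each array must be sorted by cmd_id (iwl_cmd_groups_verify_sorted()
 * warns otherwise).
 *
 *	static const struct iwl_hcmd_names iwl_legacy_names[] = {
 *		HCMD_NAME(ECHO_CMD),
 *		HCMD_NAME(UCODE_ALIVE_NTFY),
 *	};
 *
 *	static const struct iwl_hcmd_arr iwl_cmd_groups[] = {
 *		[LEGACY_GROUP] = HCMD_ARR(iwl_legacy_names),
 *	};
 */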

/**
 * struct iwl_dump_sanitize_ops - dump sanitization operations
 * @frob_txf: Scrub the TX FIFO data
 * @frob_hcmd: Scrub a host command, the %hcmd pointer is to the header
 *	but that might be short or long (&struct iwl_cmd_header or
 *	&struct iwl_cmd_header_wide)
 * @frob_mem: Scrub memory data
 */
struct iwl_dump_sanitize_ops {
	void (*frob_txf)(void *ctx, void *buf, size_t buflen);
	void (*frob_hcmd)(void *ctx, void *hcmd, size_t buflen);
	void (*frob_mem)(void *ctx, u32 mem_addr, void *mem, size_t buflen);
};

/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs;
 *	if unset 4k will be the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 * @fw_reset_handshake: firmware supports reset flow handshake
 * @queue_alloc_cmd_ver: queue allocation command version, set to 0
 *	for using the older SCD_QUEUE_CFG, set to the version of
 *	SCD_QUEUE_CONFIG_CMD otherwise.
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
	bool fw_reset_handshake;
	u8 queue_alloc_cmd_ver;
};

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data
 * @fr_bd_cb: DMA address of free BD cyclic buffer
 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};

/**
 * struct iwl_trans_ops - transport specific operations
 *
 * All the handlers MUST be implemented, except those that the inline
 * wrappers below check for NULL before calling (d3_suspend/d3_resume,
 * txq_alloc/txq_free, rxq_dma_data, dump_data and the other optional
 * debug, power and firmware-data hooks).
 *
 * The ops cover the whole life cycle driven by the op_mode: starting the
 * HW (start_hw), loading and starting the firmware (start_fw, fw_alive),
 * sending host commands (send_cmd), transmitting frames and reclaiming
 * them (tx, reclaim), configuring and flushing TX queues (txq_enable,
 * txq_disable, txq_alloc, txq_free, wait_tx_queues_empty, wait_txq_empty),
 * low-level register/memory access (write8, write32, read32, read_prph,
 * write_prph, read_mem, write_mem, read_config32, grab_nic_access,
 * release_nic_access), power management (d3_suspend, d3_resume, set_pmi),
 * debug data collection (dump_data, sync_nmi) and finally stopping the
 * device (stop_device) and leaving the op_mode (op_mode_leave).
 */
struct iwl_trans_ops {

	int (*start_hw)(struct iwl_trans *iwl_trans);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans);

	int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_tx_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);

	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);

	int (*txq_alloc)(struct iwl_trans *trans, u32 flags,
			 u32 sta_mask, u8 tid,
			 int size, unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);
	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
			    struct iwl_trans_rxq_dma_data *data);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership);
	bool (*grab_nic_access)(struct iwl_trans *trans);
	void (*release_nic_access)(struct iwl_trans *trans);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask,
						 const struct iwl_dump_sanitize_ops *sanitize_ops,
						 void *sanitize_ctx);
	void (*debugfs_cleanup)(struct iwl_trans *trans);
	void (*sync_nmi)(struct iwl_trans *trans);
	int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
	int (*set_reduce_power)(struct iwl_trans *trans,
				const void *data, u32 len);
	void (*interrupts)(struct iwl_trans *trans, bool enable);
	int (*imr_dma_data)(struct iwl_trans *trans,
			    u32 dst_addr, u64 src_addr,
			    u32 byte_cnt);
};

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
 * @IWL_TRANS_FW_STARTED: fw was started, but not alive yet
 * @IWL_TRANS_FW_ALIVE: fw sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW,
	IWL_TRANS_FW_STARTED,
	IWL_TRANS_FW_ALIVE,
};

/**
 * DOC: Platform power management
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. writing to
 * /sys/power/state). The modes the driver supports for system-wide power
 * management are described by &enum iwl_plat_pm_mode below.
 */

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior during system-wide suspend (i.e. S3).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 *	device; the device is completely turned off during suspend.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};

/**
 * enum iwl_ini_cfg_state
 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
 * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
 * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
 *	are corrupted. The rest of the debug TLVs will still be used
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

/* Max time to wait for nmi interrupt */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)

/**
 * struct iwl_dram_data
 * @physical: page phy pointer
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_fw_mon - fw monitor per allocation id
 * @num_frags: number of fragments
 * @frags: an array of DRAM buffer fragments
 */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_imr_data - imr dram data used during debug process
 * @imr_enable: imr enable status received from fw
 * @imr_size: imr dram size received from fw
 * @sram_addr: sram address from debug tlv
 * @sram_size: sram size from debug tlv
 * @imr2sram_remainbyte: size remained after each dma transfer
 * @imr_curr_addr: current dst address used during dma transfer
 * @imr_base_addr: imr address received from fw
 */
struct iwl_imr_data {
	u32 imr_enable;
	u32 imr_size;
	u32 sram_addr;
	u32 sram_size;
	u32 imr2sram_remainbyte;
	u64 imr_curr_addr;
	__le64 imr_base_addr;
};

/**
 * struct iwl_trans_debug - transport debug related data
 *
 * @n_dest_reg: num of reg_ops in %dbg_dest_tlv
 * @rec_on: true iff there is a fw debug recording currently active
 * @dest_tlv: points to the destination TLV for debug
 * @conf_tlv: array of pointers to configuration TLVs for debug
 * @trigger_tlv: array of pointers to triggers TLVs for debug
 * @lmac_error_event_table: addrs of lmacs error tables
 * @umac_error_event_table: addr of umac error table
 * @tcm_error_event_table: addrs of tcm error tables (TCM1/TCM2)
 * @rcm_error_event_table: addrs of rcm error tables (RCM1/RCM2)
 * @error_event_table_tlv_status: bitmap that indicates what error table
 *	pointers were received via TLV. Uses &enum iwl_error_event_table_status
 * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
 * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
 * @fw_mon_cfg: debug buffer allocation configuration
 * @fw_mon_ini: DRAM buffer fragments per allocation id
 * @fw_mon: DRAM buffer for firmware monitor
 * @hw_error: equals true if hw error interrupt was received from the FW
 * @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
 * @unsupported_region_msk: unsupported regions out of active_regions
 * @active_regions: active regions
 * @debug_info_tlv_list: list of debug info TLVs
 * @time_point: array of debug time points
 * @periodic_trig_list: periodic triggers list
 * @domains_bitmap: bitmap of active domains other than
 *	&IWL_FW_INI_DOMAIN_ALWAYS_ON
 * @ucode_preset: preset based on ucode
 * @restart_required: indicates debug restart is required
 * @last_tp_resetfw: last time point that requested a firmware reset
 * @imr_data: IMR debug data allocation
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	u32 tcm_error_event_table[2];
	u32 rcm_error_event_table[2];
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
	struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];

	struct iwl_dram_data fw_mon;

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;

	u64 unsupported_region_msk;
	struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
	struct list_head debug_info_tlv_list;
	struct iwl_dbg_tlv_time_point_data
		time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;
	u32 ucode_preset;
	bool restart_required;
	u32 last_tp_resetfw;
	struct iwl_imr_data imr_data;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since PN location is 8 bytes at offset 12, it's 20 now.
 * If we make it bigger then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	void *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 *
 * @tfds: transmit frame descriptors (in DRAM)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @frozen: tx stuck queue timer is frozen
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @block: keep track of block requests
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @overflow_q: overflow queue for handling frames that didn't fit on HW queue
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: 1-st empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 * @overflow_tx: need to transmit from overflow
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	/* lock for syncing changes on the queue */
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans *trans;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};

/**
 * struct iwl_trans_txqs - transport tx queues data
 *
 * @queue_used: used queue bitmap
 * @queue_stopped: stopped queue bitmap
 * @txq: array of TXQ data structures representing the TXQs
 * @bc_pool: bytecount DMA allocations pool
 * @bc_tbl_size: bytecount table size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @page_offs: offset from skb->cb to mac header page pointer
 * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
 * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
 * @cmd: command queue data
 * @tfd: TFD data
 * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
 * @queue_alloc_cmd_ver: queue allocation command version
 */
struct iwl_trans_txqs {
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	struct dma_pool *bc_pool;
	size_t bc_tbl_size;
	bool bc_table_dword;
	u8 page_offs;
	u8 dev_cmd_offs;
	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	struct {
		u8 fifo;
		u8 q_id;
		unsigned int wdg_timeout;
	} cmd;

	struct {
		u8 max_tbs;
		u16 size;
		u8 addr_size;
	} tfd;

	struct iwl_dma_ptr scd_bc_tbls;

	u8 queue_alloc_cmd_ver;
};

/**
 * struct iwl_trans - transport common data
 *
 * @csme_own: true if we couldn't get ownership on the device
 * @ops: pointer to iwl_trans_ops
 * @op_mode: pointer to the op_mode
 * @trans_cfg: the trans-specific configuration part
 * @cfg: pointer to the configuration
 * @drv: pointer to iwl_drv
 * @state: current device state
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rev: the revision data of the HW
 * @hw_rev_step: the mac step of the HW
 * @hw_rf_id: the device RF ID
 * @hw_id: the ID of the device / sub-device
 * @hw_id_str: a string with the ID of the device / sub-device
 * @sku_id: the SKU identifier (for PNVM matching)
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @pnvm_loaded: indicates PNVM was loaded
 * @reduce_power_loaded: indicates reduce power section was loaded
 * @command_groups: pointer to command group name list array
 * @command_groups_size: array size of @command_groups
 * @wide_cmd_header: true when ucode supports wide command header format
 * @wait_command_queue: wait queue for sync commands
 * @num_rx_queues: number of RX queues allocated by the transport
 * @iml_len: the length of the image loader
 * @iml: a pointer to the image loader itself
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @dev_cmd_pool_name: name for the TX command allocation pool
 * @dbgfs_dir: iwlwifi debugfs base dir for this device
 * @sync_cmd_lockdep_map: lockdep map for checking sync commands
 * @dbg: additional debug data, see &struct iwl_trans_debug
 * @init_dram: FW initialization DMA data
 * @system_pm_mode: the system-wide power management mode in use.
 *	This mode is set dynamically, depending on the WoWLAN values
 *	configured from the userspace at runtime.
 * @name: the device name
 * @txqs: transport tx queues data.
 * @trans_specific: per-transport private area, must stay last and be
 *	aligned to sizeof(void *)
 */
struct iwl_trans {
	bool csme_own;
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg_trans_params *trans_cfg;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rev_step;
	u32 hw_rf_id;
	u32 hw_id;
	char hw_id_str[52];
	u32 sku_id[3];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;
	u8 pnvm_loaded:1;
	u8 reduce_power_loaded:1;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;
	bool wide_cmd_header;

	wait_queue_head_t wait_command_queue;
	u8 num_rx_queues;

	size_t iml_len;
	u8 *iml;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_trans_debug dbg;
	struct iwl_self_init_dram init_dram;

	enum iwl_plat_pm_mode system_pm_mode;

	const char *name;
	struct iwl_trans_txqs txqs;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);

static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}

static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return trans->ops->start_hw(trans);
}

static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	int ret;

	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	ret = trans->ops->start_fw(trans, fw, run_in_rfkill);
	if (ret == 0)
		trans->state = IWL_TRANS_FW_STARTED;

	return ret;
}

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	trans->ops->stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}

static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
				       bool reset)
{
	might_sleep();
	if (!trans->ops->d3_suspend)
		return 0;

	return trans->ops->d3_suspend(trans, test, reset);
}

static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test, bool reset)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return 0;

	return trans->ops->d3_resume(trans, status, test, reset);
}

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
		    const struct iwl_dump_sanitize_ops *sanitize_ops,
		    void *sanitize_ctx)
{
	if (!trans->ops->dump_data)
		return NULL;
	return trans->ops->dump_data(trans, dump_mask,
				     sanitize_ops, sanitize_ctx);
}

static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_tx_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}
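
/*
 * Illustrative sketch only: the typical life cycle of a TX command as an
 * op_mode would drive it. Queue number, SSN and the TX command fill-in are
 * hypothetical placeholders.
 *
 *	struct iwl_device_tx_cmd *dev_cmd = iwl_trans_alloc_tx_cmd(trans);
 *
 *	if (!dev_cmd)
 *		return -ENOMEM;
 *	// ... fill dev_cmd->hdr and payload for this skb ...
 *	if (iwl_trans_tx(trans, skb, dev_cmd, txq_id)) {
 *		iwl_trans_free_tx_cmd(trans, dev_cmd);
 *		return -EIO;
 *	}
 *	// later, on TX response/BA notification:
 *	iwl_trans_reclaim(trans, txq_id, ssn, &reclaimed_skbs);
 */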

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}

static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs);
}

static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
					int ptr)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->set_q_ptrs(trans, queue, ptr);
}

static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return false;
	}

	return trans->ops->txq_enable(trans, queue, ssn,
				      cfg, queue_wdg_timeout);
}

static inline int
iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			   struct iwl_trans_rxq_dma_data *data)
{
	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
		return -ENOTSUPP;

	return trans->ops->rxq_dma_data(trans, queue, data);
}

static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}

static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    u32 flags, u32 sta_mask, u8 tid,
		    int size, unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->txq_alloc(trans, flags, sta_mask, tid,
				     size, wdg_timeout);
}

static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
						 int queue, bool shared_mode)
{
	if (trans->ops->txq_set_shared_mode)
		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}

static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
						 u32 txqs)
{
	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
		return -ENOTSUPP;

	/* No need to wait if the firmware is not alive */
	if (trans->state != IWL_TRANS_FW_ALIVE) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queues_empty(trans, txqs);
}

static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_txq_empty(trans, queue);
}

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	return trans->ops->write_prph(trans, ofs, val);
}

static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)
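
/*
 * Illustrative sketch only: iwl_trans_read_mem_bytes() works in dwords, so
 * the destination buffer size must be a multiple of sizeof(u32). The device
 * memory address below is a hypothetical placeholder.
 *
 *	u32 buf[4];
 *
 *	iwl_trans_read_mem_bytes(trans, 0x800000, buf, sizeof(buf));
 */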

static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans,
					  u32 dst_addr, u64 src_addr,
					  u32 byte_cnt)
{
	if (trans->ops->imr_dma_data)
		return trans->ops->imr_dma_data(trans, dst_addr, src_addr,
						byte_cnt);
	return 0;
}

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}

static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

static inline int iwl_trans_sw_reset(struct iwl_trans *trans,
				     bool retake_ownership)
{
	if (trans->ops->sw_reset)
		return trans->ops->sw_reset(trans, retake_ownership);
	return 0;
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans)		\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
	trans->ops->release_nic_access(trans);
	__release(nic_access);
}
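
/*
 * Illustrative sketch only: direct register access must be bracketed by
 * the grab/release pair so the device is guaranteed to be awake. The
 * periphery register offset below is a hypothetical placeholder.
 *
 *	if (iwl_trans_grab_nic_access(trans)) {
 *		u32 val = iwl_trans_read_prph(trans, 0xa0304c);
 *
 *		// ... use val ...
 *		iwl_trans_release_nic_access(trans);
 *	}
 */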

static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
		iwl_op_mode_nic_error(trans->op_mode, sync);
		trans->state = IWL_TRANS_NO_FW;
	}
}

static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
	return trans->state == IWL_TRANS_FW_ALIVE;
}

static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	if (trans->ops->sync_nmi)
		trans->ops->sync_nmi(trans);
}

void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
				  u32 sw_err_bit);

static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
				     const void *data, u32 len)
{
	if (trans->ops->set_pnvm) {
		int ret = trans->ops->set_pnvm(trans, data, len);

		if (ret)
			return ret;
	}

	trans->pnvm_loaded = true;

	return 0;
}

static inline int iwl_trans_set_reduce_power(struct iwl_trans *trans,
					     const void *data, u32 len)
{
	if (trans->ops->set_reduce_power) {
		int ret = trans->ops->set_reduce_power(trans, data, len);

		if (ret)
			return ret;
	}

	trans->reduce_power_loaded = true;
	return 0;
}

static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
		trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}

static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
{
	if (trans->ops->interrupts)
		trans->ops->interrupts(trans, enable);
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_trans_ops *ops,
				  const struct iwl_cfg_trans_params *cfg_trans);
int iwl_trans_init(struct iwl_trans *trans);
void iwl_trans_free(struct iwl_trans *trans);

/*****************************************************
 * driver (transport) register/unregister functions
 *****************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif /* __iwl_trans_h__ */