#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address() */
#include <linux/lockdep.h>
#include <linux/kernel.h>
#include <linux/firmware.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"

#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

/*
 * Layout of the RX frame status word (struct iwl_rx_packet.len_n_flags):
 * the low 14 bits carry the frame size, the remaining bits carry flags
 * and the RX queue the frame arrived on.
 */
#define FH_RSCSR_FRAME_SIZE_MSK	0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID	0x55550000
#define FH_RSCSR_FRAME_ALIGN	0x40
#define FH_RSCSR_RPA_EN		BIT(25)
#define FH_RSCSR_RADA_EN	BIT(26)
#define FH_RSCSR_RXQ_POS	16
#define FH_RSCSR_RXQ_MASK	0x3F0000

/**
 * struct iwl_rx_packet - RX packet as handed to the op mode
 * @len_n_flags: frame size in the low 14 bits (%FH_RSCSR_FRAME_SIZE_MSK);
 *	the size covers @hdr and @data but not this field. The remaining
 *	bits carry FH_RSCSR_* flags such as the RX queue index.
 * @hdr: command/notification header identifying the payload
 * @data: payload; its layout is determined by @hdr
 */
struct iwl_rx_packet {
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
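
/*
 * Usage sketch (illustrative only): a notification handler would typically
 * validate the payload size before casting @data to the expected structure.
 * The structure and handler names below are hypothetical.
 *
 *	static void iwl_handle_foo_notif(struct iwl_rx_packet *pkt)
 *	{
 *		struct iwl_foo_notif *notif = (void *)pkt->data;
 *
 *		if (iwl_rx_packet_payload_len(pkt) < sizeof(*notif))
 *			return;
 *		// ... use notif ...
 *	}
 */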

/**
 * enum CMD_MODE - how to send the host commands
 *
 * @CMD_ASYNC: return right away and don't wait for the response
 * @CMD_WANT_SKB: not valid with CMD_ASYNC; the caller needs the response
 *	buffer and must call iwl_free_resp() when done with it
 * @CMD_SEND_IN_RFKILL: the command is allowed to go out while RF-kill is
 *	asserted
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes; valid only with CMD_ASYNC
 * @CMD_SEND_IN_D3: allow the command to be sent while the device is in D3
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_WANT_ASYNC_CALLBACK	= BIT(3),
	CMD_SEND_IN_D3		= BIT(4),
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd - device command, as copied into the command queue
 *
 * Establishes the size of the largest command that is fully copied into the
 * command queue; both the normal and the wide command header layouts fit in
 * the same fixed-size buffer.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

/**
 * struct iwl_device_tx_cmd - buffer for TX command
 * @hdr: the header
 * @payload: the payload placeholder
 *
 * The actual structure is sized dynamically according to need.
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (chunks) per host command; this is just the
 * driver's idea, the hardware supports more per TFD
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/*
 * 2 TBs are needed for the TX command and header, and one more might be
 * needed for data in the skb's head; the remaining TBs can carry fragments
 */
#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)

/**
 * enum iwl_hcmd_dataflag - flag for each of the chunks of a host command
 *
 * @IWL_HCMD_DFL_NOCOPY: don't copy the chunk into the command queue entry,
 *	map the caller's buffer for DMA instead (the buffer must stay valid
 *	until the command completes)
 * @IWL_HCMD_DFL_DUP: only valid without NOCOPY, duplicate the chunk into a
 *	separate buffer so the caller may free it immediately
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};

enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
	IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3),
	IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4),
	IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5),
	IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_addr: (internally used to free the response packet)
 * @_rx_page_order: (internally used to free the response packet)
 * @flags: can be CMD_*
 * @id: command id of the host command; for wide commands it encodes the
 *	version and group as well
 * @len: array of the lengths of the chunks in @data
 * @dataflags: IWL_HCMD_DFL_* flags, one per chunk
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
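
/*
 * Usage sketch (illustrative only): sending a synchronous command and
 * reading its response. The command id, payload structure and size check
 * below are hypothetical; iwl_trans_send_cmd() is declared later in this
 * header.
 *
 *	static int iwl_send_foo_cmd(struct iwl_trans *trans)
 *	{
 *		struct iwl_foo_cmd foo = { .enabled = cpu_to_le32(1) };
 *		struct iwl_host_cmd cmd = {
 *			.id = FOO_CMD,
 *			.flags = CMD_WANT_SKB,
 *			.data = { &foo, },
 *			.len = { sizeof(foo), },
 *		};
 *		int ret;
 *
 *		ret = iwl_trans_send_cmd(trans, &cmd);
 *		if (ret)
 *			return ret;
 *
 *		// cmd.resp_pkt is valid because CMD_WANT_SKB was set
 *		if (iwl_rx_packet_payload_len(cmd.resp_pkt) < 4)
 *			ret = -EIO;
 *		iwl_free_resp(&cmd);
 *		return ret;
 *	}
 */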

/**
 * struct iwl_rx_cmd_buffer - RX buffer holding an RX packet
 *
 * Convenience wrapper so that the op mode doesn't have to deal with
 * struct page directly.
 */
struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	/* mark the page as stolen and take a reference for the caller */
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
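
/*
 * Usage sketch (illustrative only): the op mode's RX path receives a
 * struct iwl_rx_cmd_buffer and obtains the packet through rxb_addr().
 * The function name below is hypothetical.
 *
 *	static void iwl_handle_rx(struct iwl_rx_cmd_buffer *rxb)
 *	{
 *		struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *
 *		pr_debug("notification 0x%x, %u payload bytes\n",
 *			 pkt->hdr.cmd, iwl_rx_packet_payload_len(pkt));
 *	}
 */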

#define MAX_NO_RECLAIM_CMDS	6

/* build a contiguous bitmask covering bits lo..hi, e.g. IWL_MASK(0, 3) == 0xf */
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT		64
#define IWL_MAX_RX_HW_QUEUES	16
#define IWL_9000_MAX_RX_HW_QUEUES	6

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended and the firmware
 *	stopped running; the device must be reinitialized
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status - transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to the op mode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress the next error about a command
 *	failing while the firmware is in an error state
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
	STATUS_SUPPRESS_CMD_ERROR_ONCE,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return get_order(2 * 1024);
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		/* the 12K A-MSDU setting uses a 16 KB receive buffer */
		return get_order(16 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

static inline int
iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return 2 * 1024;
	case IWL_AMSDU_4K:
		return 4 * 1024;
	case IWL_AMSDU_8K:
		return 8 * 1024;
	case IWL_AMSDU_12K:
		/* the 12K A-MSDU setting uses a 16 KB receive buffer */
		return 16 * 1024;
	default:
		WARN_ON(1);
		return 0;
	}
}

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
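
/*
 * Usage sketch (illustrative only): the op mode builds per-group arrays of
 * command names for debugging/tracing and hands them to the transport via
 * struct iwl_trans_config. Entries must be sorted by command id, as checked
 * by iwl_cmd_groups_verify_sorted(). The specific ids used below are only
 * for illustration.
 *
 *	static const struct iwl_hcmd_names iwl_example_legacy_names[] = {
 *		HCMD_NAME(UCODE_ALIVE_NTFY),
 *		HCMD_NAME(FOO_CMD),
 *	};
 *
 *	static const struct iwl_hcmd_arr iwl_example_cmd_groups[] = {
 *		[LEGACY_GROUP] = HCMD_ARR(iwl_example_legacy_names),
 *	};
 */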

/**
 * struct iwl_dump_sanitize_ops - dump sanitization operations
 * @frob_txf: scrub the TX FIFO data
 * @frob_hcmd: scrub a host command; the %hcmd pointer is to the header,
 *	which might be short or long (&struct iwl_cmd_header or
 *	&struct iwl_cmd_header_wide)
 * @frob_mem: scrub memory data
 */
struct iwl_dump_sanitize_ops {
	void (*frob_txf)(void *ctx, void *buf, size_t buflen);
	void (*frob_hcmd)(void *ctx, void *hcmd, size_t buflen);
	void (*frob_mem)(void *ctx, u32 mem_addr, void *mem, size_t buflen);
};
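
/*
 * Usage sketch (illustrative only): a minimal sanitizer that blanks all
 * dumped memory, e.g. to keep sensitive material out of debug dumps.
 * The callback and ops names below are hypothetical.
 *
 *	static void iwl_example_frob_mem(void *ctx, u32 mem_addr, void *mem,
 *					 size_t buflen)
 *	{
 *		memset(mem, 0, buflen);
 *	}
 *
 *	static const struct iwl_dump_sanitize_ops iwl_example_sanitize_ops = {
 *		.frob_mem = iwl_example_frob_mem,
 *	};
 */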

/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer
 * @cmd_queue: the index of the command queue; must be set before start_fw
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue
 * @no_reclaim_cmds: some devices erroneously don't set the SEQ_RX_FRAME bit
 *	on some notifications; this is the list of such notifications to
 *	filter, max length %MAX_NO_RECLAIM_CMDS
 * @n_no_reclaim_cmds: number of commands in the list
 * @rx_buf_size: RX buffer size needed for A-MSDUs; if unset, 4k will be
 *	the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for the HCMD queue
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at; must have
 *	space for at least two pointers
 * @fw_reset_handshake: firmware supports the reset flow handshake
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
	bool fw_reset_handshake;
};
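
/*
 * Usage sketch (illustrative only): the op mode fills a config on the stack
 * and hands it to the transport once, before starting the firmware, via
 * iwl_trans_configure() (declared later in this header). The values shown
 * are examples, not recommendations, and the helper name is hypothetical.
 *
 *	static void iwl_example_configure(struct iwl_trans *trans,
 *					  struct iwl_op_mode *op_mode)
 *	{
 *		static const u8 no_reclaim_cmds[] = { REPLY_RX_PHY_CMD };
 *		struct iwl_trans_config trans_cfg = {
 *			.op_mode = op_mode,
 *			.cmd_queue = 0,
 *			.cmd_fifo = 7,
 *			.cmd_q_wdg_timeout = IWL_DEF_WD_TIMEOUT,
 *			.no_reclaim_cmds = no_reclaim_cmds,
 *			.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds),
 *			.rx_buf_size = IWL_AMSDU_4K,
 *		};
 *
 *		iwl_trans_configure(trans, &trans_cfg);
 *	}
 */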

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data
 * @fr_bd_cb: DMA address of the free BD cyclic buffer
 * @fr_bd_wid: initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of the used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};

/**
 * struct iwl_trans_ops - transport specific operations
 *
 * Not all handlers are mandatory; the iwl_trans_*() wrappers below check
 * the optional ones before calling them and define the calling conventions
 * (sleeping context, required device state).
 */
struct iwl_trans_ops {
	/* device / firmware lifecycle */
	int (*start_hw)(struct iwl_trans *iwl_trans);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans);

	/* suspend/resume (optional) */
	int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	/* host commands */
	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	/* TX path */
	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_tx_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);

	/* TX queue management */
	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);

	/* dynamic queue allocation (newer devices) */
	int (*txq_alloc)(struct iwl_trans *trans,
			 __le16 flags, u8 sta_id, u8 tid,
			 int cmd_id, int size,
			 unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);
	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
			    struct iwl_trans_rxq_dma_data *data);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	/* register, PRPH and device memory access */
	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);

	/* configuration and misc */
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership);
	bool (*grab_nic_access)(struct iwl_trans *trans);
	void (*release_nic_access)(struct iwl_trans *trans);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);

	/* debug */
	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask,
						 const struct iwl_dump_sanitize_ops *sanitize_ops,
						 void *sanitize_ctx);
	void (*debugfs_cleanup)(struct iwl_trans *trans);
	void (*sync_nmi)(struct iwl_trans *trans);
	int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
	int (*set_reduce_power)(struct iwl_trans *trans,
				const void *data, u32 len);
	void (*interrupts)(struct iwl_trans *trans, bool enable);
};

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
 * @IWL_TRANS_FW_STARTED: fw was started, but not alive yet
 * @IWL_TRANS_FW_ALIVE: fw sent the alive notification, it is alive
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW,
	IWL_TRANS_FW_STARTED,
	IWL_TRANS_FW_ALIVE,
};

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior when in system-wide suspend (i.e. WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this device;
 *	in system-wide suspend mode all connections will be closed
 *	automatically by mac80211 before the platform is suspended
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN)
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};

/**
 * enum iwl_ini_cfg_state - debug (ini) TLV configuration state
 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
 * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
 * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found but some of the TLVs
 *	are corrupted; the rest of the TLVs will still be used
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

/* max time to wait for the NMI interrupt */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)

/**
 * struct iwl_dram_data - DRAM block allocated for the firmware
 * @physical: DMA address of the block
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_fw_mon - fw monitor per allocation id
 * @num_frags: number of fragments
 * @frags: an array of DRAM buffer fragments
 */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

/**
 * struct iwl_self_init_dram - dram data used by the self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in the @fw array
 * @paging: paging dram data
 * @paging_cnt: total number of items in the @paging array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_trans_debug - transport debug related data
 *
 * Holds the debug destination/ini configuration, the addresses of the error
 * event tables reported by the firmware, the firmware monitor buffers and
 * the state of the debug time points.
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	u32 tcm_error_event_table[2];
	u32 rcm_error_event_table[2];
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
	struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];

	struct iwl_dram_data fw_mon;

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;

	u64 unsupported_region_msk;
	struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
	struct list_head debug_info_tlv_list;
	struct iwl_dbg_tlv_time_point_data
		time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;
	u32 ucode_preset;
	bool restart_required;
	u32 last_tp_resetfw;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

/*
 * The FH writes back to the first TB only, so some data must be copied into
 * that buffer regardless of whether the payload is mapped or not. This
 * defines how big the first TB must be to hold the scratch buffer and the
 * assigned PN.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	void *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - TX queue for DMA
 *
 * A TX queue consists of a circular buffer of TFDs (transmit frame
 * descriptors), the corresponding driver-side entries and bookkeeping for
 * the read/write pointers, the stuck-queue watchdog and queue freezing.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	/* lock for syncing changes on the queue */
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans *trans;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};

/**
 * struct iwl_trans_txqs - transport TX queues data
 *
 * @queue_used: bit mask of used queues
 * @queue_stopped: bit mask of stopped queues
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @page_offs: offset from skb->cb to the mac header page pointer
 * @dev_cmd_offs: offset from skb->cb to the iwl_device_tx_cmd pointer
 * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
 */
struct iwl_trans_txqs {
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	struct dma_pool *bc_pool;
	size_t bc_tbl_size;
	bool bc_table_dword;
	u8 page_offs;
	u8 dev_cmd_offs;
	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	struct {
		u8 fifo;
		u8 q_id;
		unsigned int wdg_timeout;
	} cmd;

	struct {
		u8 max_tbs;
		u16 size;
		u8 addr_size;
	} tfd;

	struct iwl_dma_ptr scd_bc_tbls;
};

/**
 * struct iwl_trans - transport common data
 *
 * This structure is shared by all the transport implementations and carries
 * the data common to them: pointers to the ops, the op mode and the
 * configuration, the device identity and state, debug data and the TX queue
 * bookkeeping.
 */
struct iwl_trans {
	bool csme_own;
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg_trans_params *trans_cfg;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rev_step;
	u32 hw_rf_id;
	u32 hw_id;
	char hw_id_str[52];
	u32 sku_id[3];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;
	u8 pnvm_loaded:1;
	u8 reduce_power_loaded:1;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;
	bool wide_cmd_header;

	wait_queue_head_t wait_command_queue;
	u8 num_rx_queues;

	size_t iml_len;
	u8 *iml;

	/* the following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_trans_debug dbg;
	struct iwl_self_init_dram init_dram;

	enum iwl_plat_pm_mode system_pm_mode;

	const char *name;
	struct iwl_trans_txqs txqs;

	/* pointer to trans specific struct */
	/* ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);

static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}

static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return trans->ops->start_hw(trans);
}

static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	int ret;

	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	ret = trans->ops->start_fw(trans, fw, run_in_rfkill);
	if (ret == 0)
		trans->state = IWL_TRANS_FW_STARTED;

	return ret;
}
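
/*
 * Usage sketch (illustrative only): the typical bring-up order implied by
 * the wrappers in this header - configure the transport, bring up the HW,
 * load the firmware and, once the ALIVE notification has been handled,
 * report it alive. Error handling is abbreviated and the helper name is
 * hypothetical.
 *
 *	static int iwl_example_bring_up(struct iwl_trans *trans,
 *					const struct iwl_trans_config *cfg,
 *					const struct fw_img *img,
 *					u32 scd_base_addr)
 *	{
 *		iwl_trans_configure(trans, cfg);
 *
 *		if (iwl_trans_start_hw(trans))
 *			return -EIO;
 *		if (iwl_trans_start_fw(trans, img, false))
 *			return -EIO;
 *
 *		// ... wait for the firmware's ALIVE notification ...
 *		iwl_trans_fw_alive(trans, scd_base_addr);
 *		return 0;
 *	}
 */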

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	trans->ops->stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}

static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
				       bool reset)
{
	might_sleep();
	if (!trans->ops->d3_suspend)
		return 0;

	return trans->ops->d3_suspend(trans, test, reset);
}

static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test, bool reset)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return 0;

	return trans->ops->d3_resume(trans, status, test, reset);
}

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
		    const struct iwl_dump_sanitize_ops *sanitize_ops,
		    void *sanitize_ctx)
{
	if (!trans->ops->dump_data)
		return NULL;
	return trans->ops->dump_data(trans, dump_mask,
				     sanitize_ops, sanitize_ctx);
}

static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_tx_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}
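
/*
 * Usage sketch (illustrative only): the op mode's TX path allocates a
 * device TX command, fills it, and hands it together with the skb to the
 * transport; on failure the command is freed by the caller. The function
 * name and the TX command contents are hypothetical.
 *
 *	static int iwl_example_tx(struct iwl_trans *trans, struct sk_buff *skb,
 *				  int queue)
 *	{
 *		struct iwl_device_tx_cmd *dev_cmd;
 *		int ret;
 *
 *		dev_cmd = iwl_trans_alloc_tx_cmd(trans);
 *		if (!dev_cmd)
 *			return -ENOMEM;
 *
 *		// ... fill dev_cmd->hdr and the TX command payload ...
 *
 *		ret = iwl_trans_tx(trans, skb, dev_cmd, queue);
 *		if (ret)
 *			iwl_trans_free_tx_cmd(trans, dev_cmd);
 *		return ret;
 *	}
 */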

static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs);
}

static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
					int ptr)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->set_q_ptrs(trans, queue, ptr);
}

static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return false;
	}

	return trans->ops->txq_enable(trans, queue, ssn,
				      cfg, queue_wdg_timeout);
}

static inline int
iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			   struct iwl_trans_rxq_dma_data *data)
{
	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
		return -ENOTSUPP;

	return trans->ops->rxq_dma_data(trans, queue, data);
}

static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}

static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    __le16 flags, u8 sta_id, u8 tid,
		    int cmd_id, int size,
		    unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->txq_alloc(trans, flags, sta_id, tid,
				     cmd_id, size, wdg_timeout);
}

static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
						 int queue, bool shared_mode)
{
	if (trans->ops->txq_set_shared_mode)
		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}

static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
						 u32 txqs)
{
	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
		return -ENOTSUPP;

	/* no need to wait if the firmware is not alive */
	if (trans->state != IWL_TRANS_FW_ALIVE) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queues_empty(trans, txqs);
}

static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_txq_empty(trans, queue);
}

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	return trans->ops->write_prph(trans, ofs, val);
}

static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)
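
/*
 * Usage sketch (illustrative only): reading a dword-aligned structure out
 * of device memory. The structure and its base address are hypothetical;
 * the macro above insists that the size is a whole number of dwords.
 *
 *	struct iwl_example_error_table {
 *		u32 valid;
 *		u32 error_id;
 *	};
 *
 *	static void iwl_example_read_table(struct iwl_trans *trans, u32 base)
 *	{
 *		struct iwl_example_error_table table;
 *
 *		iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
 *		IWL_ERR(trans, "error id 0x%x\n", table.error_id);
 *	}
 */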

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}

static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

static inline int iwl_trans_sw_reset(struct iwl_trans *trans,
				     bool retake_ownership)
{
	if (trans->ops->sw_reset)
		return trans->ops->sw_reset(trans, retake_ownership);
	return 0;
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans)		\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
	trans->ops->release_nic_access(trans);
	__release(nic_access);
}
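
/*
 * Usage sketch (illustrative only): some register/memory accesses require
 * the NIC to be held awake; the grab/release pair brackets such a critical
 * section and must always be balanced. The register offset below is
 * hypothetical.
 *
 *	static u32 iwl_example_peek_reg(struct iwl_trans *trans)
 *	{
 *		u32 val = 0;
 *
 *		if (iwl_trans_grab_nic_access(trans)) {
 *			val = iwl_trans_read32(trans, EXAMPLE_REG_OFFSET);
 *			iwl_trans_release_nic_access(trans);
 *		}
 *		return val;
 *	}
 */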

static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
		iwl_op_mode_nic_error(trans->op_mode, sync);
		trans->state = IWL_TRANS_NO_FW;
	}
}

static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
	return trans->state == IWL_TRANS_FW_ALIVE;
}

static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	if (trans->ops->sync_nmi)
		trans->ops->sync_nmi(trans);
}

void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
				  u32 sw_err_bit);

static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
				     const void *data, u32 len)
{
	if (trans->ops->set_pnvm) {
		int ret = trans->ops->set_pnvm(trans, data, len);

		if (ret)
			return ret;
	}

	trans->pnvm_loaded = true;

	return 0;
}

static inline int iwl_trans_set_reduce_power(struct iwl_trans *trans,
					     const void *data, u32 len)
{
	if (trans->ops->set_reduce_power) {
		int ret = trans->ops->set_reduce_power(trans, data, len);

		if (ret)
			return ret;
	}

	trans->reduce_power_loaded = true;
	return 0;
}

static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
		trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}

static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
{
	if (trans->ops->interrupts)
		trans->ops->interrupts(trans, enable);
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_trans_ops *ops,
				  const struct iwl_cfg_trans_params *cfg_trans);
int iwl_trans_init(struct iwl_trans *trans);
void iwl_trans_free(struct iwl_trans *trans);

/*****************************************************
 * driver (transport) register/unregister functions
 *****************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif /* __iwl_trans_h__ */