1
2
3
4
5
6
7#ifndef __iwl_trans_h__
8#define __iwl_trans_h__
9
10#include <linux/ieee80211.h>
11#include <linux/mm.h>
12#include <linux/lockdep.h>
13#include <linux/kernel.h>
14
15#include "iwl-debug.h"
16#include "iwl-config.h"
17#include "fw/img.h"
18#include "iwl-op-mode.h"
19#include <linux/firmware.h>
20#include "fw/api/cmdhdr.h"
21#include "fw/api/txq.h"
22#include "fw/api/dbg-tlv.h"
23#include "iwl-dbg-tlv.h"
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
/* Debug domain used by the transport when requesting FW debug data */
#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

/*
 * Layout of the len_n_flags word of struct iwl_rx_packet:
 * bits 0-13 hold the frame size, bit 25/26 are the RPA/RADA
 * indications and bits 16-21 carry the RX queue number.
 */
#define FH_RSCSR_FRAME_SIZE_MSK	0x00003FFF	/* bits 0-13: RX frame size */
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40
#define FH_RSCSR_RPA_EN		BIT(25)
#define FH_RSCSR_RADA_EN		BIT(26)
#define FH_RSCSR_RXQ_POS		16
#define FH_RSCSR_RXQ_MASK		0x3F0000
68
struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-27: Reserved
	 * 26:    RADA enabled
	 * 25:    Offload enabled
	 * 24:    RPF enabled
	 * 23:    RSS enabled
	 * 22:    Checksum enabled
	 * 21-16: RX queue
	 * 15-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;	/* command/notification header */
	u8 data[];			/* payload, hdr.cmd-specific layout */
} __packed;

/* Total packet length (header + payload), from the low 14 size bits */
static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

/* Payload length only, i.e. packet length minus the command header */
static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
101
102
103
104
105
106
107
108
109
110
/**
 * enum CMD_MODE - how to send the host commands ?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes. Valid only with CMD_ASYNC.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_WANT_ASYNC_CALLBACK	= BIT(3),
};

/* Default size of the command payload buffer in struct iwl_device_cmd */
#define DEF_CMD_PAYLOAD_SIZE 320
119
120
121
122
123
124
125
126
/**
 * struct iwl_device_cmd - device command structure
 *
 * Holds a host command going to the device: either a regular
 * (@hdr + @payload) or a wide (@hdr_wide + @payload_wide) layout.
 * Both union members occupy the same total size; the wide payload is
 * shrunk by the size difference between the two header types.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;
141
142
143
144
145
146
147
148
/**
 * struct iwl_device_tx_cmd - buffer for TX command
 * @hdr: the header
 * @payload: the payload placeholder
 *
 * The actual structure is sized dynamically according to need.
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

/* Maximum TFD payload equals the full device command size */
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
155
156
157
158
159
/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/*
 * Maximum number of fragments a TX SKB may carry: the TFD's TB count
 * minus 3 (reserved TBs; exact reservation usage is defined by the
 * transport implementation).
 */
#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	internal buffer. This flag tells the transport to refer to the caller's
 *	buffer directly instead; the caller must keep it valid until completion.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally (so the caller's buffer can be reused immediately).
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};

/* bitmap of which firmware error event tables were advertised via TLV */
enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
};
193
194
195
196
197
198
199
200
201
202
203
204
205
206
/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_addr: (internal) page address holding the response
 * @_rx_page_order: (internal) order of the page holding the response
 * @flags: can be CMD_* (see enum CMD_MODE)
 * @id: command id of the host command
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_* flags for each chunk in data
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

/* Free the response pages of a %CMD_WANT_SKB host command */
static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
223
/*
 * RX buffer handed to the op_mode: wraps the page that holds the
 * received frame(s), together with the offset of the current packet.
 */
struct iwl_rx_cmd_buffer {
	struct page *_page;	/* backing page */
	int _offset;		/* offset of the packet within the page */
	bool _page_stolen;	/* set once the op_mode took ownership */
	u32 _rx_page_order;	/* allocation order of @_page */
	unsigned int truesize;	/* true size for skb accounting */
};

/* Virtual address of the current packet within the RX buffer */
static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

/*
 * Take a reference on the RX page and mark it stolen so the transport
 * won't recycle it; the caller becomes responsible for releasing the
 * extra reference.
 */
static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

/* Free the pages backing an RX buffer */
static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
253
/* max number of commands the op_mode may mark as "no reclaim" */
#define MAX_NO_RECLAIM_CMDS	6

/* build a contiguous bitmask covering bits lo..hi inclusive */
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT	64
#define IWL_MAX_RX_HW_QUEUES	16
#define IWL_9000_MAX_RX_HW_QUEUES	6
270
271
272
273
274
275
/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};
307
308static inline int
309iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
310{
311 switch (rb_size) {
312 case IWL_AMSDU_2K:
313 return get_order(2 * 1024);
314 case IWL_AMSDU_4K:
315 return get_order(4 * 1024);
316 case IWL_AMSDU_8K:
317 return get_order(8 * 1024);
318 case IWL_AMSDU_12K:
319 return get_order(16 * 1024);
320 default:
321 WARN_ON(1);
322 return -1;
323 }
324}
325
326static inline int
327iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
328{
329 switch (rb_size) {
330 case IWL_AMSDU_2K:
331 return 2 * 1024;
332 case IWL_AMSDU_4K:
333 return 4 * 1024;
334 case IWL_AMSDU_8K:
335 return 8 * 1024;
336 case IWL_AMSDU_12K:
337 return 16 * 1024;
338 default:
339 WARN_ON(1);
340 return 0;
341 }
342}
343
/* mapping of a single host command id to its printable name */
struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

/* build an iwl_hcmd_names entry from the command constant itself */
#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

/* one command-name array (per command group) plus its length */
struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

/* build an iwl_hcmd_arr entry from a static array of HCMD_NAME()s */
#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 * @fw_reset_handshake: firmware supports reset flow handshake
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
	bool fw_reset_handshake;
};

/* blob of debug data returned by the transport's dump_data op */
struct iwl_trans_dump_data {
	u32 len;	/* length of @data in bytes */
	u8 data[];
};

struct iwl_trans;

/* scheduler configuration for a TX queue (gen1-style SCD) */
struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;		/* part of an aggregation session */
	int frame_limit;	/* BA window size */
};

/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data
 * @fr_bd_cb: DMA address of free BD cyclic buffer
 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
/**
 * struct iwl_trans_ops - transport specific operations
 *
 * All the handlers MUST be implemented unless a wrapper below checks the
 * pointer first (e.g. d3_suspend, txq_alloc, dump_data, sync_nmi).
 *
 * Lifecycle: @start_hw, @start_fw, @fw_alive, @stop_device,
 * @op_mode_leave; D3 flow: @d3_suspend / @d3_resume.
 * Data path: @send_cmd, @tx, @reclaim, @set_q_ptrs, the txq_* ops.
 * Register/memory access: @write8/@write32/@read32, @read_prph/
 * @write_prph, @read_mem/@write_mem, @read_config32, guarded by
 * @grab_nic_access / @release_nic_access.
 */
struct iwl_trans_ops {

	int (*start_hw)(struct iwl_trans *iwl_trans);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans);

	int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_tx_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);

	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);

	int (*txq_alloc)(struct iwl_trans *trans,
			 __le16 flags, u8 sta_id, u8 tid,
			 int cmd_id, int size,
			 unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);
	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
			    struct iwl_trans_rxq_dma_data *data);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	void (*sw_reset)(struct iwl_trans *trans);
	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
	void (*release_nic_access)(struct iwl_trans *trans,
				   unsigned long *flags);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
	int (*suspend)(struct iwl_trans *trans);
	void (*resume)(struct iwl_trans *trans);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask);
	void (*debugfs_cleanup)(struct iwl_trans *trans);
	void (*sync_nmi)(struct iwl_trans *trans);
	int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
};
591
592
593
594
595
596
597
598
/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
 * @IWL_TRANS_FW_STARTED: fw was started, but not alive yet
 * @IWL_TRANS_FW_ALIVE: FW has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW,
	IWL_TRANS_FW_STARTED,
	IWL_TRANS_FW_ALIVE,
};

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior when in system-wide suspend (i.e WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 *	device.  In system-wide suspend mode, it means that the all
 *	connections will be closed automatically by mac80211 before
 *	the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};

/**
 * enum iwl_ini_cfg_state
 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
 * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
 * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
 *	are corrupted. The rest of the debug TLVs will still be used
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

/* Max time to wait for nmi interrupt */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)

/**
 * struct iwl_dram_data
 * @physical: page phy pointer
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_fw_mon - fw monitor per allocation id
 * @num_frags: number of fragments
 * @frags: an array of DRAM buffer fragments
 */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};
656
657
658#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)
659
660
661
662
663
664
665
struct iwl_dram_data {
	dma_addr_t physical;	/* DMA (bus) address of the block */
	void *block;		/* CPU pointer to the allocated block */
	int size;		/* size of the block in bytes */
};

/* firmware monitor buffer: a set of DRAM fragments per allocation id */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

/* DRAM buffers used by the context-info/self-init firmware load flow */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;	/* lmac and umac sections */
	int fw_cnt;
	struct iwl_dram_data *paging;	/* paging sections */
	int paging_cnt;
};
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
/*
 * Debug-related state of the transport: legacy dest/conf TLVs, the
 * ini-style (yoyo) configuration state, firmware monitor buffers and
 * the error event table addresses reported by the firmware TLVs.
 */
struct iwl_trans_debug {
	u8 n_dest_reg;		/* num of reg_ops in %dbg_dest_tlv */
	bool rec_on;		/* debug recording is currently on */

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;

	u32 lmac_error_event_table[2];	/* addrs of LMAC error tables */
	u32 umac_error_event_table;	/* addr of UMAC error table */
	/* bitmap of &enum iwl_error_event_table_status */
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;	/* from the FW file */
	enum iwl_ini_cfg_state external_ini_cfg;	/* from external file */

	struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
	struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];

	struct iwl_dram_data fw_mon;	/* legacy DRAM firmware monitor */

	bool hw_error;		/* equipment HW error occurred */
	enum iwl_fw_ini_buffer_location ini_dest;

	struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
	struct list_head debug_info_tlv_list;
	struct iwl_dbg_tlv_time_point_data
		time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;	/* bitmap of active debug domains */
};

/* a DMA-coherent buffer together with its bus address */
struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/* driver-private metadata kept per TFD/command entry */
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;	/* bitmap related to the TBs in use */
};
766
767
768
769
770
771
772
773
774
775
/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * The size of the first TB is fixed; its buffers are aligned to 64 bytes.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

/* one entry of a TX queue: the command/skb it holds and its metadata */
struct iwl_pcie_txq_entry {
	void *cmd;		/* device command for this entry */
	struct sk_buff *skb;	/* skb, if this is a data frame */
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

/* fixed-size buffer backing the first TB of each TFD */
struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
/*
 * Generic TX queue structure shared by the transports: the TFD ring,
 * the first-TB buffers, per-entry bookkeeping and the read/write
 * pointers of the circular buffer.
 */
struct iwl_txq {
	void *tfds;		/* TFD circular buffer */
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;	/* one per TFD slot */
	/* lock protecting the queue's read/write pointers and entries */
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;	/* remaining wd time (frozen) */
	struct timer_list stuck_timer;	/* watchdog for stuck queues */
	struct iwl_trans *trans;	/* back-pointer to the transport */
	bool need_update;	/* write pointer update is pending */
	bool frozen;		/* watchdog timer is frozen */
	bool ampdu;		/* queue is used for an A-MPDU session */
	int block;		/* queue is blocked (nested counter) */
	unsigned long wd_timeout;	/* watchdog timeout, in jiffies */
	struct sk_buff_head overflow_q;	/* frames queued while full */
	struct iwl_dma_ptr bc_tbl;	/* byte-count table (gen2) */

	int write_ptr;		/* next TFD index to fill */
	int read_ptr;		/* last used TFD index + 1 */
	dma_addr_t dma_addr;	/* bus address of the TFD ring */
	int n_window;		/* usable TFD slots (queue window size) */
	u32 id;			/* queue id */
	int low_mark;		/* resume-TX watermark */
	int high_mark;		/* stop-TX watermark */

	bool overflow_tx;	/* currently TX-ing frames from overflow_q */
};

/*
 * Per-transport TX-queue bookkeeping shared by gen1/gen2 code:
 * used/stopped bitmaps, the queue array, byte-count table pool and
 * the command queue / TFD geometry parameters.
 */
struct iwl_trans_txqs {
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	struct dma_pool *bc_pool;	/* pool for byte-count tables */
	size_t bc_tbl_size;
	bool bc_table_dword;	/* byte count expressed in DWORDs */
	u8 page_offs;		/* offset inside skb->cb (see cb_data_offs) */
	u8 dev_cmd_offs;
	/* per-CPU page cache for TSO header copies */
	struct __percpu iwl_tso_hdr_page * tso_hdr_page;

	struct {
		u8 fifo;		/* command queue FIFO */
		u8 q_id;		/* command queue id */
		unsigned int wdg_timeout;
	} cmd;

	struct {
		u8 max_tbs;	/* max TBs per TFD for this device */
		u16 size;	/* TFD size in bytes */
		u8 addr_size;	/* DMA address width, in bits */
	} tfd;

	struct iwl_dma_ptr scd_bc_tbls;	/* scheduler byte-count tables */
};
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
/**
 * struct iwl_trans - transport common data
 *
 * Shared state between the op_mode and the transport implementation;
 * the transport-specific part lives in @trans_specific.
 */
struct iwl_trans {
	const struct iwl_trans_ops *ops;	/* transport callbacks */
	struct iwl_op_mode *op_mode;	/* upper layer (set by configure) */
	const struct iwl_cfg_trans_params *trans_cfg;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;	/* current FW state machine state */
	unsigned long status;	/* bitmap of &enum iwl_trans_status */

	struct device *dev;
	u32 max_skb_frags;	/* max fragments an SKB may use for TX */
	u32 hw_rev;
	u32 hw_rf_id;
	u32 hw_id;		/* combined PCI ID (device/subdevice) */
	char hw_id_str[52];	/* printable form of @hw_id */
	u32 sku_id[3];

	/* RX MPDU command id and its header size, set by the op_mode */
	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;	/* power management is supported */
	bool ltr_enabled;
	u8 pnvm_loaded:1;	/* PNVM data was successfully loaded */

	const struct iwl_hcmd_arr *command_groups;	/* for debugging */
	int command_groups_size;
	bool wide_cmd_header;	/* FW uses wide command headers */

	u8 num_rx_queues;	/* number of RX queues allocated */

	size_t iml_len;		/* image loader image length */
	u8 *iml;		/* image loader image (DMA'd to device) */

	/* pool for struct iwl_device_tx_cmd allocations */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;	/* debugfs directory */

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_trans_debug dbg;
	struct iwl_self_init_dram init_dram;

	enum iwl_plat_pm_mode system_pm_mode;	/* PM mode for suspend */

	const char *name;	/* device marketing name */
	struct iwl_trans_txqs txqs;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);

/*
 * Bind the op_mode to the transport and hand over the configuration.
 * Warns if the debugging command groups are not sorted (they must be,
 * for the binary-search based name lookup).
 */
static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}
999
/* Bring up the HW (APM etc.); may sleep */
static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return trans->ops->start_hw(trans);
}

/* Detach the op_mode from the transport; resets state to NO_FW */
static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

/* Called when the FW sent its alive notification; enables the data path */
static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

/*
 * Start the firmware image.  The op_mode must have set rx_mpdu_cmd
 * beforehand (needed to parse RX); clears any stale FW error state.
 */
static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	int ret;

	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	ret = trans->ops->start_fw(trans, fw, run_in_rfkill);
	if (ret == 0)
		trans->state = IWL_TRANS_FW_STARTED;

	return ret;
}

/* Stop the device and firmware; transport returns to NO_FW state */
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	trans->ops->stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}
1054
/* Enter D3 (WoWLAN) state; optional op, returns 0 if unimplemented */
static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
				       bool reset)
{
	might_sleep();
	if (!trans->ops->d3_suspend)
		return 0;

	return trans->ops->d3_suspend(trans, test, reset);
}

/* Resume from D3; @status reports whether FW survived the suspend */
static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test, bool reset)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return 0;

	return trans->ops->d3_resume(trans, status, test, reset);
}

/* System suspend hook; optional op, returns 0 if unimplemented */
static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
	if (!trans->ops->suspend)
		return 0;

	return trans->ops->suspend(trans);
}

/* System resume hook; optional op */
static inline void iwl_trans_resume(struct iwl_trans *trans)
{
	if (trans->ops->resume)
		trans->ops->resume(trans);
}

/*
 * Collect a debug data dump (caller frees the returned buffer);
 * returns NULL when the transport doesn't support dumping.
 */
static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
{
	if (!trans->ops->dump_data)
		return NULL;
	return trans->ops->dump_data(trans, dump_mask);
}
1097
/* Allocate a zeroed TX device command from the transport's slab pool */
static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

/* Return a TX device command to the pool */
static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_tx_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}

/*
 * Queue an skb for TX on @queue.  Fails with -EIO if the FW is in
 * error state or not alive yet.
 */
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}

/* Reclaim (complete) frames up to @ssn on @queue into @skbs */
static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs);
}

/* Move the queue read pointer to @ptr without reclaiming frames */
static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
					int ptr)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->set_q_ptrs(trans, queue, ptr);
}
1147
/* Deactivate a TX queue; @configure_scd also deconfigures the scheduler */
static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

/*
 * Activate a TX queue with a full scheduler configuration (gen1 flow).
 * Returns true if the SCD was already configured for this queue.
 * May sleep; FW must be alive.
 */
static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return false;
	}

	return trans->ops->txq_enable(trans, queue, ssn,
				      cfg, queue_wdg_timeout);
}

/* Fetch the RX queue DMA addresses (gen2+); -ENOTSUPP otherwise */
static inline int
iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			   struct iwl_trans_rxq_dma_data *data)
{
	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
		return -ENOTSUPP;

	return trans->ops->rxq_dma_data(trans, queue, data);
}

/* Free a dynamically allocated TX queue (gen2+ only) */
static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}

/*
 * Dynamically allocate a TX queue (gen2+ only); FW must be alive.
 * Returns the queue id on success, negative error otherwise.
 */
static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    __le16 flags, u8 sta_id, u8 tid,
		    int cmd_id, int size,
		    unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->txq_alloc(trans, flags, sta_id, tid,
				     cmd_id, size, wdg_timeout);
}

/* Mark a TX queue as (not) shared between multiple stations/TIDs */
static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
						 int queue, bool shared_mode)
{
	if (trans->ops->txq_set_shared_mode)
		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}
1215
1216static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
1217 int fifo, int sta_id, int tid,
1218 int frame_limit, u16 ssn,
1219 unsigned int queue_wdg_timeout)
1220{
1221 struct iwl_trans_txq_scd_cfg cfg = {
1222 .fifo = fifo,
1223 .sta_id = sta_id,
1224 .tid = tid,
1225 .frame_limit = frame_limit,
1226 .aggregate = sta_id >= 0,
1227 };
1228
1229 iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
1230}
1231
1232static inline
1233void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
1234 unsigned int queue_wdg_timeout)
1235{
1236 struct iwl_trans_txq_scd_cfg cfg = {
1237 .fifo = fifo,
1238 .sta_id = -1,
1239 .tid = IWL_MAX_TID_COUNT,
1240 .frame_limit = IWL_FRAME_LIMIT,
1241 .aggregate = false,
1242 };
1243
1244 iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
1245}
1246
/* Freeze/unfreeze the stuck-queue watchdog for the queues in @txqs */
static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

/* Block/unblock updates to the TX queue write pointers */
static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}

/* Wait until all TX queues in the bitmap @txqs are empty (gen1) */
static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
						 u32 txqs)
{
	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queues_empty(trans, txqs);
}

/* Wait until a single TX queue is empty (gen2) */
static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_txq_empty(trans, queue);
}
1298
/* Direct register access wrappers (byte/dword, CSR space) */
static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

/* Periphery register access (indirect, through HBUS) */
static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	return trans->ops->write_prph(trans, ofs, val);
}

/* Read @dwords 32-bit words of device (SRAM) memory at @addr into @buf */
static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

/*
 * Byte-count convenience wrapper for iwl_trans_read_mem(); the size
 * must be a multiple of 4 (checked at build time when constant).
 */
#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)

/* Read a single dword of device memory; 0xa5a5a5a5 on failure */
static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}

/* Write @dwords 32-bit words from @buf to device memory at @addr */
static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}
1359
/* Tell the transport the device may (not) be put into power-save */
static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

/* Software-reset the device, if the transport supports it */
static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
{
	if (trans->ops->sw_reset)
		trans->ops->sw_reset(trans);
}

/* Read-modify-write a CSR register: (reg & ~mask) | (value & mask) -
 * exact semantics are defined by the transport implementation */
static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

/*
 * Keep the NIC awake for direct access; must be balanced with
 * iwl_trans_release_nic_access().  Annotated for sparse lock checking.
 */
#define iwl_trans_grab_nic_access(trans, flags)	\
	__cond_lock(nic_access,			\
		    likely((trans)->ops->grab_nic_access(trans, flags)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}

/*
 * Report a firmware error to the op_mode.  Only the first error is
 * reported (STATUS_FW_ERROR latches); resets the state to NO_FW.
 */
static inline void iwl_trans_fw_error(struct iwl_trans *trans)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
		iwl_op_mode_nic_error(trans->op_mode);
		trans->state = IWL_TRANS_NO_FW;
	}
}

/* True iff the firmware has sent its alive notification */
static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
	return trans->state == IWL_TRANS_FW_ALIVE;
}

/* Trigger an NMI in the device and wait for it to be processed */
static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	if (trans->ops->sync_nmi)
		trans->ops->sync_nmi(trans);
}
1411
1412static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
1413 const void *data, u32 len)
1414{
1415 if (trans->ops->set_pnvm) {
1416 int ret = trans->ops->set_pnvm(trans, data, len);
1417
1418 if (ret)
1419 return ret;
1420 }
1421
1422 trans->pnvm_loaded = true;
1423
1424 return 0;
1425}
1426
1427static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
1428{
1429 return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
1430 trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
1431}
1432
1433
1434
1435
/*
 * Allocate a transport with @priv_size bytes of transport-specific
 * storage (trans_specific); counterpart is iwl_trans_free().
 */
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_trans_ops *ops,
				  const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_free(struct iwl_trans *trans);

/* driver (PCI) registration/unregistration */
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
1447
1448#endif
1449