#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h>
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include <linux/firmware.h>
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"

#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40
#define FH_RSCSR_RPA_EN			BIT(25)
#define FH_RSCSR_RADA_EN		BIT(26)
#define FH_RSCSR_RXQ_POS		16
#define FH_RSCSR_RXQ_MASK		0x3F0000

struct iwl_rx_packet {
	/*
	 * The first dword carries the frame length in bits 0-13
	 * (FH_RSCSR_FRAME_SIZE_MSK), the RX queue in bits 16-21
	 * (FH_RSCSR_RXQ_POS/FH_RSCSR_RXQ_MASK) and the RPA/RADA
	 * offload flags in bits 25/26.
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
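
/*
 * Illustrative sketch (not part of the driver API): how a notification
 * handler might validate and access a packet's payload with the helpers
 * above. The notification structure name is hypothetical.
 *
 *	struct iwl_some_notif *notif;
 *
 *	if (iwl_rx_packet_payload_len(pkt) < sizeof(*notif))
 *		return;
 *	notif = (void *)pkt->data;
 */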

enum CMD_MODE {
	CMD_ASYNC		= BIT(0),	/* return without waiting for the response */
	CMD_WANT_SKB		= BIT(1),	/* keep the response buffer for the caller */
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_WANT_ASYNC_CALLBACK	= BIT(3),
	CMD_SEND_IN_D3		= BIT(4),
};

#define DEF_CMD_PAYLOAD_SIZE 320

struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * Maximum number of separate chunks (data[]/len[] entries in
 * struct iwl_host_cmd below) that make up one host command.
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/*
 * A few TFD entries are presumably reserved for the TX command, headers
 * and the skb head; the remaining ones can carry skb fragments.
 */
#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)

enum iwl_hcmd_dataflag {
	/* the chunk is referenced directly; the caller must keep it
	 * allocated until the command completes
	 */
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	/* the transport duplicates the chunk into its own buffer */
	IWL_HCMD_DFL_DUP	= BIT(1),
};

enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
	IWL_ERROR_EVENT_TABLE_TCM = BIT(3),
};

struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;	/* valid only if CMD_WANT_SKB was set */
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;			/* CMD_* flags */
	u32 id;				/* command id */
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
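
/*
 * Illustrative sketch (the command ID and payload struct are hypothetical):
 * sending a synchronous command, keeping the response buffer and releasing
 * it with iwl_free_resp() when done.
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = SOME_CMD_ID,
 *		.flags = CMD_WANT_SKB,
 *		.data = { &cmd_data, },
 *		.len = { sizeof(cmd_data), },
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &hcmd);
 *
 *	if (!ret) {
 *		struct iwl_rx_packet *pkt = hcmd.resp_pkt;
 *
 *		... parse pkt->data, iwl_rx_packet_payload_len(pkt) bytes ...
 *		iwl_free_resp(&hcmd);
 *	}
 */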

struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
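
/*
 * Illustrative sketch: an RX handler usually starts by mapping the buffer
 * it was handed to an iwl_rx_packet.
 *
 *	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *	u32 len = iwl_rx_packet_payload_len(pkt);
 */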

#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
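
/*
 * IWL_MASK() builds a mask with bits lo..hi set inclusive, e.g.
 * IWL_MASK(0, 7) == 0xFF and IWL_MASK(4, 5) == 0x30.
 */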

#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT		64
#define IWL_MAX_RX_HW_QUEUES	16
#define IWL_9000_MAX_RX_HW_QUEUES	6

enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return get_order(2 * 1024);
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(16 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

static inline int
iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return 2 * 1024;
	case IWL_AMSDU_4K:
		return 4 * 1024;
	case IWL_AMSDU_8K:
		return 8 * 1024;
	case IWL_AMSDU_12K:
		return 16 * 1024;
	default:
		WARN_ON(1);
		return 0;
	}
}

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
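
/*
 * Illustrative sketch (the command names are placeholders): an op mode
 * builds per-group arrays of HCMD_NAME() entries, sorted by command ID,
 * and hands them to the transport via struct iwl_trans_config so that
 * iwl_get_cmd_string() can resolve IDs to names.
 *
 *	static const struct iwl_hcmd_names iwl_example_legacy_names[] = {
 *		HCMD_NAME(SOME_CMD),
 *		HCMD_NAME(ANOTHER_CMD),
 *	};
 *
 *	static const struct iwl_hcmd_arr iwl_example_groups[] = {
 *		[0x0] = HCMD_ARR(iwl_example_legacy_names),
 *	};
 */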

struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;			/* HW queue used for host commands */
	u8 cmd_fifo;			/* FIFO used for the command queue */
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
	bool fw_reset_handshake;
};
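
/*
 * Illustrative sketch (values are placeholders): a typical op mode fills a
 * struct iwl_trans_config during startup and applies it with
 * iwl_trans_configure().
 *
 *	struct iwl_trans_config trans_cfg = {
 *		.op_mode = op_mode,
 *		.cmd_queue = 0,
 *		.cmd_fifo = 7,
 *		.rx_buf_size = IWL_AMSDU_4K,
 *		.command_groups = iwl_example_groups,
 *		.command_groups_size = ARRAY_SIZE(iwl_example_groups),
 *	};
 *
 *	iwl_trans_configure(trans, &trans_cfg);
 */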

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};

struct iwl_trans_ops {
	int (*start_hw)(struct iwl_trans *iwl_trans);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans);

	int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_tx_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);

	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);

	int (*txq_alloc)(struct iwl_trans *trans,
			 __le16 flags, u8 sta_id, u8 tid,
			 int cmd_id, int size,
			 unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);
	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
			    struct iwl_trans_rxq_dma_data *data);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	void (*sw_reset)(struct iwl_trans *trans);
	bool (*grab_nic_access)(struct iwl_trans *trans);
	void (*release_nic_access)(struct iwl_trans *trans);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask);
	void (*debugfs_cleanup)(struct iwl_trans *trans);
	void (*sync_nmi)(struct iwl_trans *trans);
	int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
	int (*set_reduce_power)(struct iwl_trans *trans,
				const void *data, u32 len);
	void (*interrupts)(struct iwl_trans *trans, bool enable);
};

enum iwl_trans_state {
	IWL_TRANS_NO_FW,
	IWL_TRANS_FW_STARTED,
	IWL_TRANS_FW_ALIVE,
};

enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};

enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)

struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	u32 tcm_error_event_table;
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
	struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];

	struct iwl_dram_data fw_mon;

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;

	u64 unsupported_region_msk;
	struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
	struct list_head debug_info_tlv_list;
	struct iwl_dbg_tlv_time_point_data
		time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

struct iwl_cmd_meta {
	/* only used for synchronous commands that want the response */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

/* size of the per-entry first TB buffer, aligned up to 64 bytes below */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	void *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;

	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans *trans;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};

struct iwl_trans_txqs {
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	struct dma_pool *bc_pool;
	size_t bc_tbl_size;
	bool bc_table_dword;
	u8 page_offs;
	u8 dev_cmd_offs;
	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	struct {
		u8 fifo;
		u8 q_id;
		unsigned int wdg_timeout;
	} cmd;

	struct {
		u8 max_tbs;
		u16 size;
		u8 addr_size;
	} tfd;

	struct iwl_dma_ptr scd_bc_tbls;
};

struct iwl_trans {
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg_trans_params *trans_cfg;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rf_id;
	u32 hw_id;
	char hw_id_str[52];
	u32 sku_id[3];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;
	u8 pnvm_loaded:1;
	u8 reduce_power_loaded:1;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;
	bool wide_cmd_header;

	wait_queue_head_t wait_command_queue;
	u8 num_rx_queues;

	size_t iml_len;
	u8 *iml;

	/* the following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_trans_debug dbg;
	struct iwl_self_init_dram init_dram;

	enum iwl_plat_pm_mode system_pm_mode;

	const char *name;
	struct iwl_trans_txqs txqs;

	/* transport-specific private area, kept pointer-aligned */
	char trans_specific[] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);

static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}

static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return trans->ops->start_hw(trans);
}

static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	int ret;

	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	ret = trans->ops->start_fw(trans, fw, run_in_rfkill);
	if (ret == 0)
		trans->state = IWL_TRANS_FW_STARTED;

	return ret;
}

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	trans->ops->stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}
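
/*
 * Illustrative sketch of the bring-up order implied by the state machine
 * above (the firmware image is whatever the op mode selected; error
 * handling omitted):
 *
 *	iwl_trans_configure(trans, &trans_cfg);
 *	iwl_trans_start_hw(trans);
 *	iwl_trans_start_fw(trans, fw_img, false);
 *	... wait for the ALIVE notification from the firmware ...
 *	iwl_trans_fw_alive(trans, scd_base_addr);
 *
 * Teardown is iwl_trans_stop_device() followed by
 * iwl_trans_op_mode_leave().
 */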

static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
				       bool reset)
{
	might_sleep();
	if (!trans->ops->d3_suspend)
		return 0;

	return trans->ops->d3_suspend(trans, test, reset);
}

static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test, bool reset)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return 0;

	return trans->ops->d3_resume(trans, status, test, reset);
}

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
{
	if (!trans->ops->dump_data)
		return NULL;
	return trans->ops->dump_data(trans, dump_mask);
}

static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_tx_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}
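
/*
 * Illustrative sketch (queue selection and TX command contents are op mode
 * specific and only hinted at here): a frame is sent by allocating a device
 * TX command, filling it, and handing both to the transport; the command is
 * freed again if the transport rejects the frame.
 *
 *	struct iwl_device_tx_cmd *dev_cmd = iwl_trans_alloc_tx_cmd(trans);
 *
 *	if (!dev_cmd)
 *		return -ENOMEM;
 *	... fill dev_cmd->hdr and dev_cmd->payload ...
 *	if (iwl_trans_tx(trans, skb, dev_cmd, txq_id))
 *		iwl_trans_free_tx_cmd(trans, dev_cmd);
 */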

static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs);
}

static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
					int ptr)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->set_q_ptrs(trans, queue, ptr);
}

static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return false;
	}

	return trans->ops->txq_enable(trans, queue, ssn,
				      cfg, queue_wdg_timeout);
}

static inline int
iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			   struct iwl_trans_rxq_dma_data *data)
{
	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
		return -ENOTSUPP;

	return trans->ops->rxq_dma_data(trans, queue, data);
}

static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}

static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    __le16 flags, u8 sta_id, u8 tid,
		    int cmd_id, int size,
		    unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->txq_alloc(trans, flags, sta_id, tid,
				     cmd_id, size, wdg_timeout);
}

static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
						 int queue, bool shared_mode)
{
	if (trans->ops->txq_set_shared_mode)
		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}

static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
						 u32 txqs)
{
	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
		return -ENOTSUPP;

	/* no need to wait if the firmware is not alive */
	if (trans->state != IWL_TRANS_FW_ALIVE) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queues_empty(trans, txqs);
}

static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_txq_empty(trans, queue);
}

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	trans->ops->write_prph(trans, ofs, val);
}

static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)
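
/*
 * Illustrative sketch (the target address and structure are placeholders):
 * reading a dword-aligned block of device memory into a host structure.
 *
 *	struct iwl_example_tlv tlv;
 *
 *	iwl_trans_read_mem_bytes(trans, base_addr, &tlv, sizeof(tlv));
 *
 * The BUILD_BUG_ON() in the macro enforces that sizeof(tlv) is a multiple
 * of sizeof(u32) when it is a compile-time constant.
 */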

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}

static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
{
	if (trans->ops->sw_reset)
		trans->ops->sw_reset(trans);
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans)		\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
	trans->ops->release_nic_access(trans);
	__release(nic_access);
}
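
/*
 * Illustrative sketch: direct register access is bracketed by grab/release
 * so the transport can hold the device accessible for the duration (the
 * exact semantics are transport specific; SOME_PRPH_REG is a placeholder).
 *
 *	if (iwl_trans_grab_nic_access(trans)) {
 *		u32 val = iwl_trans_read_prph(trans, SOME_PRPH_REG);
 *
 *		iwl_trans_release_nic_access(trans);
 *	}
 */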

static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
		iwl_op_mode_nic_error(trans->op_mode, sync);
		trans->state = IWL_TRANS_NO_FW;
	}
}

static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
	return trans->state == IWL_TRANS_FW_ALIVE;
}

static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	if (trans->ops->sync_nmi)
		trans->ops->sync_nmi(trans);
}

void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
				  u32 sw_err_bit);

static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
				     const void *data, u32 len)
{
	if (trans->ops->set_pnvm) {
		int ret = trans->ops->set_pnvm(trans, data, len);

		if (ret)
			return ret;
	}

	trans->pnvm_loaded = true;

	return 0;
}

static inline int iwl_trans_set_reduce_power(struct iwl_trans *trans,
					     const void *data, u32 len)
{
	if (trans->ops->set_reduce_power) {
		int ret = trans->ops->set_reduce_power(trans, data, len);

		if (ret)
			return ret;
	}

	trans->reduce_power_loaded = true;
	return 0;
}

static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
	       trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}

static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
{
	if (trans->ops->interrupts)
		trans->ops->interrupts(trans, enable);
}

/* transport allocation / de-allocation helpers */
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_trans_ops *ops,
				  const struct iwl_cfg_trans_params *cfg_trans);
int iwl_trans_init(struct iwl_trans *trans);
void iwl_trans_free(struct iwl_trans *trans);

/* PCI driver registration (implemented by the PCIe transport) */
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif /* __iwl_trans_h__ */