1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62#ifndef __iwl_trans_h__
63#define __iwl_trans_h__
64
65#include <linux/ieee80211.h>
66#include <linux/mm.h>
67#include <linux/lockdep.h>
68#include <linux/kernel.h>
69
70#include "iwl-debug.h"
71#include "iwl-config.h"
72#include "fw/img.h"
73#include "iwl-op-mode.h"
74#include "fw/api/cmdhdr.h"
75#include "fw/api/txq.h"
76#include "fw/api/dbg-tlv.h"
77#include "iwl-dbg-tlv.h"
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
/*
 * Layout of the len_n_flags word the device writes ahead of each RX frame
 * (see struct iwl_rx_packet below).
 */
#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF	/* bits 0-13: frame length */
#define FH_RSCSR_FRAME_INVALID 0x55550000	/* marker for invalid frames */
#define FH_RSCSR_FRAME_ALIGN 0x40		/* frames are 0x40-byte aligned */
#define FH_RSCSR_RPA_EN BIT(25)			/* RX protocol aggregation */
#define FH_RSCSR_RADA_EN BIT(26)		/* RX aggregation de-agg by HW */
#define FH_RSCSR_RXQ_POS 16			/* bits 16-21: RX queue */
#define FH_RSCSR_RXQ_MASK 0x3F0000
120
/**
 * struct iwl_rx_packet - RX packet/notification as delivered by the device
 * @len_n_flags: the low 14 bits (%FH_RSCSR_FRAME_SIZE_MSK) are the total
 *	length including @hdr; the remaining bits carry the RX queue and
 *	flags, see the FH_RSCSR_* definitions above
 * @hdr: command/notification header (id, sequence, ...)
 * @data: payload; its length is the frame size minus sizeof(@hdr),
 *	see iwl_rx_packet_payload_len()
 */
struct iwl_rx_packet {
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;
143
/* Total length of @pkt in bytes, including the iwl_cmd_header. */
static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

/* Length of @pkt's payload only, i.e. excluding the header. */
static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
/**
 * enum CMD_MODE - how to send the host commands ?
 * @CMD_ASYNC: return right away and don't wait for the response
 * @CMD_WANT_SKB: the caller needs the response buffer; once done with it,
 *	the caller must call iwl_free_resp() to release the response page
 * @CMD_SEND_IN_RFKILL: allow the command to be sent while RF-kill is
 *	asserted
 * @CMD_HIGH_PRIO: the command is high priority
 * @CMD_SEND_IN_IDLE: allow sending while the transport is idle
 * @CMD_MAKE_TRANS_IDLE: this command will move the transport to idle
 * @CMD_WAKE_UP_TRANS: this command will wake the transport from idle
 * @CMD_WANT_ASYNC_CALLBACK: invoke the op_mode's async callback for this
 *	command as well
 */
enum CMD_MODE {
	CMD_ASYNC = BIT(0),
	CMD_WANT_SKB = BIT(1),
	CMD_SEND_IN_RFKILL = BIT(2),
	CMD_HIGH_PRIO = BIT(3),
	CMD_SEND_IN_IDLE = BIT(4),
	CMD_MAKE_TRANS_IDLE = BIT(5),
	CMD_WAKE_UP_TRANS = BIT(6),
	CMD_WANT_ASYNC_CALLBACK = BIT(7),
};

/* Default payload size of a host command, in bytes (header excluded). */
#define DEF_CMD_PAYLOAD_SIZE 320
182
183
184
185
186
187
188
189
/**
 * struct iwl_device_cmd - buffer for a host command as sent to the device
 *
 * The union lets the same fixed-size buffer be used with either the
 * normal command header or the wide (group-id) header; the payload
 * shrinks accordingly so the total size stays the same.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

/* Maximum payload (header included) that one TFD host command carries. */
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
206
207
208
209
210
/* Number of data chunks a single host command may point to. */
#define IWL_MAX_CMD_TBS_PER_TFD 2

/**
 * enum iwl_hcmd_dataflag - per-chunk flag for host command data
 * @IWL_HCMD_DFL_NOCOPY: the chunk is not copied into the command buffer;
 *	it is DMA-mapped in place, so the caller must keep it valid until
 *	the command completes
 * @IWL_HCMD_DFL_DUP: the chunk is duplicated into a separately allocated
 *	buffer rather than copied into the command itself
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY = BIT(0),
	IWL_HCMD_DFL_DUP = BIT(1),
};
232
233
234
235
236
237
238
239
240
241
242
243
244
245
/**
 * struct iwl_host_cmd - host command to the uCode
 * @data: array of chunks composing the command's data
 * @resp_pkt: response packet, valid only if %CMD_WANT_SKB was set in @flags
 * @_rx_page_addr: (internal) page address holding the response
 * @_rx_page_order: (internal) allocation order of the response page
 * @flags: CMD_* flags, see enum CMD_MODE
 * @id: command id
 * @len: length of each chunk in @data
 * @dataflags: IWL_HCMD_DFL_* flag for each chunk in @data
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

/* Release the response page of a command sent with %CMD_WANT_SKB. */
static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
262
/*
 * struct iwl_rx_cmd_buffer - page-backed RX buffer handed to the op_mode.
 * _page/_offset locate the packet within the (possibly shared) page;
 * _page_stolen is set when the op_mode takes ownership of the page.
 */
struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
	u8 status;
};

/* Address of the packet data inside the backing page. */
static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

/* Offset of the packet data from the start of the backing page. */
static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

/*
 * Take ownership of the backing page: marks it stolen and grabs an extra
 * page reference, which the new owner must drop when done.
 */
static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

/* Drop the RX buffer's reference on its backing page. */
static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
293
/* Max number of entries in the no_reclaim_cmds list (iwl_trans_config). */
#define MAX_NO_RECLAIM_CMDS 6

/* Inclusive bitmask covering bits lo..hi.
 * NOTE(review): undefined behavior for hi == 31 (1 << 31 overflows signed
 * int) — callers are assumed to stay below that; confirm before reuse.
 */
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * Maximum number of HW queues the transport layer supports;
 * TVQM (TX virtual queue manager, 22000+ devices) extends this.
 */
#define IWL_MAX_HW_QUEUES 32
#define IWL_MAX_TVQM_QUEUES 512

#define IWL_MAX_TID_COUNT 8	/* data TIDs 0-7 */
#define IWL_MGMT_TID 15		/* pseudo-TID used for management frames */
#define IWL_FRAME_LIMIT 64	/* default aggregation frame limit */
#define IWL_MAX_RX_HW_QUEUES 16
309
310
311
312
313
314
/**
 * enum iwl_d3_status - WoWLAN image/device status after D3 resume
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status - bits of the transport's status word
 * @STATUS_SYNC_HCMD_ACTIVE: a synchronous host command is in flight
 * @STATUS_DEVICE_ENABLED: the device HW has been started
 * @STATUS_TPOWER_PMI: the device may be asleep (PMI set)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: the RF-kill state reported to the op_mode
 * @STATUS_FW_ERROR: the firmware is in error state
 * @STATUS_TRANS_GOING_IDLE: the transport is transitioning to idle
 * @STATUS_TRANS_IDLE: the transport is idle
 * @STATUS_TRANS_DEAD: the transport is dead, no further commands allowed
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};
346
347static inline int
348iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
349{
350 switch (rb_size) {
351 case IWL_AMSDU_2K:
352 return get_order(2 * 1024);
353 case IWL_AMSDU_4K:
354 return get_order(4 * 1024);
355 case IWL_AMSDU_8K:
356 return get_order(8 * 1024);
357 case IWL_AMSDU_12K:
358 return get_order(12 * 1024);
359 default:
360 WARN_ON(1);
361 return -1;
362 }
363}
364
/**
 * struct iwl_hcmd_names - host command id/name pair, for debug output
 * @cmd_id: the command's numeric id
 * @cmd_name: the command's symbolic name
 */
struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

/* Build an id/name pair from a command's symbolic name. */
#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

/**
 * struct iwl_hcmd_arr - array of command id/name pairs for one group
 * @arr: the array of pairs
 * @size: number of entries in @arr
 */
struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

/* Wrap a static iwl_hcmd_names array into an iwl_hcmd_arr. */
#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
/**
 * struct iwl_trans_config - transport configuration set by the op_mode
 * @op_mode: pointer to the upper layer (op_mode)
 * @cmd_queue: index of the command queue
 * @cmd_fifo: fifo used for host commands
 * @cmd_q_wdg_timeout: watchdog timeout for the command queue
 * @no_reclaim_cmds: command ids whose responses must never trigger TX
 *	reclaim
 * @n_no_reclaim_cmds: number of entries in @no_reclaim_cmds
 *	(at most %MAX_NO_RECLAIM_CMDS)
 * @rx_buf_size: RX buffer size (for A-MSDU support)
 * @bc_table_dword: if true, the byte-count table entries are dwords
 * @scd_set_active: whether the transport configures the SCD active state
 * @sw_csum_tx: whether TX checksumming is done in software
 * @command_groups: arrays of command id/name pairs, for debug printing
 * @command_groups_size: number of entries in @command_groups
 * @cb_data_offs: offset inside skb->cb where the transport may store its
 *	data
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
};

/**
 * struct iwl_trans_dump_data - debug dump produced by the transport
 * @len: length of @data in bytes
 * @data: the dump payload
 */
struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};
429
struct iwl_trans;

/**
 * struct iwl_trans_txq_scd_cfg - scheduler configuration for a TX queue
 * @fifo: the fifo the queue is mapped to
 * @sta_id: station id the queue belongs to
 * @tid: TID served by the queue
 * @aggregate: whether the queue carries an aggregation session
 * @frame_limit: aggregation frame limit
 */
struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_rxq_dma_data - DMA addresses/pointers of one RX queue,
 *	as reported by the transport for firmware configuration
 * @fr_bd_cb: DMA address of the free BD cyclic buffer
 * @fr_bd_wid: initial write index of that buffer
 * @urbd_stts_wrptr: DMA address of the RB status write pointer
 * @ur_bd_cb: DMA address of the used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
/**
 * struct iwl_trans_ops - transport-specific operations (vtable)
 *
 * Ops that the inline wrappers below call unconditionally are mandatory;
 * ops the wrappers NULL-check first (e.g. d3_suspend, dump_data,
 * txq_alloc, suspend/resume) are optional. Several of the wrappers call
 * might_sleep() — those ops may sleep and must not be invoked from
 * atomic context.
 */
struct iwl_trans_ops {
	/* device lifecycle */
	int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans, bool low_power);

	/* D3 (WoWLAN) suspend/resume */
	void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	/* host command and data paths */
	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	/* TX queue management (legacy SCD-configured queues) */
	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);

	/* TX queue management (dynamically allocated, newer devices) */
	int (*txq_alloc)(struct iwl_trans *trans,
			 __le16 flags, u8 sta_id, u8 tid,
			 int cmd_id, int size,
			 unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);
	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
			    struct iwl_trans_rxq_dma_data *data);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	/* register, periphery and memory access */
	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	/* configuration and misc */
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	void (*sw_reset)(struct iwl_trans *trans);
	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
	void (*release_nic_access)(struct iwl_trans *trans,
				   unsigned long *flags);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
	void (*ref)(struct iwl_trans *trans);
	void (*unref)(struct iwl_trans *trans);
	int  (*suspend)(struct iwl_trans *trans);
	void (*resume)(struct iwl_trans *trans);

	/* debugging */
	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask);
	void (*debugfs_cleanup)(struct iwl_trans *trans);
};
612
613
614
615
616
617
618
/**
 * enum iwl_trans_state - state of the transport layer
 * @IWL_TRANS_NO_FW: no firmware is running (before start_fw / after stop)
 * @IWL_TRANS_FW_ALIVE: firmware is alive; TX/RX paths are usable
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW = 0,
	IWL_TRANS_FW_ALIVE = 1,
};
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
/**
 * enum iwl_plat_pm_mode - platform power-management mode
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled
 * @IWL_PLAT_PM_MODE_D3: the device goes to D3 (WoWLAN) on suspend
 * @IWL_PLAT_PM_MODE_D0I3: the device goes to D0i3 (runtime idle)
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
	IWL_PLAT_PM_MODE_D0I3,
};

/* Timeout (ms) to wake the transport out of idle (d0i3). */
#define IWL_TRANS_IDLE_TIMEOUT 2000

/**
 * struct iwl_dram_data - a DMA-coherent allocation in host DRAM
 * @physical: DMA address of the block
 * @block: CPU (virtual) address of the block
 * @size: size of the block in bytes
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
/**
 * struct iwl_trans - transport common data
 * @ops: the transport-specific ops vtable
 * @op_mode: the upper layer currently attached (NULL after leave)
 * @cfg: the configuration of the device this transport drives
 * @drv: the driver context
 * @state: current transport state (see enum iwl_trans_state)
 * @status: bitmap of STATUS_* bits (see enum iwl_trans_status)
 * @dev: the struct device that represents the bus
 * @max_skb_frags: maximum number of fragments an skb may have for TX,
 *	0 indicates unlimited
 * @hw_rev: the revision data of the HW
 * @hw_rf_id: the RF id of the HW
 * @hw_id: the id of the HW (bus-specific encoding)
 * @hw_id_str: printable version of @hw_id
 * @rx_mpdu_cmd: command id used for RX MPDU notifications
 * @rx_mpdu_cmd_hdr_size: size of the RX MPDU command header
 * @pm_support: whether PCIe power management is supported
 * @ltr_enabled: whether LTR is enabled
 * @command_groups: command id/name tables for debug printing
 * @command_groups_size: number of entries in @command_groups
 * @wide_cmd_header: whether the wide command header format is used
 * @num_rx_queues: number of RX queues allocated by the transport
 * @iml_len: length of the image loader in bytes
 * @iml: image loader image (virtual address)
 * @dev_cmd_pool: kmem_cache pool for TX command allocation
 * @dev_cmd_pool_name: name of that pool
 * @dbgfs_dir: transport's debugfs directory
 * @apply_points: debug TLV apply-point data
 * @apply_points_ext: extended debug TLV apply-point data
 * @external_ini_loaded: whether an external ini debug config was loaded
 * @ini_valid: whether the loaded ini debug config is valid
 * @dbg_dest_tlv: debug destination TLV, points into the fw image
 * @dbg_conf_tlv: debug configuration TLVs
 * @dbg_trigger_tlv: debug trigger TLVs
 * @dbg_n_dest_reg: number of registers in @dbg_dest_tlv
 * @num_blocks: number of valid entries in @fw_mon
 * @fw_mon: firmware monitor DRAM blocks
 * @system_pm_mode: PM mode for system-wide suspend
 * @runtime_pm_mode: PM mode for runtime suspend
 * @suspending: whether a suspend is in progress
 * @dbg_rec_on: whether debug recording is enabled
 * @trans_specific: transport-specific private data, placed at the end so
 *	it can be allocated in the same allocation (see iwl_trans_alloc())
 */
struct iwl_trans {
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rf_id;
	u32 hw_id;
	char hw_id_str[52];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;
	bool wide_cmd_header;

	u8 num_rx_queues;

	size_t iml_len;
	u8 *iml;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_apply_point_data apply_points[IWL_FW_INI_APPLY_NUM];
	struct iwl_apply_point_data apply_points_ext[IWL_FW_INI_APPLY_NUM];

	bool external_ini_loaded;
	bool ini_valid;

	const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
	u8 dbg_n_dest_reg;
	int num_blocks;
	struct iwl_dram_data fw_mon[IWL_FW_INI_APPLY_NUM];

	enum iwl_plat_pm_mode system_pm_mode;
	enum iwl_plat_pm_mode runtime_pm_mode;
	bool suspending;
	bool dbg_rec_on;

	/* pointer to trans specific struct */
	/*Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[0] __aligned(sizeof(void *));
};
803
804const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
805int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
806
/*
 * Attach the op_mode and hand the op_mode's configuration down to the
 * transport; also sanity-checks that the debug command-name tables are
 * sorted (required for the binary search in iwl_get_cmd_string()).
 */
static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}
815
/*
 * Start the transport's HW. May sleep. @low_power is forwarded to the
 * transport implementation (presumably requests a reduced-power bring-up
 * — confirm against the specific transport).
 */
static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
{
	might_sleep();

	return trans->ops->start_hw(trans, low_power);
}
822
823static inline int iwl_trans_start_hw(struct iwl_trans *trans)
824{
825 return trans->ops->start_hw(trans, true);
826}
827
/*
 * Detach the op_mode from the transport. May sleep. After this the
 * transport holds no firmware and no op_mode pointer.
 */
static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	/* op_mode_leave is optional */
	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

/*
 * Called when the firmware has sent its alive notification; marks the
 * transport alive *before* calling down so the op can use the data path.
 */
static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}
848
/*
 * Load and start the given firmware image. May sleep. The op_mode must
 * have set rx_mpdu_cmd (via configuration) beforehand; any previous
 * firmware error state is cleared before the new start.
 */
static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	return trans->ops->start_fw(trans, fw, run_in_rfkill);
}

/*
 * Stop the device; low_power selects the transport's low-power stop
 * variant. May sleep. Leaves the transport with no firmware running.
 */
static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
					  bool low_power)
{
	might_sleep();

	trans->ops->stop_device(trans, low_power);

	trans->state = IWL_TRANS_NO_FW;
}

/* Stop the device using the default (low_power = true) path. */
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	_iwl_trans_stop_device(trans, true);
}
875
876static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
877 bool reset)
878{
879 might_sleep();
880 if (trans->ops->d3_suspend)
881 trans->ops->d3_suspend(trans, test, reset);
882}
883
/*
 * Resume from D3 (WoWLAN). May sleep. @status reports whether the
 * firmware survived the suspend. Optional op; returns 0 when absent.
 */
static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test, bool reset)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return 0;

	return trans->ops->d3_resume(trans, status, test, reset);
}

/* System-suspend hook; optional op, returns 0 when absent. */
static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
	if (!trans->ops->suspend)
		return 0;

	return trans->ops->suspend(trans);
}

/* System-resume hook; optional op, no-op when absent. */
static inline void iwl_trans_resume(struct iwl_trans *trans)
{
	if (trans->ops->resume)
		trans->ops->resume(trans);
}
908
909static inline struct iwl_trans_dump_data *
910iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
911{
912 if (!trans->ops->dump_data)
913 return NULL;
914 return trans->ops->dump_data(trans, dump_mask);
915}
916
/*
 * Allocate a TX device command from the transport's pool (GFP_ATOMIC,
 * usable from the TX hot path). Pair with iwl_trans_free_tx_cmd().
 */
static inline struct iwl_device_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

/* Return a TX device command to the transport's pool. */
static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}
930
/*
 * Queue an skb (with its pre-built device command) on a TX queue.
 * Rejected with -EIO if the firmware is in error state or not alive.
 */
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}

/*
 * Free/complete transmitted frames up to (but not including) @ssn on
 * @queue; the reclaimed skbs are appended to @skbs.
 */
static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs);
}
955
/*
 * Disable a TX queue; @configure_scd selects whether the scheduler is
 * also deconfigured.
 */
static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

/*
 * Enable a TX queue with the given scheduler configuration. May sleep.
 * Returns the transport's txq_enable result; false if the firmware is
 * not alive.
 */
static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return false;
	}

	return trans->ops->txq_enable(trans, queue, ssn,
				      cfg, queue_wdg_timeout);
}
977
/*
 * Retrieve the DMA addresses of one RX queue (for FW configuration).
 * Optional op; -ENOTSUPP when the transport doesn't provide it.
 */
static inline int
iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			   struct iwl_trans_rxq_dma_data *data)
{
	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
		return -ENOTSUPP;

	return trans->ops->rxq_dma_data(trans, queue, data);
}

/*
 * Free a dynamically allocated TX queue (see iwl_trans_txq_alloc()).
 * Optional op; silently does nothing (after a WARN) when unsupported.
 */
static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}
996
/*
 * Dynamically allocate a TX queue (newer, TVQM-style devices). May
 * sleep. Optional op (-ENOTSUPP when absent); requires alive firmware.
 * Returns the queue number, or a negative errno.
 */
static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    __le16 flags, u8 sta_id, u8 tid,
		    int cmd_id, int size,
		    unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->txq_alloc(trans, flags, sta_id, tid,
				     cmd_id, size, wdg_timeout);
}

/* Mark a TX queue as (not) shared between stations/TIDs. Optional op. */
static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
						 int queue, bool shared_mode)
{
	if (trans->ops->txq_set_shared_mode)
		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}
1023
/*
 * Convenience wrapper around iwl_trans_txq_enable_cfg() building the
 * scheduler config from scalar arguments. A negative @sta_id marks a
 * non-aggregated (non-station) queue.
 */
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

/*
 * Enable an AC (non-station) TX queue: no station, no aggregation,
 * default frame limit, starting from ssn 0.
 */
static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}
1054
/*
 * Freeze/unfreeze the watchdog timers of the TX queues in @txqs (bitmap).
 * Optional op; requires alive firmware.
 */
static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

/*
 * Block/unblock updates of the TX queue write pointers. Optional op;
 * requires alive firmware.
 */
static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}
1079
/*
 * Wait until the TX queues in @txqs (bitmap) are empty. Optional op
 * (-ENOTSUPP when absent); requires alive firmware.
 */
static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
						 u32 txqs)
{
	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queues_empty(trans, txqs);
}

/*
 * Wait until a single TX queue is empty. Optional op (-ENOTSUPP when
 * absent); requires alive firmware.
 */
static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_txq_empty(trans, queue);
}
1106
/* Direct (CSR) register accessors — thin wrappers over the transport. */

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

/* Periphery (PRPH) register read. */
static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}
1126
1127static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
1128 u32 val)
1129{
1130 return trans->ops->write_prph(trans, ofs, val);
1131}
1132
/* Read @dwords 32-bit words of device memory at @addr into @buf. */
static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

/*
 * Byte-count convenience wrapper; bufsize must be a multiple of
 * sizeof(u32) (enforced at build time for constant sizes).
 */
#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \
	do { \
		if (__builtin_constant_p(bufsize)) \
			BUILD_BUG_ON((bufsize) % sizeof(u32)); \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)

/*
 * Read a single 32-bit word of device memory; returns the poison value
 * 0xa5a5a5a5 (with a WARN) if the read fails.
 */
static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}

/* Write @dwords 32-bit words from @buf to device memory at @addr. */
static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

/* Write a single 32-bit word to device memory. */
static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}
1167
/* Set/clear the PMI (power-management indication) state. Optional op. */
static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

/* Perform a software reset of the device. Optional op. */
static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
{
	if (trans->ops->sw_reset)
		trans->ops->sw_reset(trans);
}

/* Read-modify-write: set the @mask bits of @reg to @value. */
static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

/*
 * Acquire direct NIC access (keeps the device awake). On success the
 * caller must balance with iwl_trans_release_nic_access(); the
 * __cond_lock annotation teaches sparse about this pairing.
 */
#define iwl_trans_grab_nic_access(trans, flags)	\
	__cond_lock(nic_access,	\
		    likely((trans)->ops->grab_nic_access(trans, flags)))

/* Release NIC access taken with iwl_trans_grab_nic_access(). */
static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}
1196
/*
 * Report a firmware error to the op_mode. test_and_set_bit() ensures the
 * op_mode is notified only once per error (prevents double restarts for
 * the same erroneous firmware state).
 */
static inline void iwl_trans_fw_error(struct iwl_trans *trans)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
		iwl_op_mode_nic_error(trans->op_mode);
}
1206
1207
1208
1209
1210struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
1211 struct device *dev,
1212 const struct iwl_cfg *cfg,
1213 const struct iwl_trans_ops *ops);
1214void iwl_trans_free(struct iwl_trans *trans);
1215void iwl_trans_ref(struct iwl_trans *trans);
1216void iwl_trans_unref(struct iwl_trans *trans);
1217
1218
1219
1220
1221int __must_check iwl_pci_register_driver(void);
1222void iwl_pci_unregister_driver(void);
1223
1224#endif
1225