1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63#ifndef __iwl_trans_h__
64#define __iwl_trans_h__
65
66#include <linux/ieee80211.h>
67#include <linux/mm.h>
68#include <linux/lockdep.h>
69
70#include "iwl-debug.h"
71#include "iwl-config.h"
72#include "iwl-fw.h"
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
/*
 * The 16-bit command sequence encodes a TX queue and an index within it:
 * bits 8..12 hold the queue number, bits 0..7 the index.
 */
#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
#define SEQ_TO_INDEX(s) ((s) & 0xff)
#define INDEX_TO_SEQ(i) ((i) & 0xff)
/* bit 15 of the (little-endian) sequence — presumably marks a frame
 * received from the device rather than a command response; confirm at
 * the users of this flag */
#define SEQ_RX_FRAME cpu_to_le16(0x8000)
122
123
124
125
126
127
128
/**
 * struct iwl_cmd_header - header shared by all host commands and responses
 * @cmd: command or response id
 * @flags: command flags (IWL_CMD_FAILED_MSK is tested in this byte —
 *	NOTE(review): inferred from the neighbouring define; confirm)
 * @sequence: queue/index pair built with QUEUE_TO_SEQ()/INDEX_TO_SEQ();
 *	may carry SEQ_RX_FRAME for received frames
 */
struct iwl_cmd_header {
	u8 cmd;
	u8 flags;
	__le16 sequence;
} __packed;
154
155
/* set in iwl_cmd_header.flags when the firmware failed the command */
#define IWL_CMD_FAILED_MSK 0x40

/* len_n_flags layout of a received frame: lower 14 bits are the size */
#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF
/* poison pattern marking an invalid/unwritten frame */
#define FH_RSCSR_FRAME_INVALID 0x55550000
/* alignment of consecutive frames in the RX buffer */
#define FH_RSCSR_FRAME_ALIGN 0x40
162
/**
 * struct iwl_rx_packet - packet as received from the device
 * @len_n_flags: frame length in the FH_RSCSR_FRAME_SIZE_MSK bits, the
 *	remaining bits are flags (e.g. FH_RSCSR_FRAME_INVALID)
 * @hdr: the command header (id / flags / sequence)
 * @data: variable-length payload following the header
 */
struct iwl_rx_packet {
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;
178
179
180
181
182
183
184
185
186
/**
 * enum CMD_MODE - how to send a host command
 * @CMD_SYNC: the default (0): wait for the command to complete
 * @CMD_ASYNC: return right after queueing; no lockdep tracking is done
 *	for such commands (see iwl_trans_send_cmd())
 * @CMD_WANT_SKB: keep the response buffer for the caller, who must then
 *	release it with iwl_free_resp()
 * @CMD_SEND_IN_RFKILL: allow sending even while in rfkill —
 *	NOTE(review): inferred from the name; confirm in the transport
 */
enum CMD_MODE {
	CMD_SYNC = 0,
	CMD_ASYNC = BIT(0),
	CMD_WANT_SKB = BIT(1),
	CMD_SEND_IN_RFKILL = BIT(2),
};
193
/* fixed payload size reserved in every device command buffer */
#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd - buffer holding a command sent to the device
 * @hdr: common command header
 * @payload: command payload, up to %DEF_CMD_PAYLOAD_SIZE bytes
 */
struct iwl_device_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[DEF_CMD_PAYLOAD_SIZE];
} __packed;

/* maximum payload a TFD can point to: one whole device command */
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
209
210
211
212
213
/* a host command may be split over at most this many data chunks
 * (sizes the data[]/len[]/dataflags[] arrays of struct iwl_host_cmd) */
#define IWL_MAX_CMD_TBS_PER_TFD 2

/**
 * enum iwl_hcmd_dataflag - per-chunk handling flag for a host command
 * @IWL_HCMD_DFL_NOCOPY: use the chunk in place instead of copying it —
 *	NOTE(review): semantics inferred from the name; the caller then
 *	presumably must keep the buffer alive until completion
 * @IWL_HCMD_DFL_DUP: have the transport duplicate the chunk
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY = BIT(0),
	IWL_HCMD_DFL_DUP = BIT(1),
};
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
/**
 * struct iwl_host_cmd - a command passed to iwl_trans_send_cmd()
 * @data: the chunks of command data, each handled per @dataflags
 * @resp_pkt: the response packet; only meaningful when the command was
 *	sent with %CMD_WANT_SKB
 * @_rx_page_addr: (transport-internal) address of the response page,
 *	released by iwl_free_resp()
 * @_rx_page_order: (transport-internal) order of the response page
 * @handler_status: status returned by the command handler —
 *	NOTE(review): inferred from the name; confirm at the users
 * @flags: CMD_* flags from &enum CMD_MODE
 * @len: length of each chunk in @data
 * @dataflags: IWL_HCMD_DFL_* flag for each chunk in @data
 * @id: the command id
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;
	int handler_status;

	u32 flags;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
	u8 id;
};
262
/*
 * iwl_free_resp - release the response pages recorded in @cmd
 *
 * Frees the page(s) the transport stored in _rx_page_addr/_rx_page_order;
 * cmd->resp_pkt must not be dereferenced afterwards.
 */
static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
267
/**
 * struct iwl_rx_cmd_buffer - page-backed buffer for one received packet
 * @_page: the backing page
 * @_offset: offset of the packet inside @_page (see rxb_addr())
 * @_page_stolen: set by rxb_steal_page() when a handler takes a
 *	reference of its own
 * @truesize: buffer size as accounted upward — NOTE(review): inferred
 *	from the conventional field name; confirm at the users
 * @_rx_page_order: allocation order, consumed by iwl_free_rxb()
 */
struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};
275
276static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
277{
278 return (void *)((unsigned long)page_address(r->_page) + r->_offset);
279}
280
/* Offset of the packet within the backing page. */
static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}
285
286static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
287{
288 r->_page_stolen = true;
289 get_page(r->_page);
290 return r->_page;
291}
292
/* Free the RX buffer's backing page(s) at the recorded order. */
static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
297
/* max entries the op_mode may pass in iwl_trans_config.no_reclaim_cmds */
#define MAX_NO_RECLAIM_CMDS 6

/* build a mask with bits lo..hi (inclusive) set */
#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/* hardware limits: TX queues, TIDs, and the aggregation frame limit
 * used as the default in iwl_trans_ac_txq_enable() */
#define IWL_MAX_HW_QUEUES 32
#define IWL_MAX_TID_COUNT 8
#define IWL_FRAME_LIMIT 64
309
310
311
312
313
314
/**
 * enum iwl_d3_status - device status reported after D3 (WoWLAN) resume
 * @IWL_D3_STATUS_ALIVE: the device/firmware survived the D3 period
 * @IWL_D3_STATUS_RESET: the device was reset while in D3; its state
 *	is gone — NOTE(review): recovery policy is decided by the caller
 *	of iwl_trans_d3_resume(); confirm there
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
/**
 * struct iwl_trans_config - configuration the op_mode hands to the
 *	transport through iwl_trans_configure()
 * @op_mode: the upper layer; stored into trans->op_mode by the wrapper
 * @cmd_queue: index of the command queue
 * @cmd_fifo: FIFO the command queue is mapped to
 * @no_reclaim_cmds: command ids whose responses take no reclaim flow;
 *	at most %MAX_NO_RECLAIM_CMDS entries
 * @n_no_reclaim_cmds: number of valid entries in @no_reclaim_cmds
 * @rx_buf_size_8k: use 8k RX buffers instead of the default size
 * @bc_table_dword: NOTE(review): presumably "byte-count table entries
 *	are dwords" — confirm in the transport implementation
 * @queue_watchdog_timeout: TX queue watchdog timeout — NOTE(review):
 *	unit (ms vs. jiffies) not visible here; confirm at the users
 * @command_names: printable names indexed by command id (debug aid)
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	const u8 *no_reclaim_cmds;
	int n_no_reclaim_cmds;

	bool rx_buf_size_8k;
	bool bc_table_dword;
	unsigned int queue_watchdog_timeout;
	const char **command_names;
};

struct iwl_trans;
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420struct iwl_trans_ops {
421
422 int (*start_hw)(struct iwl_trans *iwl_trans);
423 void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
424 int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
425 bool run_in_rfkill);
426 void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
427 void (*stop_device)(struct iwl_trans *trans);
428
429 void (*d3_suspend)(struct iwl_trans *trans, bool test);
430 int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
431 bool test);
432
433 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
434
435 int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
436 struct iwl_device_cmd *dev_cmd, int queue);
437 void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
438 struct sk_buff_head *skbs);
439
440 void (*txq_enable)(struct iwl_trans *trans, int queue, int fifo,
441 int sta_id, int tid, int frame_limit, u16 ssn);
442 void (*txq_disable)(struct iwl_trans *trans, int queue);
443
444 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
445 int (*wait_tx_queue_empty)(struct iwl_trans *trans);
446#ifdef CONFIG_PM_SLEEP
447 int (*suspend)(struct iwl_trans *trans);
448 int (*resume)(struct iwl_trans *trans);
449#endif
450 void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
451 void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
452 u32 (*read32)(struct iwl_trans *trans, u32 ofs);
453 u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
454 void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
455 int (*read_mem)(struct iwl_trans *trans, u32 addr,
456 void *buf, int dwords);
457 int (*write_mem)(struct iwl_trans *trans, u32 addr,
458 const void *buf, int dwords);
459 void (*configure)(struct iwl_trans *trans,
460 const struct iwl_trans_config *trans_cfg);
461 void (*set_pmi)(struct iwl_trans *trans, bool state);
462 bool (*grab_nic_access)(struct iwl_trans *trans, bool silent,
463 unsigned long *flags);
464 void (*release_nic_access)(struct iwl_trans *trans,
465 unsigned long *flags);
466 void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
467 u32 value);
468};
469
470
471
472
473
474
475
/**
 * enum iwl_trans_state - firmware state as seen by the transport
 * @IWL_TRANS_NO_FW: no firmware alive (set by the stop paths)
 * @IWL_TRANS_FW_ALIVE: firmware alive (set by iwl_trans_fw_alive());
 *	most data-path wrappers WARN_ONCE when called outside this state
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW = 0,
	IWL_TRANS_FW_ALIVE = 1,
};
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502struct iwl_trans {
503 const struct iwl_trans_ops *ops;
504 struct iwl_op_mode *op_mode;
505 const struct iwl_cfg *cfg;
506 enum iwl_trans_state state;
507
508 struct device *dev;
509 u32 hw_rev;
510 u32 hw_id;
511 char hw_id_str[52];
512
513 u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
514
515 bool pm_support;
516
517
518 struct kmem_cache *dev_cmd_pool;
519 size_t dev_cmd_headroom;
520 char dev_cmd_pool_name[50];
521
522 struct dentry *dbgfs_dir;
523
524#ifdef CONFIG_LOCKDEP
525 struct lockdep_map sync_cmd_lockdep_map;
526#endif
527
528
529
530 char trans_specific[0] __aligned(sizeof(void *));
531};
532
/*
 * Hand the op_mode's configuration to the transport and remember the
 * op_mode pointer so later callbacks can reach it.
 */
static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
}
540
/* Bring the HW up. May sleep. Returns the transport's status code. */
static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return trans->ops->start_hw(trans);
}
547
/*
 * Stop the HW. If the op_mode is leaving, detach its pointer so no
 * further callbacks can reach it; in any case the firmware is marked
 * as not alive. May sleep.
 */
static inline void iwl_trans_stop_hw(struct iwl_trans *trans,
				     bool op_mode_leaving)
{
	might_sleep();

	trans->ops->stop_hw(trans, op_mode_leaving);

	if (op_mode_leaving)
		trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}
560
/*
 * Mark the firmware alive, then notify the transport, passing down the
 * SCD base address @scd_addr. The state is set *before* the callback
 * runs. May sleep.
 */
static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}
569
/*
 * Load and launch @fw. Warns once if the op_mode has not set
 * trans->rx_mpdu_cmd beforehand. May sleep. @run_in_rfkill allows the
 * start to proceed in rfkill state.
 */
static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	return trans->ops->start_fw(trans, fw, run_in_rfkill);
}
580
/* Stop the device and mark the firmware as no longer alive. May sleep. */
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	trans->ops->stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}
589
/* Enter D3 (WoWLAN). @test presumably selects a test mode — confirm
 * at the transport implementation. May sleep. */
static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
{
	might_sleep();
	trans->ops->d3_suspend(trans, test);
}
595
/*
 * Leave D3; the device's fate (alive vs. reset) is reported through
 * @status, see &enum iwl_d3_status. May sleep.
 */
static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test)
{
	might_sleep();
	return trans->ops->d3_resume(trans, status, test);
}
603
/*
 * Send a host command. Synchronous commands (no CMD_ASYNC in
 * cmd->flags) are bracketed by a read acquisition of the sync-command
 * lockdep map so lockdep can flag deadlock-prone nesting. Warns once
 * when called while the firmware is not alive.
 */
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
				     struct iwl_host_cmd *cmd)
{
	int ret;

	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		  "%s bad state = %d", __func__, trans->state);

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_acquire_read(&trans->sync_cmd_lockdep_map);

	ret = trans->ops->send_cmd(trans, cmd);

	if (!(cmd->flags & CMD_ASYNC))
		lock_map_release(&trans->sync_cmd_lockdep_map);

	return ret;
}
622
623static inline struct iwl_device_cmd *
624iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
625{
626 u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
627
628 if (unlikely(dev_cmd_ptr == NULL))
629 return NULL;
630
631 return (struct iwl_device_cmd *)
632 (dev_cmd_ptr + trans->dev_cmd_headroom);
633}
634
635static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
636 struct iwl_device_cmd *dev_cmd)
637{
638 u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;
639
640 kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
641}
642
/*
 * Queue @skb, with its prepared @dev_cmd, on TX queue @queue. Returns
 * the transport's status code. Warns once if the firmware isn't alive.
 */
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_cmd *dev_cmd, int queue)
{
	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		  "%s bad state = %d", __func__, trans->state);

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}
651
/*
 * Reclaim completed frames on @queue up to @ssn; the transport
 * presumably collects the freed skbs into @skbs for the caller —
 * confirm against the transport implementation. Warns once if the
 * firmware isn't alive.
 */
static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		  "%s bad state = %d", __func__, trans->state);

	trans->ops->reclaim(trans, queue, ssn, skbs);
}
660
/* Deactivate TX @queue. Warns once if the firmware isn't alive. */
static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
{
	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		  "%s bad state = %d", __func__, trans->state);

	trans->ops->txq_disable(trans, queue);
}
668
/*
 * Activate TX @queue on @fifo for station @sta_id / TID @tid with the
 * given aggregation @frame_limit, starting from sequence number @ssn.
 * May sleep; warns once if the firmware isn't alive.
 */
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn)
{
	might_sleep();

	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		  "%s bad state = %d", __func__, trans->state);

	trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
			       frame_limit, ssn);
}
681
/*
 * Enable an AC (access-category) queue with defaults: no specific
 * station (sta_id -1), the non-QoS TID value (IWL_MAX_TID_COUNT), the
 * default frame limit, and ssn 0.
 */
static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
					   int fifo)
{
	iwl_trans_txq_enable(trans, queue, fifo, -1,
			     IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
}
688
/* Wait for the TX queues to drain. Warns once if the FW isn't alive. */
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
{
	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
		  "%s bad state = %d", __func__, trans->state);

	return trans->ops->wait_tx_queue_empty(trans);
}
696
/* Register the transport's debugfs entries under @dir. */
static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
					   struct dentry *dir)
{
	return trans->ops->dbgfs_register(trans, dir);
}
702
#ifdef CONFIG_PM_SLEEP
/* System suspend/resume pass-throughs; only built with PM sleep support. */
static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
	return trans->ops->suspend(trans);
}

static inline int iwl_trans_resume(struct iwl_trans *trans)
{
	return trans->ops->resume(trans);
}
#endif
714
/* Write one byte to device register @ofs. */
static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}
719
/* Write a 32-bit value to device register @ofs. */
static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}
724
/* Read a 32-bit value from device register @ofs. */
static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}
729
/* Read a periphery register at @ofs through the transport. */
static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}
734
735static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
736 u32 val)
737{
738 return trans->ops->write_prph(trans, ofs, val);
739}
740
/* Read @dwords 32-bit words of device memory at @addr into @buf. */
static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}
746
/*
 * Byte-count convenience wrapper around iwl_trans_read_mem(); bufsize
 * must be a whole number of 32-bit words (enforced at build time when
 * it is a compile-time constant).
 */
#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \
	do { \
		if (__builtin_constant_p(bufsize)) \
			BUILD_BUG_ON((bufsize) % sizeof(u32)); \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)
753
754static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
755{
756 u32 value;
757
758 if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
759 return 0xa5a5a5a5;
760
761 return value;
762}
763
/* Write @dwords 32-bit words from @buf to device memory at @addr. */
static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}
769
/* Write a single 32-bit word @val to device memory at @addr.
 * NOTE(review): iwl_trans_write_mem() returns int but this wrapper
 * returns u32 — an implicit signed->unsigned conversion of the status;
 * callers checking for negative errors should be aware. */
static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}
775
/* Set/clear the power-management indication bit on the device. */
static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	trans->ops->set_pmi(trans, state);
}
780
/* Read-modify-write register @reg: apply @value bits under @mask. */
static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}
786
/*
 * Acquire low-level NIC access; evaluates to the (likely) boolean
 * result of the transport callback. __cond_lock tells sparse that the
 * "nic_access" context is held only when the call succeeds — pair each
 * success with iwl_trans_release_nic_access().
 */
#define iwl_trans_grab_nic_access(trans, silent, flags)	\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans, silent, flags)))
790
/*
 * Release the NIC access taken by a successful
 * iwl_trans_grab_nic_access(); the __releases/__release annotations
 * keep sparse's context tracking balanced.
 */
static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}
797
798
799
800
/* Registration entry points implemented by the PCIe transport layer. */
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
803
/*
 * Initialize the per-transport lockdep map used to track synchronous
 * host commands (see iwl_trans_send_cmd()). The static lock_class_key
 * is deliberately function-local and shared: every transport instance
 * maps to the same lockdep class. No-op without CONFIG_LOCKDEP.
 */
static inline void trans_lockdep_init(struct iwl_trans *trans)
{
#ifdef CONFIG_LOCKDEP
	static struct lock_class_key __key;

	lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
			 &__key, 0);
#endif
}
813
814#endif
815