1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63#ifndef __iwl_trans_h__
64#define __iwl_trans_h__
65
66#include <linux/ieee80211.h>
67#include <linux/mm.h>
68#include <linux/lockdep.h>
69
70#include "iwl-debug.h"
71#include "iwl-config.h"
72#include "iwl-fw.h"
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
118#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
119#define SEQ_TO_INDEX(s) ((s) & 0xff)
120#define INDEX_TO_SEQ(i) ((i) & 0xff)
121#define SEQ_RX_FRAME cpu_to_le16(0x8000)
122
123
124
125
126
127
128
/**
 * struct iwl_cmd_header - common header of commands and responses
 *
 * Prepended to every command the driver sends to the device, and echoed
 * back at the start of every response/notification (see struct
 * iwl_rx_packet below).
 */
struct iwl_cmd_header {
	u8 cmd;		/* command ID */
	u8 flags;	/* bit 6 = IWL_CMD_FAILED_MSK in responses (see the
			 * define below); other bits presumably reserved or
			 * internal -- TODO confirm against uCode API docs */
	/*
	 * Sequence number layout, per the SEQ_* macros above:
	 *   bits 0..7:  index within the TX queue (SEQ_TO_INDEX/INDEX_TO_SEQ)
	 *   bits 8..12: TX queue number (SEQ_TO_QUEUE/QUEUE_TO_SEQ)
	 *   bit  15:    SEQ_RX_FRAME -- presumably marks device-originated
	 *               (unsolicited) frames; confirm against the transport
	 */
	__le16 sequence;
} __packed;
154
155
156#define IWL_CMD_FAILED_MSK 0x40
157
158
159#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF
160#define FH_RSCSR_FRAME_INVALID 0x55550000
161#define FH_RSCSR_FRAME_ALIGN 0x40
162
/**
 * struct iwl_rx_packet - layout of a packet received from the device
 * @len_n_flags: 32-bit size-and-flags word (see below)
 * @hdr: command header echoed back by the device
 * @data: variable-length payload (flexible array member)
 */
struct iwl_rx_packet {
	/*
	 * The low 14 bits (FH_RSCSR_FRAME_SIZE_MSK) hold the frame size;
	 * the remaining bits are flags (e.g. FH_RSCSR_FRAME_INVALID) --
	 * see the FH_RSCSR_* defines above.
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;
178
179
180
181
182
183
184
185
186
/**
 * enum CMD_MODE - flags controlling how a host command is sent
 *	(goes into struct iwl_host_cmd.flags)
 * @CMD_SYNC: wait for the command to complete (the default, value 0)
 * @CMD_ASYNC: return right after queueing the command
 * @CMD_WANT_SKB: keep the response buffer; the caller must release it
 *	with iwl_free_resp() (see below)
 * @CMD_SEND_IN_RFKILL: allow sending even while RF-kill is asserted
 */
enum CMD_MODE {
	CMD_SYNC = 0,
	CMD_ASYNC = BIT(0),
	CMD_WANT_SKB = BIT(1),
	CMD_SEND_IN_RFKILL = BIT(2),
};
193
194#define DEF_CMD_PAYLOAD_SIZE 320
195
196
197
198
199
200
201
202
/*
 * A command as laid out in device-addressable memory: header plus a
 * fixed-size payload. sizeof(struct iwl_device_cmd) defines
 * TFD_MAX_PAYLOAD_SIZE below.
 */
struct iwl_device_cmd {
	struct iwl_cmd_header hdr;	/* uCode API header */
	u8 payload[DEF_CMD_PAYLOAD_SIZE];
} __packed;
207
208#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
209
210
211
212
213
214#define IWL_MAX_CMD_TBS_PER_TFD 2
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
/**
 * enum iwl_hcmd_dataflag - per-fragment flags for iwl_host_cmd.dataflags
 * @IWL_HCMD_DFL_NOCOPY: don't copy this fragment into the command buffer;
 *	presumably the caller's memory is DMA-mapped directly and must stay
 *	valid until completion -- confirm in the transport implementation
 * @IWL_HCMD_DFL_DUP: duplicate the fragment into a separate buffer
 *	(exact semantics live in the transport .c file)
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY = BIT(0),
	IWL_HCMD_DFL_DUP = BIT(1),
};
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
/**
 * struct iwl_host_cmd - a command to be sent via iwl_trans_send_cmd()
 * @data: up to IWL_MAX_CMD_TBS_PER_TFD payload fragments
 * @resp_pkt: response packet; valid only if CMD_WANT_SKB was set in @flags
 * @_rx_page_addr: (internal) address of the page holding the response
 * @_rx_page_order: (internal) order of that page; both consumed by
 *	iwl_free_resp()
 * @handler_status: return value of the handler that processed the response
 * @flags: CMD_* bits from enum CMD_MODE
 * @len: length of each fragment in @data
 * @dataflags: IWL_HCMD_DFL_* bits for each fragment
 * @id: command id
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;
	int handler_status;

	u32 flags;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
	u8 id;
};
262
/*
 * Release the response pages of a CMD_WANT_SKB command once the caller
 * is done with cmd->resp_pkt.
 */
static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
267
/*
 * Handle to an RX buffer passed to RX handlers. The underscore-prefixed
 * fields are private: use the rxb_*() accessors below.
 */
struct iwl_rx_cmd_buffer {
	struct page *_page;	/* backing page of the RX buffer */
	int _offset;		/* offset of the packet within the page */
	bool _page_stolen;	/* set by rxb_steal_page() */
	u32 _rx_page_order;	/* page order, consumed by iwl_free_rxb() */
	unsigned int truesize;	/* presumably the true buffer size for skb
				 * accounting -- set elsewhere, confirm */
};
275
276static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
277{
278 return (void *)((unsigned long)page_address(r->_page) + r->_offset);
279}
280
/* Offset of the packet within its backing page. */
static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}
285
286static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
287{
288 r->_page_stolen = true;
289 get_page(r->_page);
290 return r->_page;
291}
292
/* Free the RX buffer's page (drops the transport's reference). */
static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
297
298#define MAX_NO_RECLAIM_CMDS 6
299
300#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
301
302
303
304
305
306#define IWL_MAX_HW_QUEUES 32
307#define IWL_MAX_TID_COUNT 8
308#define IWL_FRAME_LIMIT 64
309
310
311
312
313
314
/**
 * enum iwl_d3_status - device status reported on D3 (WoWLAN) resume
 * @IWL_D3_STATUS_ALIVE: firmware kept running; no reset occurred
 * @IWL_D3_STATUS_RESET: the device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
/**
 * struct iwl_trans_config - transport configuration supplied by the op_mode
 * @op_mode: the upper layer; stored into trans->op_mode by
 *	iwl_trans_configure() below
 * @cmd_queue: HW queue used for host commands
 * @cmd_fifo: FIFO the command queue is mapped to
 * @no_reclaim_cmds: command IDs whose responses must not trigger TX reclaim
 * @n_no_reclaim_cmds: number of entries in @no_reclaim_cmds (bounded by
 *	MAX_NO_RECLAIM_CMDS, presumably -- confirm in the transport)
 * @rx_buf_size_8k: use 8k RX buffers instead of the default size
 * @bc_table_dword: byte-count table related flag -- NOTE(review): exact
 *	semantics live in the transport implementation, confirm there
 * @queue_watchdog_timeout: TX queue watchdog timeout; units are defined by
 *	the transport (presumably jiffies or ms -- confirm)
 * @command_names: command-id-indexed name table for debug printing
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	bool rx_buf_size_8k;
	bool bc_table_dword;
	unsigned int queue_watchdog_timeout;
	const char **command_names;
};
354
355struct iwl_trans;
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418struct iwl_trans_ops {
419
420 int (*start_hw)(struct iwl_trans *iwl_trans);
421 void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
422 int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
423 bool run_in_rfkill);
424 void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
425 void (*stop_device)(struct iwl_trans *trans);
426
427 void (*d3_suspend)(struct iwl_trans *trans, bool test);
428 int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
429 bool test);
430
431 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
432
433 int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
434 struct iwl_device_cmd *dev_cmd, int queue);
435 void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
436 struct sk_buff_head *skbs);
437
438 void (*txq_enable)(struct iwl_trans *trans, int queue, int fifo,
439 int sta_id, int tid, int frame_limit, u16 ssn);
440 void (*txq_disable)(struct iwl_trans *trans, int queue);
441
442 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
443 int (*wait_tx_queue_empty)(struct iwl_trans *trans);
444
445 void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
446 void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
447 u32 (*read32)(struct iwl_trans *trans, u32 ofs);
448 u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
449 void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
450 int (*read_mem)(struct iwl_trans *trans, u32 addr,
451 void *buf, int dwords);
452 int (*write_mem)(struct iwl_trans *trans, u32 addr,
453 const void *buf, int dwords);
454 void (*configure)(struct iwl_trans *trans,
455 const struct iwl_trans_config *trans_cfg);
456 void (*set_pmi)(struct iwl_trans *trans, bool state);
457 bool (*grab_nic_access)(struct iwl_trans *trans, bool silent,
458 unsigned long *flags);
459 void (*release_nic_access)(struct iwl_trans *trans,
460 unsigned long *flags);
461 void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
462 u32 value);
463};
464
465
466
467
468
469
470
/**
 * enum iwl_trans_state - state of the transport layer
 * @IWL_TRANS_NO_FW: no firmware is running on the device
 * @IWL_TRANS_FW_ALIVE: firmware is loaded and has reported alive
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW = 0,
	IWL_TRANS_FW_ALIVE = 1,
};
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497struct iwl_trans {
498 const struct iwl_trans_ops *ops;
499 struct iwl_op_mode *op_mode;
500 const struct iwl_cfg *cfg;
501 enum iwl_trans_state state;
502
503 struct device *dev;
504 u32 hw_rev;
505 u32 hw_id;
506 char hw_id_str[52];
507
508 u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
509
510 bool pm_support;
511
512
513 struct kmem_cache *dev_cmd_pool;
514 size_t dev_cmd_headroom;
515 char dev_cmd_pool_name[50];
516
517 struct dentry *dbgfs_dir;
518
519#ifdef CONFIG_LOCKDEP
520 struct lockdep_map sync_cmd_lockdep_map;
521#endif
522
523
524
525 char trans_specific[0] __aligned(sizeof(void *));
526};
527
/*
 * Hand the op_mode's configuration to the transport. trans->op_mode is
 * assigned before the configure op runs -- NOTE(review): presumably so the
 * op may rely on it being valid; confirm against the transport code.
 */
static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
}
535
/* Prepare the HW for use, before the firmware is loaded. May sleep. */
static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return trans->ops->start_hw(trans);
}
542
/*
 * Stop the HW. May sleep. If the op_mode is leaving, the back-pointer is
 * cleared after the op runs so nothing keeps referencing a dying op_mode;
 * the state always drops back to "no firmware".
 */
static inline void iwl_trans_stop_hw(struct iwl_trans *trans,
				     bool op_mode_leaving)
{
	might_sleep();

	trans->ops->stop_hw(trans, op_mode_leaving);

	if (op_mode_leaving)
		trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}
555
/*
 * The firmware reported alive. May sleep. The state is flipped to
 * FW_ALIVE *before* the op runs -- NOTE(review): presumably so anything the
 * op triggers may already pass the state check in iwl_trans_send_cmd();
 * confirm.
 */
static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}
564
/*
 * Load and start a firmware image. May sleep. The op_mode must have set
 * trans->rx_mpdu_cmd first, hence the one-shot WARN.
 */
static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	return trans->ops->start_fw(trans, fw, run_in_rfkill);
}
575
/* Stop the device; the firmware is no longer running afterwards. May sleep. */
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	trans->ops->stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}
584
/* Enter the D3 (WoWLAN) state. May sleep. @test selects testing mode. */
static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
{
	might_sleep();
	trans->ops->d3_suspend(trans, test);
}
590
/*
 * Leave the D3 state. May sleep. The device state (alive vs. reset while
 * suspended) is reported through @status.
 */
static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test)
{
	might_sleep();
	return trans->ops->d3_resume(trans, status, test);
}
598
599static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
600 struct iwl_host_cmd *cmd)
601{
602 int ret;
603
604 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
605 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
606 return -EIO;
607 }
608
609 if (!(cmd->flags & CMD_ASYNC))
610 lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
611
612 ret = trans->ops->send_cmd(trans, cmd);
613
614 if (!(cmd->flags & CMD_ASYNC))
615 lock_map_release(&trans->sync_cmd_lockdep_map);
616
617 return ret;
618}
619
620static inline struct iwl_device_cmd *
621iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
622{
623 u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
624
625 if (unlikely(dev_cmd_ptr == NULL))
626 return NULL;
627
628 return (struct iwl_device_cmd *)
629 (dev_cmd_ptr + trans->dev_cmd_headroom);
630}
631
632static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
633 struct iwl_device_cmd *dev_cmd)
634{
635 u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;
636
637 kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
638}
639
640static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
641 struct iwl_device_cmd *dev_cmd, int queue)
642{
643 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
644 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
645
646 return trans->ops->tx(trans, skb, dev_cmd, queue);
647}
648
649static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
650 int ssn, struct sk_buff_head *skbs)
651{
652 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
653 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
654
655 trans->ops->reclaim(trans, queue, ssn, skbs);
656}
657
658static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
659{
660 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
661 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
662
663 trans->ops->txq_disable(trans, queue);
664}
665
666static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
667 int fifo, int sta_id, int tid,
668 int frame_limit, u16 ssn)
669{
670 might_sleep();
671
672 if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
673 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
674
675 trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
676 frame_limit, ssn);
677}
678
/*
 * Enable a plain (non-aggregation) TX queue: sta_id -1 and
 * tid IWL_MAX_TID_COUNT with the default frame limit and ssn 0.
 * NOTE(review): the "-1 = no station" reading is inferred from the
 * iwl_trans_txq_enable() parameter names -- confirm in the transport.
 */
static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
					   int fifo)
{
	iwl_trans_txq_enable(trans, queue, fifo, -1,
			     IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
}
685
686static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
687{
688 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
689 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
690
691 return trans->ops->wait_tx_queue_empty(trans);
692}
693
/* Add the transport's debugfs entries under @dir. */
static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
					   struct dentry *dir)
{
	return trans->ops->dbgfs_register(trans, dir);
}
699
/* Write a byte to a device register at offset @ofs. */
static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}
704
/* Write a dword to a device register at offset @ofs. */
static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}
709
/* Read a dword device register at offset @ofs. */
static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}
714
/* Read a periphery register at offset @ofs. */
static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}
719
720static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
721 u32 val)
722{
723 return trans->ops->write_prph(trans, ofs, val);
724}
725
/* Read @dwords dwords of device memory at @addr into @buf. */
static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}
731
/*
 * Read @bufsize bytes of device memory at @addr into @buf. @bufsize must
 * be a multiple of sizeof(u32); when it is a compile-time constant, a
 * violation is rejected at build time. The return value of
 * iwl_trans_read_mem() is deliberately discarded here.
 */
#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)	      \
	do {							      \
		if (__builtin_constant_p(bufsize))		      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));	      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)
738
739static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
740{
741 u32 value;
742
743 if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
744 return 0xa5a5a5a5;
745
746 return value;
747}
748
/* Write @dwords dwords from @buf to device memory at @addr. */
static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}
754
/*
 * Write a single dword @val to device memory at @addr.
 * NOTE(review): iwl_trans_write_mem() returns int, implicitly converted to
 * the u32 return type here; callers should treat non-zero as failure.
 */
static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}
760
/* Set or clear the power-management indication. */
static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	trans->ops->set_pmi(trans, state);
}
765
/* Read-modify-write: set the @mask bits of register @reg to @value. */
static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}
771
/*
 * Begin direct HW access; evaluates to true on success. __cond_lock()
 * teaches sparse that the "nic_access" context is entered when the op
 * returns true; pair every success with iwl_trans_release_nic_access().
 */
#define iwl_trans_grab_nic_access(trans, silent, flags)	\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans, silent, flags)))
775
/*
 * End direct HW access; pairs with a successful
 * iwl_trans_grab_nic_access(). __release() balances the sparse context.
 */
static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}
782
783
784
785
786int __must_check iwl_pci_register_driver(void);
787void iwl_pci_unregister_driver(void);
788
/*
 * Initialize the lockdep map used to track synchronous host commands
 * (see iwl_trans_send_cmd()). No-op unless CONFIG_LOCKDEP is enabled.
 * The static key gives all transports one shared lock class.
 */
static inline void trans_lockdep_init(struct iwl_trans *trans)
{
#ifdef CONFIG_LOCKDEP
	static struct lock_class_key __key;

	lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
			 &__key, 0);
#endif
}
798
799#endif
800