1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63#ifndef __iwl_trans_h__
64#define __iwl_trans_h__
65
66#include <linux/ieee80211.h>
67#include <linux/mm.h>
68#include <linux/lockdep.h>
69
70#include "iwl-debug.h"
71#include "iwl-config.h"
72#include "iwl-fw.h"
73#include "iwl-op-mode.h"
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
/*
 * The command sequence field packs a TX queue number (5 bits) and an
 * index within that queue (8 bits); these helpers convert both ways.
 */
#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
#define SEQ_TO_INDEX(s)	((s) & 0xff)
#define INDEX_TO_SEQ(i)	((i) & 0xff)
/* high bit of the sequence field; presumably marks frames originated by
 * the device rather than command responses -- confirm with firmware API */
#define SEQ_RX_FRAME	cpu_to_le16(0x8000)
122
123
124
125
126
127
128
/**
 * struct iwl_cmd_header - header shared by command requests and responses
 * @cmd: command ID
 * @flags: command flags; in responses, IWL_CMD_FAILED_MSK (defined below)
 *	can be checked here -- NOTE(review): inferred from proximity, confirm
 * @sequence: little-endian sequence value; carries the queue/index pair
 *	(see SEQ_TO_QUEUE/SEQ_TO_INDEX above)
 */
struct iwl_cmd_header {
	u8 cmd;
	u8 flags;
	__le16 sequence;
} __packed;
154
155
/* presumably tested against iwl_cmd_header.flags in responses -- confirm */
#define IWL_CMD_FAILED_MSK 0x40

/* mask extracting the frame length from iwl_rx_packet.len_n_flags */
#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF
/* pattern indicating an invalid/unwritten RX frame */
#define FH_RSCSR_FRAME_INVALID	0x55550000
/* alignment of RX frames in the receive buffer */
#define FH_RSCSR_FRAME_ALIGN	0x40
162
/**
 * struct iwl_rx_packet - generic packet received from the device
 * @len_n_flags: little-endian word combining the total frame length
 *	(FH_RSCSR_FRAME_SIZE_MSK bits, includes @hdr) with flag bits
 * @hdr: command header (ID, flags, sequence)
 * @data: variable-length payload following the header
 */
struct iwl_rx_packet {
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

/* total length of the packet (header included), in bytes */
static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

/* length of the payload only, i.e. total length minus the header */
static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
188
189
190
191
192
193
194
195
196
/**
 * enum CMD_MODE - flags controlling how a host command is sent
 * @CMD_SYNC: synchronous command (default, blocks until completion;
 *	holds the sync_cmd lockdep map -- see iwl_trans_send_cmd())
 * @CMD_ASYNC: return without waiting for the command to complete
 * @CMD_WANT_SKB: presumably the caller wants the response packet kept
 *	in iwl_host_cmd.resp_pkt -- confirm with the transport code
 * @CMD_SEND_IN_RFKILL: allow the command even while STATUS_RFKILL is
 *	set (checked in iwl_trans_send_cmd())
 */
enum CMD_MODE {
	CMD_SYNC		= 0,
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
};
203
/* size of the in-place payload buffer of a device command */
#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd - buffer for a command as sent to the device
 * @hdr: command header
 * @payload: fixed-size in-place payload area
 */
struct iwl_device_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[DEF_CMD_PAYLOAD_SIZE];
} __packed;

/* maximum payload a single TFD can carry, derived from the struct above */
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
219
220
221
222
223
/* number of data[]/len[]/dataflags[] slots in struct iwl_host_cmd */
#define IWL_MAX_CMD_TBS_PER_TFD 2

/**
 * enum iwl_hcmd_dataflag - per-chunk flag for iwl_host_cmd.dataflags[]
 * @IWL_HCMD_DFL_NOCOPY: do not copy the chunk; presumably the transport
 *	maps the caller's buffer directly -- confirm ownership rules with
 *	the transport implementation
 * @IWL_HCMD_DFL_DUP: duplicate the chunk into a separate allocation
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
/**
 * struct iwl_host_cmd - host command to the device
 * @data: payload chunks, up to IWL_MAX_CMD_TBS_PER_TFD of them
 * @resp_pkt: response packet, filled by the transport
 * @_rx_page_addr: page backing @resp_pkt (internal, freed by
 *	iwl_free_resp())
 * @_rx_page_order: order of that page (internal)
 * @handler_status: return value of the command's RX handler
 * @flags: CMD_* flags from enum CMD_MODE
 * @len: length of each chunk in @data
 * @dataflags: IWL_HCMD_DFL_* flag for each chunk in @data
 * @id: command ID
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;
	int handler_status;

	u32 flags;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
	u8 id;
};

/* release the response pages of a completed host command */
static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
277
/**
 * struct iwl_rx_cmd_buffer - page-backed RX buffer handed to handlers
 * @_page: backing page
 * @_offset: offset of the packet within the page
 * @_page_stolen: set when a handler took its own reference via
 *	rxb_steal_page()
 * @_rx_page_order: page allocation order, used when freeing
 * @truesize: true size accounting value for the buffer
 */
struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

/* virtual address of the packet inside the backing page */
static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

/* offset of the packet within the backing page */
static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

/*
 * Take ownership of the backing page: marks it stolen and grabs an extra
 * reference, so the caller must eventually drop that reference itself.
 */
static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

/* free the backing page (transport-internal use) */
static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
307
/* maximum number of entries in iwl_trans_config.no_reclaim_cmds */
#define MAX_NO_RECLAIM_CMDS	6

/*
 * Build a 32-bit mask with bits [lo, hi] (inclusive) set.
 * The shifts use unsigned literals: with plain int, IWL_MASK(lo, 31)
 * would left-shift into the sign bit, which is undefined behavior in C.
 */
#define IWL_MASK(lo, hi) ((1U << (hi)) | ((1U << (hi)) - (1U << (lo))))

/* Maximum number of HW queues the transport layer currently supports */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TID_COUNT	8
#define IWL_FRAME_LIMIT	64
319
320
321
322
323
324
/**
 * enum iwl_d3_status - device status on D3 (suspend) resume
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status - bit numbers for the iwl_trans.status bitmap
 * @STATUS_SYNC_HCMD_ACTIVE: a synchronous host command is in flight
 * @STATUS_DEVICE_ENABLED: the device is switched on
 * @STATUS_TPOWER_PMI: the device is in PMI power-save state
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL: RF-kill is asserted; commands are refused unless they
 *	carry CMD_SEND_IN_RFKILL (see iwl_trans_send_cmd())
 * @STATUS_FW_ERROR: the firmware hit an error; commands and TX fail with
 *	-EIO until the firmware is restarted
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL,
	STATUS_FW_ERROR,
};
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
/**
 * struct iwl_trans_config - op-mode configuration of the transport
 * @op_mode: the op-mode that will own this transport
 * @cmd_queue: HW queue number used for host commands
 * @cmd_fifo: FIFO used for host commands
 * @no_reclaim_cmds: command IDs whose responses must not be reclaimed;
 *	at most MAX_NO_RECLAIM_CMDS entries
 * @n_no_reclaim_cmds: number of valid entries in @no_reclaim_cmds
 * @rx_buf_size_8k: use 8 KiB RX buffers instead of the default
 * @bc_table_dword: presumably sets byte-count table entry width to
 *	dwords -- confirm with the transport implementation
 * @queue_watchdog_timeout: TX queue watchdog timeout
 * @command_names: table of human-readable command names for debugging
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	bool rx_buf_size_8k;
	bool bc_table_dword;
	unsigned int queue_watchdog_timeout;
	const char **command_names;
};

struct iwl_trans;
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
/**
 * struct iwl_trans_ops - transport-specific operations
 *
 * All ops must be provided except where the wrappers below check for
 * NULL first (@op_mode_leave and @set_pmi).
 *
 * @start_hw: start the underlying hardware
 * @op_mode_leave: the op-mode is leaving; clean up (optional)
 * @start_fw: load the given firmware image and start it
 * @fw_alive: firmware reported alive; @scd_addr is the scheduler address
 * @stop_device: stop the device / firmware
 * @d3_suspend: enter D3 (suspend); @test set for test-mode suspend
 * @d3_resume: resume from D3; reports device state via @status
 * @send_cmd: send a host command; called through iwl_trans_send_cmd()
 *	which enforces RF-kill/FW-error checks and lockdep tracking
 * @tx: queue an skb for transmission on @queue
 * @reclaim: free skbs up to @ssn on @queue and return them via @skbs
 * @txq_enable: configure a TX queue (fifo, station, tid, frame limit, ssn)
 * @txq_disable: de-configure a TX queue
 * @dbgfs_register: add transport entries under the debugfs @dir
 * @wait_tx_queue_empty: wait until all TX queues drain
 * @write8/@write32/@read32: direct device register access
 * @read_prph/@write_prph: periphery register access
 * @read_mem/@write_mem: device memory access in dword units
 * @configure: apply the op-mode's iwl_trans_config settings
 * @set_pmi: set the power-management PMI state (optional)
 * @grab_nic_access/@release_nic_access: bracket direct NIC access;
 *	grab returns false on failure unless @silent handling applies
 * @set_bits_mask: read-modify-write @mask bits of @reg to @value
 */
struct iwl_trans_ops {

	int (*start_hw)(struct iwl_trans *iwl_trans);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans);

	void (*d3_suspend)(struct iwl_trans *trans, bool test);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*txq_enable)(struct iwl_trans *trans, int queue, int fifo,
			   int sta_id, int tid, int frame_limit, u16 ssn);
	void (*txq_disable)(struct iwl_trans *trans, int queue);

	int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
	int (*wait_tx_queue_empty)(struct iwl_trans *trans);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	bool (*grab_nic_access)(struct iwl_trans *trans, bool silent,
				unsigned long *flags);
	void (*release_nic_access)(struct iwl_trans *trans,
				   unsigned long *flags);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
};
493
494
495
496
497
498
499
/**
 * enum iwl_trans_state - state of the transport layer
 * @IWL_TRANS_NO_FW: no firmware is running
 * @IWL_TRANS_FW_ALIVE: firmware is alive; TX/commands are allowed
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW = 0,
	IWL_TRANS_FW_ALIVE = 1,
};
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527struct iwl_trans {
528 const struct iwl_trans_ops *ops;
529 struct iwl_op_mode *op_mode;
530 const struct iwl_cfg *cfg;
531 enum iwl_trans_state state;
532 unsigned long status;
533
534 struct device *dev;
535 u32 hw_rev;
536 u32 hw_id;
537 char hw_id_str[52];
538
539 u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
540
541 bool pm_support;
542
543
544 struct kmem_cache *dev_cmd_pool;
545 size_t dev_cmd_headroom;
546 char dev_cmd_pool_name[50];
547
548 struct dentry *dbgfs_dir;
549
550#ifdef CONFIG_LOCKDEP
551 struct lockdep_map sync_cmd_lockdep_map;
552#endif
553
554
555
556 char trans_specific[0] __aligned(sizeof(void *));
557};
558
/* attach the op-mode and forward its configuration to the transport */
static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
}

/* start the underlying hardware; may sleep */
static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return trans->ops->start_hw(trans);
}
573
/*
 * Detach the op-mode from the transport: let the transport clean up
 * (op is optional), then drop the op-mode pointer and mark no firmware.
 * May sleep.
 */
static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

/* firmware reported alive: mark it and notify the transport; may sleep */
static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}
594
/*
 * Load and start a firmware image. The caller must have set
 * trans->rx_mpdu_cmd first (warned on otherwise). Clears any stale
 * FW-error status before starting. May sleep.
 */
static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	return trans->ops->start_fw(trans, fw, run_in_rfkill);
}

/* stop the device/firmware and mark the transport as having no FW; may sleep */
static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	trans->ops->stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}
615
/* enter D3 (suspend); @test selects test-mode suspend; may sleep */
static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
{
	might_sleep();
	trans->ops->d3_suspend(trans, test);
}

/* resume from D3; device state is reported through @status; may sleep */
static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test)
{
	might_sleep();
	return trans->ops->d3_resume(trans, status, test);
}
629
630static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
631 struct iwl_host_cmd *cmd)
632{
633 int ret;
634
635 if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
636 test_bit(STATUS_RFKILL, &trans->status)))
637 return -ERFKILL;
638
639 if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
640 return -EIO;
641
642 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
643 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
644 return -EIO;
645 }
646
647 if (!(cmd->flags & CMD_ASYNC))
648 lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
649
650 ret = trans->ops->send_cmd(trans, cmd);
651
652 if (!(cmd->flags & CMD_ASYNC))
653 lock_map_release(&trans->sync_cmd_lockdep_map);
654
655 return ret;
656}
657
/*
 * Allocate a TX command from the transport's slab pool (GFP_ATOMIC).
 * Returns a pointer past the transport-private headroom, or NULL on
 * allocation failure. Free with iwl_trans_free_tx_cmd().
 */
static inline struct iwl_device_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);

	if (unlikely(dev_cmd_ptr == NULL))
		return NULL;

	return (struct iwl_device_cmd *)
			(dev_cmd_ptr + trans->dev_cmd_headroom);
}

/* return a TX command to the pool; undoes the headroom offset first */
static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_cmd *dev_cmd)
{
	u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;

	kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
}
677
678static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
679 struct iwl_device_cmd *dev_cmd, int queue)
680{
681 if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
682 return -EIO;
683
684 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
685 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
686
687 return trans->ops->tx(trans, skb, dev_cmd, queue);
688}
689
690static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
691 int ssn, struct sk_buff_head *skbs)
692{
693 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
694 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
695
696 trans->ops->reclaim(trans, queue, ssn, skbs);
697}
698
699static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
700{
701 trans->ops->txq_disable(trans, queue);
702}
703
704static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
705 int fifo, int sta_id, int tid,
706 int frame_limit, u16 ssn)
707{
708 might_sleep();
709
710 if (unlikely((trans->state != IWL_TRANS_FW_ALIVE)))
711 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
712
713 trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
714 frame_limit, ssn);
715}
716
717static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
718 int fifo)
719{
720 iwl_trans_txq_enable(trans, queue, fifo, -1,
721 IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
722}
723
724static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
725{
726 if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
727 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
728
729 return trans->ops->wait_tx_queue_empty(trans);
730}
731
732static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
733 struct dentry *dir)
734{
735 return trans->ops->dbgfs_register(trans, dir);
736}
737
/* 8-bit device register write */
static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

/* 32-bit device register write */
static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

/* 32-bit device register read */
static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

/* periphery register read */
static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}
757
758static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
759 u32 val)
760{
761 return trans->ops->write_prph(trans, ofs, val);
762}
763
/* read @dwords 32-bit words of device memory at @addr into @buf */
static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

/*
 * Byte-count convenience wrapper around iwl_trans_read_mem(); the size
 * must be a whole number of dwords (enforced at compile time when the
 * size is a constant).
 */
#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)

/* read a single 32-bit word; returns the 0xa5a5a5a5 poison value on error */
static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}
786
/* write @dwords 32-bit words from @buf to device memory at @addr */
static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

/*
 * Write a single 32-bit word.
 * NOTE(review): iwl_trans_write_mem() returns int (possibly a negative
 * errno) but this wrapper returns u32, so errors come back as large
 * unsigned values -- confirm callers don't test the sign.
 */
static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}
798
/* set the PMI power state; optional op, silently skipped if unimplemented */
static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

/* read-modify-write: set the @mask bits of @reg to @value */
static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

/*
 * Acquire direct NIC access; evaluates non-zero on success. Wrapped in
 * __cond_lock() so sparse tracks the conditional lock acquisition.
 */
#define iwl_trans_grab_nic_access(trans, silent, flags)	\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans, silent, flags)))

/* release access acquired by iwl_trans_grab_nic_access() */
static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}
821
/*
 * Record a firmware error. The op-mode is notified only on the first
 * error (test_and_set_bit), so repeated reports don't re-trigger
 * recovery; a missing op-mode is a bug and only warns.
 */
static inline void iwl_trans_fw_error(struct iwl_trans *trans)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
		iwl_op_mode_nic_error(trans->op_mode);
}
831
832
833
834
/* PCI bus glue entry points (implemented in the PCIe transport) */
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

/*
 * Initialize the lockdep map used to track synchronous host commands
 * (see iwl_trans_send_cmd()); no-op when lockdep is disabled.
 */
static inline void trans_lockdep_init(struct iwl_trans *trans)
{
#ifdef CONFIG_LOCKDEP
	static struct lock_class_key __key;

	lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
			 &__key, 0);
#endif
}
847
848#endif
849