1#ifndef _LINUX_IRQ_H
2#define _LINUX_IRQ_H
3
4
5
6
7
8
9
10
11
12#include <linux/smp.h>
13#include <linux/linkage.h>
14#include <linux/cache.h>
15#include <linux/spinlock.h>
16#include <linux/cpumask.h>
17#include <linux/gfp.h>
18#include <linux/irqhandler.h>
19#include <linux/irqreturn.h>
20#include <linux/irqnr.h>
21#include <linux/errno.h>
22#include <linux/topology.h>
23#include <linux/wait.h>
24#include <linux/io.h>
25
26#include <asm/irq.h>
27#include <asm/ptrace.h>
28#include <asm/irq_regs.h>
29
30struct seq_file;
31struct module;
32struct msi_msg;
33enum irqchip_irq_state;
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
/*
 * IRQ line status bits.
 *
 * Bits 0-7 are reserved for the core: the IRQ_TYPE_* values describe the
 * hardware trigger sense of the line and IRQ_TYPE_PROBE marks a line that
 * is being autoprobed.  Bits 8-19 are per-line status flags; the subset in
 * IRQF_MODIFY_MASK below may be changed via irq_modify_status().
 */
enum {
	IRQ_TYPE_NONE = 0x00000000,	/* default, unspecified sense */
	IRQ_TYPE_EDGE_RISING = 0x00000001,	/* rising edge triggered */
	IRQ_TYPE_EDGE_FALLING = 0x00000002,	/* falling edge triggered */
	IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
	IRQ_TYPE_LEVEL_HIGH = 0x00000004,	/* level triggered, active high */
	IRQ_TYPE_LEVEL_LOW = 0x00000008,	/* level triggered, active low */
	IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
	IRQ_TYPE_SENSE_MASK = 0x0000000f,	/* mask of all trigger bits */
	IRQ_TYPE_DEFAULT = IRQ_TYPE_SENSE_MASK,

	IRQ_TYPE_PROBE = 0x00000010,	/* autodetection in progress */

	IRQ_LEVEL = (1 << 8),		/* IRQ is level triggered */
	IRQ_PER_CPU = (1 << 9),		/* IRQ is per CPU */
	IRQ_NOPROBE = (1 << 10),	/* IRQ is not valid for autoprobing */
	IRQ_NOREQUEST = (1 << 11),	/* IRQ cannot be requested */
	IRQ_NOAUTOEN = (1 << 12),	/* no autoenable on request_irq() */
	IRQ_NO_BALANCING = (1 << 13),	/* excluded from irq balancing */
	IRQ_MOVE_PCNTXT = (1 << 14),	/* can be migrated in process context */
	IRQ_NESTED_THREAD = (1 << 15),	/* nested into another threaded handler */
	IRQ_NOTHREAD = (1 << 16),	/* must not be threaded */
	IRQ_PER_CPU_DEVID = (1 << 17),	/* dev_id is a per-cpu variable */
	IRQ_IS_POLLED = (1 << 18),	/* polled; spurious detection excluded */
	IRQ_DISABLE_UNLAZY = (1 << 19),	/* disable lazy irq disable */
};

/* Status bits that irq_modify_status() is allowed to change. */
#define IRQF_MODIFY_MASK \
	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
	 IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)

/* Either flag takes the interrupt out of the balancing pool. */
#define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
111
112
113
114
115
116
117
118
119
120
/*
 * Return values for chip->irq_set_affinity().
 *
 * IRQ_SET_MASK_OK        - core code updates the cached affinity mask
 * IRQ_SET_MASK_OK_NOCOPY - chip already updated the cached affinity mask
 * IRQ_SET_MASK_OK_DONE   - like OK_NOCOPY, and hierarchy traversal stops
 *                          (presumably no further parent handling needed —
 *                          confirm against kernel/irq/manage.c)
 */
enum {
	IRQ_SET_MASK_OK = 0,
	IRQ_SET_MASK_OK_NOCOPY,
	IRQ_SET_MASK_OK_DONE,
};
126
127struct msi_desc;
128struct irq_domain;
129
130
131
132
133
134
135
136
137
138
139
140
141
/**
 * struct irq_common_data - per irq data shared by all irqchips
 * @state_use_accessors: status information; never access directly, use
 *			the irqd_*() accessor functions below
 * @node:		node index, useful for balancing (NUMA builds only)
 * @handler_data:	per-IRQ data for the irq_chip methods
 * @msi_desc:		MSI descriptor
 * @affinity:		IRQ affinity on SMP
 * @ipi_offset:		offset of the first IPI target cpu in @affinity
 *			(GENERIC_IRQ_IPI builds only)
 */
struct irq_common_data {
	unsigned int __private state_use_accessors;
#ifdef CONFIG_NUMA
	unsigned int node;
#endif
	void *handler_data;
	struct msi_desc *msi_desc;
	cpumask_var_t affinity;
#ifdef CONFIG_GENERIC_IRQ_IPI
	unsigned int ipi_offset;
#endif
};
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
/**
 * struct irq_data - per irq chip data passed down to chip functions
 * @mask:		precomputed bitmask for accessing the chip registers
 * @irq:		interrupt number
 * @hwirq:		hardware interrupt number, local to the irq domain
 * @common:		data shared by all irqchips of this irq
 * @chip:		low level interrupt hardware access
 * @domain:		interrupt translation domain responsible for the
 *			irq <-> hwirq mapping
 * @parent_data:	pointer to parent struct irq_data in a hierarchical
 *			irq domain (IRQ_DOMAIN_HIERARCHY builds only)
 * @chip_data:		platform-specific per-chip private data for the chip
 *			methods, to allow shared chip implementations
 */
struct irq_data {
	u32 mask;
	unsigned int irq;
	unsigned long hwirq;
	struct irq_common_data *common;
	struct irq_chip *chip;
	struct irq_domain *domain;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	struct irq_data *parent_data;
#endif
	void *chip_data;
};
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
/*
 * Bit masks for irq_common_data.state_use_accessors.  Accessed only
 * through the irqd_*() helpers below.
 *
 * IRQD_TRIGGER_MASK		- trigger type bits (mirror IRQ_TYPE_SENSE_MASK)
 * IRQD_SETAFFINITY_PENDING	- affinity change is pending
 * IRQD_NO_BALANCING		- balancing disabled for this IRQ
 * IRQD_PER_CPU			- interrupt is per cpu
 * IRQD_AFFINITY_SET		- affinity was set explicitly
 * IRQD_LEVEL			- interrupt is level triggered
 * IRQD_WAKEUP_STATE		- wakeup mode armed (PM wakeup source)
 * IRQD_MOVE_PCNTXT		- may be moved in process context
 * IRQD_IRQ_DISABLED		- disabled state of the interrupt
 * IRQD_IRQ_MASKED		- masked state of the interrupt
 * IRQD_IRQ_INPROGRESS		- in progress on a CPU
 * IRQD_WAKEUP_ARMED		- wakeup mode armed for a suspend transition
 * IRQD_FORWARDED_TO_VCPU	- interrupt state forwarded to a VCPU
 * IRQD_AFFINITY_MANAGED	- affinity is auto-managed by the kernel
 */
enum {
	IRQD_TRIGGER_MASK = 0xf,
	IRQD_SETAFFINITY_PENDING = (1 << 8),
	IRQD_NO_BALANCING = (1 << 10),
	IRQD_PER_CPU = (1 << 11),
	IRQD_AFFINITY_SET = (1 << 12),
	IRQD_LEVEL = (1 << 13),
	IRQD_WAKEUP_STATE = (1 << 14),
	IRQD_MOVE_PCNTXT = (1 << 15),
	IRQD_IRQ_DISABLED = (1 << 16),
	IRQD_IRQ_MASKED = (1 << 17),
	IRQD_IRQ_INPROGRESS = (1 << 18),
	IRQD_WAKEUP_ARMED = (1 << 19),
	IRQD_FORWARDED_TO_VCPU = (1 << 20),
	IRQD_AFFINITY_MANAGED = (1 << 21),
};

/* Local shorthand for the accessors below; #undef'ed after last use. */
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
220
221static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
222{
223 return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING;
224}
225
226static inline bool irqd_is_per_cpu(struct irq_data *d)
227{
228 return __irqd_to_state(d) & IRQD_PER_CPU;
229}
230
231static inline bool irqd_can_balance(struct irq_data *d)
232{
233 return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING));
234}
235
236static inline bool irqd_affinity_was_set(struct irq_data *d)
237{
238 return __irqd_to_state(d) & IRQD_AFFINITY_SET;
239}
240
241static inline void irqd_mark_affinity_was_set(struct irq_data *d)
242{
243 __irqd_to_state(d) |= IRQD_AFFINITY_SET;
244}
245
246static inline u32 irqd_get_trigger_type(struct irq_data *d)
247{
248 return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
249}
250
251
252
253
254static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
255{
256 __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
257 __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
258}
259
260static inline bool irqd_is_level_type(struct irq_data *d)
261{
262 return __irqd_to_state(d) & IRQD_LEVEL;
263}
264
265static inline bool irqd_is_wakeup_set(struct irq_data *d)
266{
267 return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
268}
269
270static inline bool irqd_can_move_in_process_context(struct irq_data *d)
271{
272 return __irqd_to_state(d) & IRQD_MOVE_PCNTXT;
273}
274
275static inline bool irqd_irq_disabled(struct irq_data *d)
276{
277 return __irqd_to_state(d) & IRQD_IRQ_DISABLED;
278}
279
280static inline bool irqd_irq_masked(struct irq_data *d)
281{
282 return __irqd_to_state(d) & IRQD_IRQ_MASKED;
283}
284
285static inline bool irqd_irq_inprogress(struct irq_data *d)
286{
287 return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS;
288}
289
290static inline bool irqd_is_wakeup_armed(struct irq_data *d)
291{
292 return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
293}
294
295static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d)
296{
297 return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU;
298}
299
300static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d)
301{
302 __irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU;
303}
304
305static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
306{
307 __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
308}
309
310static inline bool irqd_affinity_is_managed(struct irq_data *d)
311{
312 return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
313}
314
315#undef __irqd_to_state
316
317static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
318{
319 return d->hwirq;
320}
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
/**
 * struct irq_chip - hardware interrupt chip descriptor
 *
 * @parent_device:	pointer to parent device for irqchip
 * @name:		name for /proc/interrupts
 * @irq_startup:	start up the interrupt
 * @irq_shutdown:	shut down the interrupt
 * @irq_enable:		enable the interrupt
 * @irq_disable:	disable the interrupt
 * @irq_ack:		start of a new interrupt
 * @irq_mask:		mask an interrupt source
 * @irq_mask_ack:	ack and mask an interrupt source
 * @irq_unmask:		unmask an interrupt source
 * @irq_eoi:		end of interrupt
 * @irq_set_affinity:	set the CPU affinity on SMP machines
 * @irq_retrigger:	resend an IRQ to the CPU
 * @irq_set_type:	set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @irq_set_wake:	enable/disable power-management wake-on of an IRQ
 * @irq_bus_lock:	lock access to slow-bus (e.g. i2c) chips
 * @irq_bus_sync_unlock: sync and unlock slow-bus (e.g. i2c) chips
 * @irq_cpu_online:	configure an interrupt source for a secondary CPU
 * @irq_cpu_offline:	un-configure an interrupt source for a secondary CPU
 * @irq_suspend:	called from core code on suspend, once per chip
 * @irq_resume:		called from core code on resume, once per chip
 * @irq_pm_shutdown:	called from core code on shutdown, once per chip
 * @irq_calc_mask:	optional: set irq_data.mask for special cases
 * @irq_print_chip:	optional: print special chip info in show_interrupts
 * @irq_request_resources: optional: request resources before any other
 *			callback related to this irq is invoked
 * @irq_release_resources: optional: release resources acquired with
 *			irq_request_resources
 * @irq_compose_msi_msg: optional: compose message content for MSI
 * @irq_write_msi_msg:	optional: write message content for MSI
 * @irq_get_irqchip_state: return the internal state of an interrupt
 * @irq_set_irqchip_state: set the internal state of an interrupt
 * @irq_set_vcpu_affinity: optional: target a vCPU in a virtual machine
 * @ipi_send_single:	send a single IPI to a destination cpu
 * @ipi_send_mask:	send an IPI to destination cpus in a cpumask
 * @flags:		chip specific flags (IRQCHIP_* below)
 */
struct irq_chip {
	struct device *parent_device;
	const char *name;
	unsigned int (*irq_startup)(struct irq_data *data);
	void (*irq_shutdown)(struct irq_data *data);
	void (*irq_enable)(struct irq_data *data);
	void (*irq_disable)(struct irq_data *data);

	void (*irq_ack)(struct irq_data *data);
	void (*irq_mask)(struct irq_data *data);
	void (*irq_mask_ack)(struct irq_data *data);
	void (*irq_unmask)(struct irq_data *data);
	void (*irq_eoi)(struct irq_data *data);

	int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
	int (*irq_retrigger)(struct irq_data *data);
	int (*irq_set_type)(struct irq_data *data, unsigned int flow_type);
	int (*irq_set_wake)(struct irq_data *data, unsigned int on);

	void (*irq_bus_lock)(struct irq_data *data);
	void (*irq_bus_sync_unlock)(struct irq_data *data);

	void (*irq_cpu_online)(struct irq_data *data);
	void (*irq_cpu_offline)(struct irq_data *data);

	void (*irq_suspend)(struct irq_data *data);
	void (*irq_resume)(struct irq_data *data);
	void (*irq_pm_shutdown)(struct irq_data *data);

	void (*irq_calc_mask)(struct irq_data *data);

	void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
	int (*irq_request_resources)(struct irq_data *data);
	void (*irq_release_resources)(struct irq_data *data);

	void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg);
	void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);

	int (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state);
	int (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state);

	int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);

	void (*ipi_send_single)(struct irq_data *data, unsigned int cpu);
	void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);

	unsigned long flags;
};
412
413
414
415
416
417
418
419
420
421
422
423
424
/*
 * irq_chip specific flags (struct irq_chip::flags).
 *
 * IRQCHIP_SET_TYPE_MASKED:	mask before calling chip->irq_set_type()
 * IRQCHIP_EOI_IF_HANDLED:	only issue irq_eoi() on unmasked interrupt
 * IRQCHIP_MASK_ON_SUSPEND:	mask non-wake irqs in the suspend path
 * IRQCHIP_ONOFFLINE_ENABLED:	only call irq_on/off_line callbacks when enabled
 * IRQCHIP_SKIP_SET_WAKE:	skip the chip.irq_set_wake() callback
 * IRQCHIP_ONESHOT_SAFE:	oneshot does not require mask/unmask
 * IRQCHIP_EOI_THREADED:	chip requires eoi() on unmask in threaded mode
 */
enum {
	IRQCHIP_SET_TYPE_MASKED = (1 << 0),
	IRQCHIP_EOI_IF_HANDLED = (1 << 1),
	IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
	IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
	IRQCHIP_SKIP_SET_WAKE = (1 << 4),
	IRQCHIP_ONESHOT_SAFE = (1 << 5),
	IRQCHIP_EOI_THREADED = (1 << 6),
};
434
435#include <linux/irqdesc.h>
436
437
438
439
440#include <asm/hw_irq.h>
441
/*
 * Conservative fallbacks for values an architecture may pre-define in
 * <asm/hw_irq.h> (included just above).
 */
#ifndef NR_IRQS_LEGACY
# define NR_IRQS_LEGACY 0
#endif

#ifndef ARCH_IRQ_INIT_FLAGS
# define ARCH_IRQ_INIT_FLAGS 0
#endif

#define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS
451
struct irqaction;
/* Low-level install/remove of an irqaction (no request_irq bookkeeping). */
extern int setup_irq(unsigned int irq, struct irqaction *new);
extern void remove_irq(unsigned int irq, struct irqaction *act);
/* Per-cpu variants of the above. */
extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);

/* Invoke the chip's irq_cpu_online/offline callbacks on the current CPU. */
extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
/* Affinity control; _locked expects the descriptor lock to be held. */
extern int irq_set_affinity_locked(struct irq_data *data,
				   const struct cpumask *cpumask, bool force);
extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);

/* CPU hot-unplug: migrate all interrupts away from this CPU. */
extern void irq_migrate_all_off_this_cpu(void);
465
/*
 * Pending-move handling: real implementations only when irq migration is
 * deferred to interrupt context (SMP + GENERIC_PENDING_IRQ), empty stubs
 * otherwise.
 */
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
void irq_move_irq(struct irq_data *data);
void irq_move_masked_irq(struct irq_data *data);
#else
static inline void irq_move_irq(struct irq_data *data) { }
static inline void irq_move_masked_irq(struct irq_data *data) { }
#endif
473
474extern int no_irq_affinity;
475
/*
 * Associate @irq with @parent_irq for the software-resend mechanism;
 * a no-op returning success when HARDIRQS_SW_RESEND is not configured.
 */
#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq);
#else
static inline int irq_set_parent(int irq, int parent_irq)
{
	return 0;
}
#endif
484
485
486
487
488
/*
 * Built-in flow handlers, to be assigned via irq_set_handler() or
 * irq_set_chip_and_handler().
 */
extern void handle_level_irq(struct irq_desc *desc);
extern void handle_fasteoi_irq(struct irq_desc *desc);
extern void handle_edge_irq(struct irq_desc *desc);
extern void handle_edge_eoi_irq(struct irq_desc *desc);
extern void handle_simple_irq(struct irq_desc *desc);
extern void handle_untracked_irq(struct irq_desc *desc);
extern void handle_percpu_irq(struct irq_desc *desc);
extern void handle_percpu_devid_irq(struct irq_desc *desc);
extern void handle_bad_irq(struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);

extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
/* Runtime-PM reference counting for the irqchip's parent device. */
extern int irq_chip_pm_get(struct irq_data *data);
extern int irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/* Helpers that forward a chip callback to the parent irq_data. */
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
extern void irq_chip_mask_parent(struct irq_data *data);
extern void irq_chip_unmask_parent(struct irq_data *data);
extern void irq_chip_eoi_parent(struct irq_data *data);
extern int irq_chip_set_affinity_parent(struct irq_data *data,
					const struct cpumask *dest,
					bool force);
extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
					     void *vcpu_info);
extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
#endif

/* Spurious-interrupt bookkeeping, called from the flow handlers. */
extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
522
523
524
525extern int noirqdebug_setup(char *str);
526
527
528extern int can_request_irq(unsigned int irq, unsigned long irqflags);
529
530
531extern struct irq_chip no_irq_chip;
532extern struct irq_chip dummy_irq_chip;
533
534extern void
535irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
536 irq_flow_handler_t handle, const char *name);
537
538static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
539 irq_flow_handler_t handle)
540{
541 irq_set_chip_and_handler_name(irq, chip, handle, NULL);
542}
543
544extern int irq_set_percpu_devid(unsigned int irq);
545extern int irq_set_percpu_devid_partition(unsigned int irq,
546 const struct cpumask *affinity);
547extern int irq_get_percpu_devid_partition(unsigned int irq,
548 struct cpumask *affinity);
549
550extern void
551__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
552 const char *name);
553
554static inline void
555irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
556{
557 __irq_set_handler(irq, handle, 0, NULL);
558}
559
560
561
562
563
564
565static inline void
566irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
567{
568 __irq_set_handler(irq, handle, 1, NULL);
569}
570
571
572
573
574
575
576void
577irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
578 void *data);
579
580void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
581
/* Set status flags on the interrupt line without clearing any. */
static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
{
	const unsigned long clr = 0;

	irq_modify_status(irq, clr, set);
}
586
/* Clear status flags on the interrupt line without setting any. */
static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
{
	const unsigned long set = 0;

	irq_modify_status(irq, clr, set);
}
591
592static inline void irq_set_noprobe(unsigned int irq)
593{
594 irq_modify_status(irq, 0, IRQ_NOPROBE);
595}
596
597static inline void irq_set_probe(unsigned int irq)
598{
599 irq_modify_status(irq, IRQ_NOPROBE, 0);
600}
601
602static inline void irq_set_nothread(unsigned int irq)
603{
604 irq_modify_status(irq, 0, IRQ_NOTHREAD);
605}
606
607static inline void irq_set_thread(unsigned int irq)
608{
609 irq_modify_status(irq, IRQ_NOTHREAD, 0);
610}
611
612static inline void irq_set_nested_thread(unsigned int irq, bool nest)
613{
614 if (nest)
615 irq_set_status_flags(irq, IRQ_NESTED_THREAD);
616 else
617 irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
618}
619
620static inline void irq_set_percpu_devid_flags(unsigned int irq)
621{
622 irq_set_status_flags(irq,
623 IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
624 IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
625}
626
627
628extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
629extern int irq_set_handler_data(unsigned int irq, void *data);
630extern int irq_set_chip_data(unsigned int irq, void *data);
631extern int irq_set_irq_type(unsigned int irq, unsigned int type);
632extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
633extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
634 struct msi_desc *entry);
635extern struct irq_data *irq_get_irq_data(unsigned int irq);
636
637static inline struct irq_chip *irq_get_chip(unsigned int irq)
638{
639 struct irq_data *d = irq_get_irq_data(irq);
640 return d ? d->chip : NULL;
641}
642
643static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
644{
645 return d->chip;
646}
647
648static inline void *irq_get_chip_data(unsigned int irq)
649{
650 struct irq_data *d = irq_get_irq_data(irq);
651 return d ? d->chip_data : NULL;
652}
653
654static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
655{
656 return d->chip_data;
657}
658
659static inline void *irq_get_handler_data(unsigned int irq)
660{
661 struct irq_data *d = irq_get_irq_data(irq);
662 return d ? d->common->handler_data : NULL;
663}
664
665static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
666{
667 return d->common->handler_data;
668}
669
670static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
671{
672 struct irq_data *d = irq_get_irq_data(irq);
673 return d ? d->common->msi_desc : NULL;
674}
675
676static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
677{
678 return d->common->msi_desc;
679}
680
681static inline u32 irq_get_trigger_type(unsigned int irq)
682{
683 struct irq_data *d = irq_get_irq_data(irq);
684 return d ? irqd_get_trigger_type(d) : 0;
685}
686
/* NUMA node of the interrupt; always node 0 when CONFIG_NUMA is off. */
static inline int irq_common_data_get_node(struct irq_common_data *d)
{
#ifdef CONFIG_NUMA
	return d->node;
#else
	return 0;
#endif
}
695
696static inline int irq_data_get_node(struct irq_data *d)
697{
698 return irq_common_data_get_node(d->common);
699}
700
701static inline struct cpumask *irq_get_affinity_mask(int irq)
702{
703 struct irq_data *d = irq_get_irq_data(irq);
704
705 return d ? d->common->affinity : NULL;
706}
707
708static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
709{
710 return d->common->affinity;
711}
712
713unsigned int arch_dynirq_lower_bound(unsigned int from);
714
715int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
716 struct module *owner, const struct cpumask *affinity);
717
718
/* Allocate @cnt descriptors, starting at @from; @irq == -1 means "anywhere". */
#define irq_alloc_descs(irq, from, cnt, node) \
	__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)

/* Allocate a single descriptor at any free interrupt number. */
#define irq_alloc_desc(node) \
	irq_alloc_descs(-1, 0, 1, node)

/* Allocate a single descriptor at exactly interrupt number @at. */
#define irq_alloc_desc_at(at, node) \
	irq_alloc_descs(at, at, 1, node)

/* Allocate a single descriptor at or above @from. */
#define irq_alloc_desc_from(from, node) \
	irq_alloc_descs(-1, from, 1, node)

/* Allocate @cnt consecutive descriptors at or above @from. */
#define irq_alloc_descs_from(from, cnt, node) \
	irq_alloc_descs(-1, from, cnt, node)
733
734void irq_free_descs(unsigned int irq, unsigned int cnt);
/* Free exactly one interrupt descriptor. */
static inline void irq_free_desc(unsigned int irq)
{
	const unsigned int cnt = 1;

	irq_free_descs(irq, cnt);
}
739
740#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
741unsigned int irq_alloc_hwirqs(int cnt, int node);
/* Allocate exactly one hw interrupt number on @node. */
static inline unsigned int irq_alloc_hwirq(int node)
{
	const int cnt = 1;

	return irq_alloc_hwirqs(cnt, node);
}
746void irq_free_hwirqs(unsigned int from, int cnt);
/*
 * Free a single hw interrupt number.
 *
 * The previous version wrote "return irq_free_hwirqs(irq, 1);", i.e. a
 * return statement with a (void) expression inside a void function.  That
 * violates C99 6.8.6.4 (accepted only as a GNU/C++ extension); call the
 * function as a plain statement instead.  Behavior is unchanged.
 */
static inline void irq_free_hwirq(unsigned int irq)
{
	irq_free_hwirqs(irq, 1);
}
751int arch_setup_hwirq(unsigned int irq, int node);
752void arch_teardown_hwirq(unsigned int irq);
753#endif
754
755#ifdef CONFIG_GENERIC_IRQ_LEGACY
756void irq_init_desc(unsigned int irq);
757#endif
758
759
760
761
762
763
764
765
766
767
768
/**
 * struct irq_chip_regs - register offsets for struct irq_chip_generic
 * @enable:	enable register offset
 * @disable:	disable register offset
 * @mask:	mask register offset
 * @ack:	ack register offset
 * @eoi:	eoi register offset
 * @type:	type register offset
 * @polarity:	polarity register offset
 */
struct irq_chip_regs {
	unsigned long enable;
	unsigned long disable;
	unsigned long mask;
	unsigned long ack;
	unsigned long eoi;
	unsigned long type;
	unsigned long polarity;
};
778
779
780
781
782
783
784
785
786
787
788
789
790
791
/**
 * struct irq_chip_type - generic interrupt chip instance for chip variants
 * @chip:		the chip implementation
 * @regs:		register offsets for this chip
 * @handler:		flow handler associated with this chip
 * @type:		chip can handle these flow types (IRQ_TYPE_* bits)
 * @mask_cache_priv:	cached mask register, private to this chip type
 * @mask_cache:		pointer to the cached mask register in use
 *
 * A irq_generic_chip can have several instances of irq_chip_type when
 * it requires different functions and register offsets for different
 * flow types.
 */
struct irq_chip_type {
	struct irq_chip chip;
	struct irq_chip_regs regs;
	irq_flow_handler_t handler;
	u32 type;
	u32 mask_cache_priv;
	u32 *mask_cache;
};
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834struct irq_chip_generic {
835 raw_spinlock_t lock;
836 void __iomem *reg_base;
837 u32 (*reg_readl)(void __iomem *addr);
838 void (*reg_writel)(u32 val, void __iomem *addr);
839 void (*suspend)(struct irq_chip_generic *gc);
840 void (*resume)(struct irq_chip_generic *gc);
841 unsigned int irq_base;
842 unsigned int irq_cnt;
843 u32 mask_cache;
844 u32 type_cache;
845 u32 polarity_cache;
846 u32 wake_enabled;
847 u32 wake_active;
848 unsigned int num_ct;
849 void *private;
850 unsigned long installed;
851 unsigned long unused;
852 struct irq_domain *domain;
853 struct list_head list;
854 struct irq_chip_type chip_types[0];
855};
856
857
858
859
860
861
862
863
864
865
866
/**
 * enum irq_gc_flags - initialization flags for generic irq chips
 * @IRQ_GC_INIT_MASK_CACHE:	initialize the mask_cache by reading the
 *				mask register
 * @IRQ_GC_INIT_NESTED_LOCK:	set the lock class of the irqs to nested for
 *				irq chips which need to call irq_set_wake()
 *				on the parent irq
 * @IRQ_GC_MASK_CACHE_PER_TYPE:	mask caches are kept per chip type
 * @IRQ_GC_NO_MASK:		do not calculate irq_data->mask
 * @IRQ_GC_BE_IO:		use big-endian register accesses (default: LE)
 */
enum irq_gc_flags {
	IRQ_GC_INIT_MASK_CACHE = 1 << 0,
	IRQ_GC_INIT_NESTED_LOCK = 1 << 1,
	IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2,
	IRQ_GC_NO_MASK = 1 << 3,
	IRQ_GC_BE_IO = 1 << 4,
};
874
875
876
877
878
879
880
881
882
883
884struct irq_domain_chip_generic {
885 unsigned int irqs_per_chip;
886 unsigned int num_chips;
887 unsigned int irq_flags_to_clear;
888 unsigned int irq_flags_to_set;
889 enum irq_gc_flags gc_flags;
890 struct irq_chip_generic *gc[0];
891};
892
893
/* Generic chip callback implementations, usable as irq_chip methods. */
void irq_gc_noop(struct irq_data *d);
void irq_gc_mask_disable_reg(struct irq_data *d);
void irq_gc_mask_set_bit(struct irq_data *d);
void irq_gc_mask_clr_bit(struct irq_data *d);
void irq_gc_unmask_enable_reg(struct irq_data *d);
void irq_gc_ack_set_bit(struct irq_data *d);
void irq_gc_ack_clr_bit(struct irq_data *d);
void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
void irq_gc_eoi(struct irq_data *d);
int irq_gc_set_wake(struct irq_data *d, unsigned int on);
904
905
906int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
907 irq_hw_number_t hw_irq);
908struct irq_chip_generic *
909irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
910 void __iomem *reg_base, irq_flow_handler_t handler);
911void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
912 enum irq_gc_flags flags, unsigned int clr,
913 unsigned int set);
914int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
915void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
916 unsigned int clr, unsigned int set);
917
918struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);
919
920int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
921 int num_ct, const char *name,
922 irq_flow_handler_t handler,
923 unsigned int clr, unsigned int set,
924 enum irq_gc_flags flags);
925
/*
 * Wrapper around __irq_alloc_domain_generic_chips() which additionally
 * enforces at build time (where the argument is a compile-time constant)
 * that a single chip handles at most 32 interrupts — the bitfields in
 * struct irq_chip_generic are 32 bit wide.
 */
#define irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,	\
				       handler,	clr, set, flags)	\
({									\
	MAYBE_BUILD_BUG_ON(irqs_per_chip > 32);				\
	__irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,\
					 handler, clr, set, flags);	\
})
933
934static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
935{
936 return container_of(d->chip, struct irq_chip_type, chip);
937}
938
/*
 * Build a 32 bit mask with the low @n bits set; n == 32 yields all ones.
 * The shift operand must be unsigned: "1 << 31" left-shifts into the sign
 * bit of a signed int, which is undefined behaviour (C99 6.5.7).
 */
#define IRQ_MSK(n) (u32)((n) < 32 ? ((1U << (n)) - 1) : UINT_MAX)
940
/*
 * Protect the generic chip's register cache and hardware access.
 * Compile to nothing on UP, where the raw spinlock would be a no-op.
 */
#ifdef CONFIG_SMP
static inline void irq_gc_lock(struct irq_chip_generic *gc)
{
	raw_spin_lock(&gc->lock);
}

static inline void irq_gc_unlock(struct irq_chip_generic *gc)
{
	raw_spin_unlock(&gc->lock);
}
#else
static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif
955
956
957
958
959
/*
 * irqsave variants of the generic chip lock helpers, for callers that may
 * run in non-interrupt context.  Macros (not inline functions) because
 * raw_spin_lock_irqsave() assigns to @flags by name.
 */
#define irq_gc_lock_irqsave(gc, flags)	\
	raw_spin_lock_irqsave(&(gc)->lock, flags)

#define irq_gc_unlock_irqrestore(gc, flags)	\
	raw_spin_unlock_irqrestore(&(gc)->lock, flags)
965
966static inline void irq_reg_writel(struct irq_chip_generic *gc,
967 u32 val, int reg_offset)
968{
969 if (gc->reg_writel)
970 gc->reg_writel(val, gc->reg_base + reg_offset);
971 else
972 writel(val, gc->reg_base + reg_offset);
973}
974
975static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
976 int reg_offset)
977{
978 if (gc->reg_readl)
979 return gc->reg_readl(gc->reg_base + reg_offset);
980 else
981 return readl(gc->reg_base + reg_offset);
982}
983
984
/* IPI management (CONFIG_GENERIC_IRQ_IPI). */
#define INVALID_HWIRQ (~0UL)
/* Map (@irq, @cpu) to the hw irq number; INVALID_HWIRQ on failure. */
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
/* __-prefixed variants take a descriptor and skip the lookup. */
int __ipi_send_single(struct irq_desc *desc, unsigned int cpu);
int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
int ipi_send_single(unsigned int virq, unsigned int cpu);
int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
991
992#endif
993