1
2#ifndef _LINUX_IRQ_H
3#define _LINUX_IRQ_H
4
5
6
7
8
9
10
11
12
13#include <linux/cache.h>
14#include <linux/spinlock.h>
15#include <linux/cpumask.h>
16#include <linux/irqhandler.h>
17#include <linux/irqreturn.h>
18#include <linux/irqnr.h>
19#include <linux/topology.h>
20#include <linux/io.h>
21#include <linux/slab.h>
22
23#include <asm/irq.h>
24#include <asm/ptrace.h>
25#include <asm/irq_regs.h>
26
27struct seq_file;
28struct module;
29struct msi_msg;
30struct irq_affinity_desc;
31enum irqchip_irq_state;
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
/*
 * IRQ line status.
 *
 * Bits 0-7 hold the trigger type (IRQ_TYPE_*); bits 8 and up are
 * per-line control flags which can be changed via irq_modify_status().
 */
enum {
	IRQ_TYPE_NONE = 0x00000000,		/* default, unspecified type */
	IRQ_TYPE_EDGE_RISING = 0x00000001,	/* rising edge triggered */
	IRQ_TYPE_EDGE_FALLING = 0x00000002,	/* falling edge triggered */
	IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
	IRQ_TYPE_LEVEL_HIGH = 0x00000004,	/* level triggered, active high */
	IRQ_TYPE_LEVEL_LOW = 0x00000008,	/* level triggered, active low */
	IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
	IRQ_TYPE_SENSE_MASK = 0x0000000f,	/* mask of all sense bits */
	IRQ_TYPE_DEFAULT = IRQ_TYPE_SENSE_MASK,

	IRQ_TYPE_PROBE = 0x00000010,		/* autodetection in progress */

	IRQ_LEVEL = (1 << 8),			/* IRQ is level triggered */
	IRQ_PER_CPU = (1 << 9),			/* IRQ is per CPU */
	IRQ_NOPROBE = (1 << 10),		/* IRQ is not valid for probing */
	IRQ_NOREQUEST = (1 << 11),		/* IRQ cannot be requested */
	IRQ_NOAUTOEN = (1 << 12),		/* IRQ is not enabled automatically on request */
	IRQ_NO_BALANCING = (1 << 13),		/* IRQ is excluded from affinity balancing */
	IRQ_MOVE_PCNTXT = (1 << 14),		/* IRQ can be moved in process context */
	IRQ_NESTED_THREAD = (1 << 15),		/* IRQ is nested into another threaded handler */
	IRQ_NOTHREAD = (1 << 16),		/* IRQ must not be threaded */
	IRQ_PER_CPU_DEVID = (1 << 17),		/* dev_id is a per-cpu variable */
	IRQ_IS_POLLED = (1 << 18),		/* IRQ is polled; exempt from spurious detection */
	IRQ_DISABLE_UNLAZY = (1 << 19),		/* disable lazy irq disable for this IRQ */
	IRQ_HIDDEN = (1 << 20),			/* hide IRQ from interrupt listings */
	IRQ_NO_DEBUG = (1 << 21),		/* exclude from IRQ debugging/accounting */
};

/* Status bits which irq_modify_status() is allowed to change. */
#define IRQF_MODIFY_MASK	\
	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
	 IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN)

/* Flags which make an IRQ ineligible for affinity balancing. */
#define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
113
114
115
116
117
118
119
120
121
122
/*
 * Return values for irq_chip::irq_set_affinity().
 */
enum {
	IRQ_SET_MASK_OK = 0,		/* core code updates the affinity copy */
	IRQ_SET_MASK_OK_NOCOPY,		/* chip updated the affinity itself; core must not copy */
	IRQ_SET_MASK_OK_DONE,		/* like OK, but no further processing needed (hierarchy) */
};
128
129struct msi_desc;
130struct irq_domain;
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
/**
 * struct irq_common_data - per irq data shared by all irqchips of the irq
 * @state_use_accessors: status information; private, only to be read and
 *			modified via the irqd_*() accessor functions below
 * @node:		NUMA node index, useful for balancing (CONFIG_NUMA only)
 * @handler_data:	per-IRQ data for the irq_chip methods
 * @msi_desc:		MSI descriptor
 * @affinity:		IRQ affinity on SMP
 * @effective_affinity:	effective IRQ affinity on SMP
 *			(CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK only)
 * @ipi_offset:		offset of the first IPI target cpu in @affinity
 *			(CONFIG_GENERIC_IRQ_IPI only)
 */
struct irq_common_data {
	unsigned int		__private state_use_accessors;
#ifdef CONFIG_NUMA
	unsigned int		node;
#endif
	void			*handler_data;
	struct msi_desc		*msi_desc;
	cpumask_var_t		affinity;
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	cpumask_var_t		effective_affinity;
#endif
#ifdef CONFIG_GENERIC_IRQ_IPI
	unsigned int		ipi_offset;
#endif
};
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
/**
 * struct irq_data - per irq chip data passed down to chip functions
 * @mask:	precomputed bitmask for accessing the chip registers
 * @irq:	interrupt number
 * @hwirq:	hardware interrupt number, local to the interrupt domain
 * @common:	data shared by all irqchips of the irq
 * @chip:	low level interrupt hardware access
 * @domain:	interrupt translation domain, responsible for the mapping
 *		between @hwirq and @irq
 * @parent_data: pointer to the parent irq_data in a domain hierarchy
 *		(CONFIG_IRQ_DOMAIN_HIERARCHY only)
 * @chip_data:	platform-specific per-chip private data for the chip methods
 */
struct irq_data {
	u32			mask;
	unsigned int		irq;
	unsigned long		hwirq;
	struct irq_common_data	*common;
	struct irq_chip		*chip;
	struct irq_domain	*domain;
#ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
	struct irq_data		*parent_data;
#endif
	void			*chip_data;
};
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
/*
 * Bit masks for irq_common_data.state_use_accessors. Only to be tested
 * and manipulated via the irqd_*() accessor functions below.
 */
enum {
	IRQD_TRIGGER_MASK		= 0xf,		/* trigger type bits */
	IRQD_SETAFFINITY_PENDING	= (1 <<  8),	/* affinity update pending */
	IRQD_ACTIVATED			= (1 <<  9),	/* interrupt has been activated */
	IRQD_NO_BALANCING		= (1 << 10),	/* balancing disabled */
	IRQD_PER_CPU			= (1 << 11),	/* per-CPU interrupt */
	IRQD_AFFINITY_SET		= (1 << 12),	/* affinity was set explicitly */
	IRQD_LEVEL			= (1 << 13),	/* level triggered */
	IRQD_WAKEUP_STATE		= (1 << 14),	/* wakeup mode enabled */
	IRQD_MOVE_PCNTXT		= (1 << 15),	/* can be moved in process context */
	IRQD_IRQ_DISABLED		= (1 << 16),	/* interrupt is disabled */
	IRQD_IRQ_MASKED			= (1 << 17),	/* interrupt is masked */
	IRQD_IRQ_INPROGRESS		= (1 << 18),	/* handling in progress */
	IRQD_WAKEUP_ARMED		= (1 << 19),	/* armed for wakeup from suspend */
	IRQD_FORWARDED_TO_VCPU		= (1 << 20),	/* forwarded to a VCPU */
	IRQD_AFFINITY_MANAGED		= (1 << 21),	/* affinity is kernel-managed */
	IRQD_IRQ_STARTED		= (1 << 22),	/* interrupt was started up */
	IRQD_MANAGED_SHUTDOWN		= (1 << 23),	/* shut down: managed affinity mask empty */
	IRQD_SINGLE_TARGET		= (1 << 24),	/* targets a single CPU only */
	IRQD_DEFAULT_TRIGGER_SET	= (1 << 25),	/* default trigger was set */
	IRQD_CAN_RESERVE		= (1 << 26),	/* reservation mode usable */
	IRQD_MSI_NOMASK_QUIRK		= (1 << 27),	/* non-maskable MSI quirk */
	IRQD_HANDLE_ENFORCE_IRQCTX	= (1 << 28),	/* enforce handling in hard irq context */
	IRQD_AFFINITY_ON_ACTIVATE	= (1 << 29),	/* set affinity at activation time */
	IRQD_IRQ_ENABLED_ON_SUSPEND	= (1 << 30),	/* kept enabled during suspend */
};
251
/* Internal accessor for the private state word; undefined again below. */
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)

/* True if an affinity change is pending for this interrupt. */
static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING;
}

/* True if this is a per-CPU interrupt. */
static inline bool irqd_is_per_cpu(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_PER_CPU;
}

/* True if the interrupt may take part in affinity balancing. */
static inline bool irqd_can_balance(struct irq_data *d)
{
	return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING));
}

/* True if the affinity was set explicitly (as opposed to the default). */
static inline bool irqd_affinity_was_set(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_AFFINITY_SET;
}

/* Record that the affinity has been set explicitly. */
static inline void irqd_mark_affinity_was_set(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_AFFINITY_SET;
}

/* True if a default trigger type has been established. */
static inline bool irqd_trigger_type_was_set(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET;
}

/* Return the IRQ_TYPE_* trigger bits of the interrupt. */
static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
}

/*
 * Store the trigger type and mark it as set.
 * NOTE(review): in mainline this is documented as only to be called from
 * irq_chip::irq_set_type() implementations — confirm before other use.
 */
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
	__irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
	__irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
	__irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET;
}

/* True if the interrupt is level triggered. */
static inline bool irqd_is_level_type(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_LEVEL;
}

/* Mark the interrupt as targeting a single CPU only. */
static inline void irqd_set_single_target(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_SINGLE_TARGET;
}

/* True if the interrupt can only target a single CPU. */
static inline bool irqd_is_single_target(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_SINGLE_TARGET;
}

/* Require the handler to be invoked in hard interrupt context. */
static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX;
}

static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX;
}

/* True if the interrupt stays enabled across suspend. */
static inline bool irqd_is_enabled_on_suspend(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_ENABLED_ON_SUSPEND;
}

/* True if wakeup mode is enabled for this interrupt. */
static inline bool irqd_is_wakeup_set(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
}

/* True if affinity changes may be performed in process context. */
static inline bool irqd_can_move_in_process_context(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_MOVE_PCNTXT;
}

static inline bool irqd_irq_disabled(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_DISABLED;
}

static inline bool irqd_irq_masked(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_MASKED;
}

static inline bool irqd_irq_inprogress(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS;
}

/* True if the interrupt is armed as a wakeup source for suspend. */
static inline bool irqd_is_wakeup_armed(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
}

/* Forwarding of the interrupt to a VCPU (set/clear/test). */
static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU;
}

static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU;
}

static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
}

/* True if the interrupt affinity is managed by the kernel. */
static inline bool irqd_affinity_is_managed(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
}

/* Activation state of the interrupt (set/clear/test). */
static inline bool irqd_is_activated(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_ACTIVATED;
}

static inline void irqd_set_activated(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_ACTIVATED;
}

static inline void irqd_clr_activated(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_ACTIVATED;
}

/* True if the interrupt has been started up. */
static inline bool irqd_is_started(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_STARTED;
}

/* True if a managed interrupt was shut down (empty affinity mask). */
static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
}

/* Reservation mode availability (set/clear/test). */
static inline void irqd_set_can_reserve(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_CAN_RESERVE;
}

static inline void irqd_clr_can_reserve(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_CAN_RESERVE;
}

static inline bool irqd_can_reserve(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_CAN_RESERVE;
}

/* Non-maskable MSI quirk handling (set/clear/test). */
static inline void irqd_set_msi_nomask_quirk(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK;
}

static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK;
}

static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
}

/* Defer the affinity setting to the activation of the interrupt. */
static inline void irqd_set_affinity_on_activate(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
}

static inline bool irqd_affinity_on_activate(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
}

#undef __irqd_to_state

/* Return the domain-local hardware interrupt number. */
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
	return d->hwirq;
}
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
/**
 * struct irq_chip - hardware interrupt chip descriptor
 *
 * All callbacks receive the struct irq_data of the interrupt line.
 * Optional callbacks may be NULL.
 *
 * @parent_device:	parent device of this irqchip
 * @name:		name, shown in interrupt listings
 * @irq_startup:	start up the interrupt
 * @irq_shutdown:	shut down the interrupt
 * @irq_enable:		enable the interrupt
 * @irq_disable:	disable the interrupt
 * @irq_ack:		acknowledge (start of a new) interrupt
 * @irq_mask:		mask an interrupt source
 * @irq_mask_ack:	ack and mask an interrupt source
 * @irq_unmask:		unmask an interrupt source
 * @irq_eoi:		end of interrupt
 * @irq_set_affinity:	set the CPU affinity on SMP machines
 * @irq_retrigger:	resend an IRQ to the CPU
 * @irq_set_type:	set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @irq_set_wake:	enable/disable power-management wakeup for an IRQ
 * @irq_bus_lock:	lock function for slow-bus (e.g. i2c) chips
 * @irq_bus_sync_unlock: sync and unlock slow-bus chips
 * @irq_cpu_online:	configure the interrupt source for a secondary CPU
 * @irq_cpu_offline:	un-configure the interrupt source for a secondary CPU
 * @irq_suspend:	called on system suspend
 * @irq_resume:		called on system resume
 * @irq_pm_shutdown:	called on system shutdown
 * @irq_calc_mask:	optional recalculation of irq_data.mask
 * @irq_print_chip:	optional custom printing of chip information
 * @irq_request_resources: optional resource allocation before request_irq
 * @irq_release_resources: release resources from irq_request_resources
 * @irq_compose_msi_msg: compose an MSI message for this interrupt
 * @irq_write_msi_msg:	write an MSI message to the device
 * @irq_get_irqchip_state: read the internal state of the interrupt
 * @irq_set_irqchip_state: set the internal state of the interrupt
 * @irq_set_vcpu_affinity: optional, bind the interrupt to a VCPU
 * @ipi_send_single:	send a single IPI to a destination cpu
 * @ipi_send_mask:	send an IPI to the destination cpus in a cpumask
 * @irq_nmi_setup:	set up the interrupt for NMI delivery
 * @irq_nmi_teardown:	tear the NMI setup down again
 * @flags:		chip-specific flags (IRQCHIP_* constants below)
 */
struct irq_chip {
	struct device	*parent_device;
	const char	*name;
	unsigned int	(*irq_startup)(struct irq_data *data);
	void		(*irq_shutdown)(struct irq_data *data);
	void		(*irq_enable)(struct irq_data *data);
	void		(*irq_disable)(struct irq_data *data);

	void		(*irq_ack)(struct irq_data *data);
	void		(*irq_mask)(struct irq_data *data);
	void		(*irq_mask_ack)(struct irq_data *data);
	void		(*irq_unmask)(struct irq_data *data);
	void		(*irq_eoi)(struct irq_data *data);

	int		(*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
	int		(*irq_retrigger)(struct irq_data *data);
	int		(*irq_set_type)(struct irq_data *data, unsigned int flow_type);
	int		(*irq_set_wake)(struct irq_data *data, unsigned int on);

	void		(*irq_bus_lock)(struct irq_data *data);
	void		(*irq_bus_sync_unlock)(struct irq_data *data);

	void		(*irq_cpu_online)(struct irq_data *data);
	void		(*irq_cpu_offline)(struct irq_data *data);

	void		(*irq_suspend)(struct irq_data *data);
	void		(*irq_resume)(struct irq_data *data);
	void		(*irq_pm_shutdown)(struct irq_data *data);

	void		(*irq_calc_mask)(struct irq_data *data);

	void		(*irq_print_chip)(struct irq_data *data, struct seq_file *p);
	int		(*irq_request_resources)(struct irq_data *data);
	void		(*irq_release_resources)(struct irq_data *data);

	void		(*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg);
	void		(*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);

	int		(*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state);
	int		(*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state);

	int		(*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);

	void		(*ipi_send_single)(struct irq_data *data, unsigned int cpu);
	void		(*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);

	int		(*irq_nmi_setup)(struct irq_data *data);
	void		(*irq_nmi_teardown)(struct irq_data *data);

	unsigned long	flags;
};
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
/*
 * irq_chip specific flags (irq_chip::flags).
 */
enum {
	IRQCHIP_SET_TYPE_MASKED			= (1 <<  0),	/* call irq_set_type() with the irq masked */
	IRQCHIP_EOI_IF_HANDLED			= (1 <<  1),	/* only issue irq_eoi() if the irq was handled */
	IRQCHIP_MASK_ON_SUSPEND			= (1 <<  2),	/* mask non-wakeup irqs on suspend */
	IRQCHIP_ONOFFLINE_ENABLED		= (1 <<  3),	/* only invoke on/offline callbacks when enabled */
	IRQCHIP_SKIP_SET_WAKE			= (1 <<  4),	/* skip the irq_set_wake() callback */
	IRQCHIP_ONESHOT_SAFE			= (1 <<  5),	/* one-shot handling not required */
	IRQCHIP_EOI_THREADED			= (1 <<  6),	/* chip requires eoi() on unmask in threaded mode */
	IRQCHIP_SUPPORTS_LEVEL_MSI		= (1 <<  7),	/* chip can provide level-triggered MSIs */
	IRQCHIP_SUPPORTS_NMI			= (1 <<  8),	/* chip can deliver NMIs */
	IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND	= (1 <<  9),	/* keep wakeup irqs enabled on suspend */
	IRQCHIP_AFFINITY_PRE_STARTUP		= (1 << 10),	/* set affinity before starting up */
};
587
588#include <linux/irqdesc.h>
589
590
591
592
593#include <asm/hw_irq.h>
594
/* Arch fallback: number of legacy (pre-allocated) irq descriptors. */
#ifndef NR_IRQS_LEGACY
# define NR_IRQS_LEGACY 0
#endif

/* Arch fallback: default IRQ_* status bits applied at descriptor init. */
#ifndef ARCH_IRQ_INIT_FLAGS
# define ARCH_IRQ_INIT_FLAGS	0
#endif

#define IRQ_DEFAULT_INIT_FLAGS	ARCH_IRQ_INIT_FLAGS
604
struct irqaction;

/* Register/unregister an irqaction on a per-CPU interrupt. */
extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);

/* Invoke the irq_cpu_online()/irq_cpu_offline() chip callbacks. */
extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
/* Set affinity with the descriptor lock already held by the caller. */
extern int irq_set_affinity_locked(struct irq_data *data,
				   const struct cpumask *cpumask, bool force);
extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_IRQ_MIGRATION)
/* Interrupt migration for CPU hotplug. */
extern void irq_migrate_all_off_this_cpu(void);
extern int irq_affinity_online_cpu(unsigned int cpu);
#else
# define irq_affinity_online_cpu	NULL
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
void __irq_move_irq(struct irq_data *data);
/* Perform a pending interrupt move, if any (inline fast-path check). */
static inline void irq_move_irq(struct irq_data *data)
{
	if (unlikely(irqd_is_setaffinity_pending(data)))
		__irq_move_irq(data);
}
void irq_move_masked_irq(struct irq_data *data);
void irq_force_complete_move(struct irq_desc *desc);
#else
/* No pending-move support: all three are no-ops. */
static inline void irq_move_irq(struct irq_data *data) { }
static inline void irq_move_masked_irq(struct irq_data *data) { }
static inline void irq_force_complete_move(struct irq_desc *desc) { }
#endif

extern int no_irq_affinity;

#ifdef CONFIG_HARDIRQS_SW_RESEND
/* Establish a parent interrupt used for software resend. */
int irq_set_parent(int irq, int parent_irq);
#else
static inline int irq_set_parent(int irq, int parent_irq)
{
	return 0;
}
#endif
647
648
649
650
651
/*
 * Built-in interrupt flow handlers for the various interrupt types;
 * installed via irq_set_handler() / irq_set_chip_and_handler().
 */
extern void handle_level_irq(struct irq_desc *desc);
extern void handle_fasteoi_irq(struct irq_desc *desc);
extern void handle_edge_irq(struct irq_desc *desc);
extern void handle_edge_eoi_irq(struct irq_desc *desc);
extern void handle_simple_irq(struct irq_desc *desc);
extern void handle_untracked_irq(struct irq_desc *desc);
extern void handle_percpu_irq(struct irq_desc *desc);
extern void handle_percpu_devid_irq(struct irq_desc *desc);
extern void handle_bad_irq(struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);

/* NMI flow handlers. */
extern void handle_fasteoi_nmi(struct irq_desc *desc);
extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc);

extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
/* Runtime-PM get/put for the irqchip's parent device. */
extern int irq_chip_pm_get(struct irq_data *data);
extern int irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/* Helpers which forward a chip operation to the parent irq_data in a
 * domain hierarchy. */
extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
extern int irq_chip_set_parent_state(struct irq_data *data,
				     enum irqchip_irq_state which,
				     bool val);
extern int irq_chip_get_parent_state(struct irq_data *data,
				     enum irqchip_irq_state which,
				     bool *state);
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
extern void irq_chip_mask_parent(struct irq_data *data);
extern void irq_chip_mask_ack_parent(struct irq_data *data);
extern void irq_chip_unmask_parent(struct irq_data *data);
extern void irq_chip_eoi_parent(struct irq_data *data);
extern int irq_chip_set_affinity_parent(struct irq_data *data,
					const struct cpumask *dest,
					bool force);
extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
					     void *vcpu_info);
extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
extern int irq_chip_request_resources_parent(struct irq_data *data);
extern void irq_chip_release_resources_parent(struct irq_data *data);
#endif

/* Spurious/unhandled interrupt bookkeeping. */
extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);

/* "noirqdebug" command line option parser. */
extern int noirqdebug_setup(char *str);

/* Check whether an IRQ line is free to be requested with given flags. */
extern int can_request_irq(unsigned int irq, unsigned long irqflags);

/* Dummy chips for unconnected / no-op interrupt lines. */
extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;

extern void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name);

/* Convenience wrapper: set chip and handler without a name. */
static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
					    irq_flow_handler_t handle)
{
	irq_set_chip_and_handler_name(irq, chip, handle, NULL);
}

/* Per-CPU-devid interrupt setup, optionally limited to a CPU partition. */
extern int irq_set_percpu_devid(unsigned int irq);
extern int irq_set_percpu_devid_partition(unsigned int irq,
					  const struct cpumask *affinity);
extern int irq_get_percpu_devid_partition(unsigned int irq,
					  struct cpumask *affinity);

extern void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name);

/* Install a (non-chained) flow handler for an interrupt. */
static inline void
irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__irq_set_handler(irq, handle, 0, NULL);
}
736
737
738
739
740
741
/*
 * Install a chained flow handler for an interrupt: a demultiplexing
 * handler invoked directly, without generic action handling.
 */
static inline void
irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__irq_set_handler(irq, handle, 1, NULL);
}

/*
 * Install a chained flow handler and its handler data in one call.
 */
void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data);

/* Clear @clr and set @set in the IRQ_* status flags of an interrupt. */
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);

static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
{
	irq_modify_status(irq, 0, set);
}

static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
{
	irq_modify_status(irq, clr, 0);
}

/* Exclude / include the interrupt from autoprobing. */
static inline void irq_set_noprobe(unsigned int irq)
{
	irq_modify_status(irq, 0, IRQ_NOPROBE);
}

static inline void irq_set_probe(unsigned int irq)
{
	irq_modify_status(irq, IRQ_NOPROBE, 0);
}

/* Forbid / allow threading of the interrupt. */
static inline void irq_set_nothread(unsigned int irq)
{
	irq_modify_status(irq, 0, IRQ_NOTHREAD);
}

static inline void irq_set_thread(unsigned int irq)
{
	irq_modify_status(irq, IRQ_NOTHREAD, 0);
}

/* Mark/unmark the interrupt as nested into a threaded parent handler. */
static inline void irq_set_nested_thread(unsigned int irq, bool nest)
{
	if (nest)
		irq_set_status_flags(irq, IRQ_NESTED_THREAD);
	else
		irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
}

/* Apply the canonical flag combination for per-CPU-devid interrupts. */
static inline void irq_set_percpu_devid_flags(unsigned int irq)
{
	irq_set_status_flags(irq,
			     IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
			     IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
}

/* Installers for the per-interrupt chip, data, type and MSI descriptor. */
extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
extern int irq_set_handler_data(unsigned int irq, void *data);
extern int irq_set_chip_data(unsigned int irq, void *data);
extern int irq_set_irq_type(unsigned int irq, unsigned int type);
extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
				struct msi_desc *entry);
extern struct irq_data *irq_get_irq_data(unsigned int irq);
813
/*
 * Getters by interrupt number (NULL-safe: return NULL/0 for invalid
 * numbers) and their direct struct irq_data counterparts.
 */
static inline struct irq_chip *irq_get_chip(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->chip : NULL;
}

static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
{
	return d->chip;
}

static inline void *irq_get_chip_data(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->chip_data : NULL;
}

static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
{
	return d->chip_data;
}

static inline void *irq_get_handler_data(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->common->handler_data : NULL;
}

static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
{
	return d->common->handler_data;
}

static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->common->msi_desc : NULL;
}

static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
{
	return d->common->msi_desc;
}

/* Trigger type of the interrupt; IRQ_TYPE_NONE (0) for invalid irqs. */
static inline u32 irq_get_trigger_type(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? irqd_get_trigger_type(d) : 0;
}

/* NUMA node of the interrupt; always 0 without CONFIG_NUMA. */
static inline int irq_common_data_get_node(struct irq_common_data *d)
{
#ifdef CONFIG_NUMA
	return d->node;
#else
	return 0;
#endif
}

static inline int irq_data_get_node(struct irq_data *d)
{
	return irq_common_data_get_node(d->common);
}

static inline struct cpumask *irq_get_affinity_mask(int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	return d ? d->common->affinity : NULL;
}

static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
	return d->common->affinity;
}

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static inline
struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
	return d->common->effective_affinity;
}
static inline void irq_data_update_effective_affinity(struct irq_data *d,
						      const struct cpumask *m)
{
	cpumask_copy(d->common->effective_affinity, m);
}
#else
/* Without a separate effective mask, updating is a no-op and reading
 * falls back to the plain affinity mask. */
static inline void irq_data_update_effective_affinity(struct irq_data *d,
						      const struct cpumask *m)
{
}
static inline
struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
	return d->common->affinity;
}
#endif

static inline struct cpumask *irq_get_effective_affinity_mask(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	return d ? irq_data_get_effective_affinity_mask(d) : NULL;
}
919
/* Lowest irq number usable for dynamic allocation (arch-specific). */
unsigned int arch_dynirq_lower_bound(unsigned int from);

int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		      struct module *owner,
		      const struct irq_affinity_desc *affinity);

int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
			   unsigned int cnt, int node, struct module *owner,
			   const struct irq_affinity_desc *affinity);

/* Allocate irq descriptors; irq < 0 means "any number at/above @from". */
#define irq_alloc_descs(irq, from, cnt, node)	\
	__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)

#define irq_alloc_desc(node)			\
	irq_alloc_descs(-1, 1, 1, node)

#define irq_alloc_desc_at(at, node)		\
	irq_alloc_descs(at, at, 1, node)

#define irq_alloc_desc_from(from, node)		\
	irq_alloc_descs(-1, from, 1, node)

#define irq_alloc_descs_from(from, cnt, node)	\
	irq_alloc_descs(-1, from, cnt, node)

/* Resource-managed (devm) variants of the allocators above. */
#define devm_irq_alloc_descs(dev, irq, from, cnt, node)		\
	__devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)

#define devm_irq_alloc_desc(dev, node)				\
	devm_irq_alloc_descs(dev, -1, 1, 1, node)

#define devm_irq_alloc_desc_at(dev, at, node)			\
	devm_irq_alloc_descs(dev, at, at, 1, node)

#define devm_irq_alloc_desc_from(dev, from, node)		\
	devm_irq_alloc_descs(dev, -1, from, 1, node)

#define devm_irq_alloc_descs_from(dev, from, cnt, node)		\
	devm_irq_alloc_descs(dev, -1, from, cnt, node)

void irq_free_descs(unsigned int irq, unsigned int cnt);
/* Free a single irq descriptor. */
static inline void irq_free_desc(unsigned int irq)
{
	irq_free_descs(irq, 1);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq);
#endif
970
971
972
973
974
975
976
977
978
979
980
/**
 * struct irq_chip_regs - register offsets for struct irq_chip_generic
 * @enable:	enable register offset
 * @disable:	disable register offset
 * @mask:	mask register offset
 * @ack:	ack register offset
 * @eoi:	eoi register offset
 * @type:	type register offset
 * @polarity:	polarity register offset
 */
struct irq_chip_regs {
	unsigned long		enable;
	unsigned long		disable;
	unsigned long		mask;
	unsigned long		ack;
	unsigned long		eoi;
	unsigned long		type;
	unsigned long		polarity;
};
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
/**
 * struct irq_chip_type - generic interrupt chip instance for one flow type
 * @chip:		the low level interrupt chip
 * @regs:		register offsets for this chip
 * @handler:		flow handler associated with this chip
 * @type:		IRQ_TYPE_* bits this chip type can handle
 * @mask_cache_priv:	cached mask register, private to this chip type
 * @mask_cache:		pointer to the cached mask register in use
 */
struct irq_chip_type {
	struct irq_chip		chip;
	struct irq_chip_regs	regs;
	irq_flow_handler_t	handler;
	u32			type;
	u32			mask_cache_priv;
	u32			*mask_cache;
};
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
/**
 * struct irq_chip_generic - generic irq chip data structure
 * @lock:		lock protecting register access and the cached state
 * @reg_base:		register base address
 * @reg_readl:		alternate I/O accessor (readl is used when NULL)
 * @reg_writel:		alternate I/O accessor (writel is used when NULL)
 * @suspend:		optional per-chip suspend callback
 * @resume:		optional per-chip resume callback
 * @irq_base:		interrupt base number for this chip
 * @irq_cnt:		number of interrupts handled by this chip
 * @mask_cache:		cached mask register shared between all chip types
 * @type_cache:		cached type register
 * @polarity_cache:	cached polarity register
 * @wake_enabled:	interrupts enabled for wakeup
 * @wake_active:	interrupts active for wakeup
 * @num_ct:		number of entries in @chip_types
 * @private:		private data for non-generic chips
 * @installed:		bitmask of installed interrupts
 * @unused:		bitmask of unusable interrupts
 * @domain:		irq domain pointer
 * @list:		list head for registration
 * @chip_types:		array of interrupt chip type structures
 */
struct irq_chip_generic {
	raw_spinlock_t		lock;
	void __iomem		*reg_base;
	u32			(*reg_readl)(void __iomem *addr);
	void			(*reg_writel)(u32 val, void __iomem *addr);
	void			(*suspend)(struct irq_chip_generic *gc);
	void			(*resume)(struct irq_chip_generic *gc);
	unsigned int		irq_base;
	unsigned int		irq_cnt;
	u32			mask_cache;
	u32			type_cache;
	u32			polarity_cache;
	u32			wake_enabled;
	u32			wake_active;
	unsigned int		num_ct;
	void			*private;
	unsigned long		installed;
	unsigned long		unused;
	struct irq_domain	*domain;
	struct list_head	list;
	struct irq_chip_type	chip_types[];
};
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
/**
 * enum irq_gc_flags - initialization flags for generic irq chips
 * @IRQ_GC_INIT_MASK_CACHE:	initialize the mask_cache from the mask register
 * @IRQ_GC_INIT_NESTED_LOCK:	use a nested lock class for the chip's irqs
 * @IRQ_GC_MASK_CACHE_PER_TYPE:	the mask cache is chip-type private
 * @IRQ_GC_NO_MASK:		do not calculate irq_data->mask
 * @IRQ_GC_BE_IO:		use big-endian register accesses
 */
enum irq_gc_flags {
	IRQ_GC_INIT_MASK_CACHE		= 1 << 0,
	IRQ_GC_INIT_NESTED_LOCK		= 1 << 1,
	IRQ_GC_MASK_CACHE_PER_TYPE	= 1 << 2,
	IRQ_GC_NO_MASK			= 1 << 3,
	IRQ_GC_BE_IO			= 1 << 4,
};
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
/**
 * struct irq_domain_chip_generic - generic irq chip data for an irq domain
 * @irqs_per_chip:	number of interrupts per chip
 * @num_chips:		number of chips
 * @irq_flags_to_clear:	IRQ_* bits to clear when mapping an interrupt
 * @irq_flags_to_set:	IRQ_* bits to set when mapping an interrupt
 * @gc_flags:		generic chip specific setup flags
 * @gc:			array of generic chips for this domain
 */
struct irq_domain_chip_generic {
	unsigned int		irqs_per_chip;
	unsigned int		num_chips;
	unsigned int		irq_flags_to_clear;
	unsigned int		irq_flags_to_set;
	enum irq_gc_flags	gc_flags;
	struct irq_chip_generic	*gc[];
};
1104
#ifdef CONFIG_IRQCHIP_XILINX_INTC_MODULE_SUPPORT_EXPERIMENTAL
/**
 * struct irqc_init_remove_funps - init/remove callbacks for a modular irqchip
 * @irqchip_initp:	initialize the irqchip from its device tree node
 * @irqchip_removep:	tear the irqchip down again
 *
 * NOTE(review): experimental hook for the modular Xilinx INTC driver;
 * both callbacks take the irqchip node and its parent node.
 */
struct irqc_init_remove_funps {
	int (*irqchip_initp)(struct device_node *irqc,
			     struct device_node *parent);
	int (*irqchip_removep)(struct device_node *irqc,
			       struct device_node *parent);
};
#endif
1119
1120
/* Generic chip callbacks, usable directly as irq_chip methods. */
void irq_gc_noop(struct irq_data *d);
void irq_gc_mask_disable_reg(struct irq_data *d);
void irq_gc_mask_set_bit(struct irq_data *d);
void irq_gc_mask_clr_bit(struct irq_data *d);
void irq_gc_unmask_enable_reg(struct irq_data *d);
void irq_gc_ack_set_bit(struct irq_data *d);
void irq_gc_ack_clr_bit(struct irq_data *d);
void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
void irq_gc_eoi(struct irq_data *d);
int irq_gc_set_wake(struct irq_data *d, unsigned int on);

/* Setup and teardown of generic irq chips. */
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
			 irq_hw_number_t hw_irq);
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
		       void __iomem *reg_base, irq_flow_handler_t handler);
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
			    enum irq_gc_flags flags, unsigned int clr,
			    unsigned int set);
int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
			     unsigned int clr, unsigned int set);

/* Resource-managed (devm) variants. */
struct irq_chip_generic *
devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct,
			    unsigned int irq_base, void __iomem *reg_base,
			    irq_flow_handler_t handler);
int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc,
				u32 msk, enum irq_gc_flags flags,
				unsigned int clr, unsigned int set);

struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);

int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
				     int num_ct, const char *name,
				     irq_flow_handler_t handler,
				     unsigned int clr, unsigned int set,
				     enum irq_gc_flags flags);

/* Allocate generic chips for an irq domain; at most 32 irqs per chip. */
#define irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,	\
				       handler,	clr, set, flags)	\
({									\
	MAYBE_BUILD_BUG_ON(irqs_per_chip > 32);				\
	__irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,\
					 handler, clr, set, flags);	\
})
1168
/* Free a generic chip allocated with irq_alloc_generic_chip(). */
static inline void irq_free_generic_chip(struct irq_chip_generic *gc)
{
	kfree(gc);
}

/* Remove a generic chip and free it. */
static inline void irq_destroy_generic_chip(struct irq_chip_generic *gc,
					    u32 msk, unsigned int clr,
					    unsigned int set)
{
	irq_remove_generic_chip(gc, msk, clr, set);
	irq_free_generic_chip(gc);
}

/* Return the irq_chip_type embedding the chip of an irq_data. */
static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
{
	return container_of(d->chip, struct irq_chip_type, chip);
}
1186
/*
 * IRQ_MSK(n) - bitmask with the low @n bits set (0 <= n <= 32).
 * Shift an unsigned 1: for n == 31, "1 << 31" on a signed int shifts
 * into the sign bit, which is undefined behaviour in ISO C.
 */
#define IRQ_MSK(n) (u32)((n) < 32 ? ((1U << (n)) - 1) : UINT_MAX)
1188
#ifdef CONFIG_SMP
/* Lock/unlock the generic chip's register cache lock (SMP only). */
static inline void irq_gc_lock(struct irq_chip_generic *gc)
{
	raw_spin_lock(&gc->lock);
}

static inline void irq_gc_unlock(struct irq_chip_generic *gc)
{
	raw_spin_unlock(&gc->lock);
}
#else
static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif

/*
 * irqsave variants of the generic chip lock, for callers which may run
 * with interrupts enabled.
 */
#define irq_gc_lock_irqsave(gc, flags)	\
	raw_spin_lock_irqsave(&(gc)->lock, flags)

#define irq_gc_unlock_irqrestore(gc, flags)	\
	raw_spin_unlock_irqrestore(&(gc)->lock, flags)

/* Register write which honours an optional chip-specific accessor. */
static inline void irq_reg_writel(struct irq_chip_generic *gc,
				  u32 val, int reg_offset)
{
	if (gc->reg_writel)
		gc->reg_writel(val, gc->reg_base + reg_offset);
	else
		writel(val, gc->reg_base + reg_offset);
}

/* Register read which honours an optional chip-specific accessor. */
static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
				int reg_offset)
{
	if (gc->reg_readl)
		return gc->reg_readl(gc->reg_base + reg_offset);
	else
		return readl(gc->reg_base + reg_offset);
}
1231
/*
 * Matrix allocator: manages a per-CPU map of interrupt vector bits.
 */
struct irq_matrix;
struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
				    unsigned int alloc_start,
				    unsigned int alloc_end);
void irq_matrix_online(struct irq_matrix *m);
void irq_matrix_offline(struct irq_matrix *m);
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
			     unsigned int *mapped_cpu);
void irq_matrix_reserve(struct irq_matrix *m);
void irq_matrix_remove_reserved(struct irq_matrix *m);
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
		     bool reserved, unsigned int *mapped_cpu);
void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
		     unsigned int bit, bool managed);
void irq_matrix_assign(struct irq_matrix *m, unsigned int bit);
unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown);
unsigned int irq_matrix_allocated(struct irq_matrix *m);
unsigned int irq_matrix_reserved(struct irq_matrix *m);
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind);

/* IPI management: map virq+cpu to a hardware irq number and send IPIs. */
#define INVALID_HWIRQ	(~0UL)
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
int __ipi_send_single(struct irq_desc *desc, unsigned int cpu);
int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
int ipi_send_single(unsigned int virq, unsigned int cpu);
int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
1262
#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
/*
 * Set the top-level architecture interrupt entry handler.
 *
 * NOTE(review): with the experimental Xilinx INTC module support the
 * setter loses its __init annotation — presumably so modular irqchip
 * code may call it after boot; confirm against the driver.
 */
#ifndef CONFIG_IRQCHIP_XILINX_INTC_MODULE_SUPPORT_EXPERIMENTAL
int __init set_handle_irq(void (*handle_irq)(struct pt_regs *));
#else
int set_handle_irq(void (*handle_irq)(struct pt_regs *));
#endif

/*
 * Architecture interrupt entry point, installed via set_handle_irq().
 */
extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
#else
#ifndef set_handle_irq
/* Without the multi-handler, calling set_handle_irq() is a bug. */
#define set_handle_irq(handle_irq)		\
	do {					\
		(void)handle_irq;		\
		WARN_ON(1);			\
	} while (0)
#endif
#endif
1292
1293#endif
1294