1
2#ifndef _LINUX_IRQ_H
3#define _LINUX_IRQ_H
4
5
6
7
8
9
10
11
12
13#include <linux/cache.h>
14#include <linux/spinlock.h>
15#include <linux/cpumask.h>
16#include <linux/irqhandler.h>
17#include <linux/irqreturn.h>
18#include <linux/irqnr.h>
19#include <linux/topology.h>
20#include <linux/io.h>
21#include <linux/slab.h>
22
23#include <asm/irq.h>
24#include <asm/ptrace.h>
25#include <asm/irq_regs.h>
26
27struct seq_file;
28struct module;
29struct msi_msg;
30enum irqchip_irq_state;
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
/*
 * IRQ line status.
 *
 * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h
 *
 * IRQ_TYPE_NONE		- default, unspecified type
 * IRQ_TYPE_EDGE_RISING		- rising edge triggered
 * IRQ_TYPE_EDGE_FALLING	- falling edge triggered
 * IRQ_TYPE_EDGE_BOTH		- rising and falling edge triggered
 * IRQ_TYPE_LEVEL_HIGH		- high level triggered
 * IRQ_TYPE_LEVEL_LOW		- low level triggered
 * IRQ_TYPE_LEVEL_MASK		- Mask to filter out the level bits
 * IRQ_TYPE_SENSE_MASK		- Mask for all the above bits
 * IRQ_TYPE_DEFAULT		- For use by some PICs to ask irq_set_type
 *				  to setup the HW to a sane default (used
 *				  by irqdomain map() callbacks to synchronize
 *				  the HW state and SW flags for a newly
 *				  allocated descriptor).
 *
 * IRQ_TYPE_PROBE		- Special flag for probing in progress
 *
 * Bits which can be modified via irq_set/clear/modify_status_flags()
 * IRQ_LEVEL			- Interrupt is level type. Will be also
 *				  updated in the code when the above trigger
 *				  bits are modified via irq_set_irq_type()
 * IRQ_PER_CPU			- Mark an interrupt PER_CPU. Will protect
 *				  it from affinity setting
 * IRQ_NOPROBE			- Interrupt cannot be probed by autoprobing
 * IRQ_NOREQUEST		- Interrupt cannot be requested via
 *				  request_irq()
 * IRQ_NOAUTOEN			- Interrupt is not automatically enabled in
 *				  request/setup_irq()
 * IRQ_NO_BALANCING		- Interrupt cannot be balanced (affinity set)
 * IRQ_MOVE_PCNTXT		- Interrupt can be migrated from process context
 * IRQ_NESTED_THREAD		- Interrupt nests into another thread
 * IRQ_NOTHREAD			- Interrupt cannot be threaded
 * IRQ_PER_CPU_DEVID		- Dev_id is a per-cpu variable
 * IRQ_IS_POLLED		- Always polled by another interrupt. Exclude
 *				  it from the spurious interrupt detection
 *				  mechanism and from core side polling.
 * IRQ_DISABLE_UNLAZY		- Disable lazy irq disable
 */
enum {
	IRQ_TYPE_NONE		= 0x00000000,
	IRQ_TYPE_EDGE_RISING	= 0x00000001,
	IRQ_TYPE_EDGE_FALLING	= 0x00000002,
	IRQ_TYPE_EDGE_BOTH	= (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
	IRQ_TYPE_LEVEL_HIGH	= 0x00000004,
	IRQ_TYPE_LEVEL_LOW	= 0x00000008,
	IRQ_TYPE_LEVEL_MASK	= (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
	IRQ_TYPE_SENSE_MASK	= 0x0000000f,
	IRQ_TYPE_DEFAULT	= IRQ_TYPE_SENSE_MASK,

	IRQ_TYPE_PROBE		= 0x00000010,

	IRQ_LEVEL		= (1 <<  8),
	IRQ_PER_CPU		= (1 <<  9),
	IRQ_NOPROBE		= (1 << 10),
	IRQ_NOREQUEST		= (1 << 11),
	IRQ_NOAUTOEN		= (1 << 12),
	IRQ_NO_BALANCING	= (1 << 13),
	IRQ_MOVE_PCNTXT		= (1 << 14),
	IRQ_NESTED_THREAD	= (1 << 15),
	IRQ_NOTHREAD		= (1 << 16),
	IRQ_PER_CPU_DEVID	= (1 << 17),
	IRQ_IS_POLLED		= (1 << 18),
	IRQ_DISABLE_UNLAZY	= (1 << 19),
};
100
/* Status bits which callers are allowed to modify via irq_modify_status() */
#define IRQF_MODIFY_MASK	\
	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
	 IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)

/* Bits which exclude an interrupt from affinity balancing */
#define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
108
109
110
111
112
113
114
115
116
117
/*
 * Return value for chip->irq_set_affinity()
 *
 * IRQ_SET_MASK_OK	- OK, core updates irq_common_data.affinity
 * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_common_data.affinity
 * IRQ_SET_MASK_OK_DONE	- Same as IRQ_SET_MASK_OK for core. Special code to
 *			  support stacked irqchips, which indicates skipping
 *			  all descendant irqchips.
 */
enum {
	IRQ_SET_MASK_OK = 0,
	IRQ_SET_MASK_OK_NOCOPY,
	IRQ_SET_MASK_OK_DONE,
};
123
124struct msi_desc;
125struct irq_domain;
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
/**
 * struct irq_common_data - per irq data shared by all irqchips
 * @state_use_accessors: status information for irq chip functions.
 *			Use accessor functions (the irqd_*() helpers below)
 *			to deal with it
 * @node:		node index useful for balancing
 * @handler_data:	per-IRQ data for the irq_chip methods
 * @msi_desc:		MSI descriptor
 * @affinity:		IRQ affinity on SMP. If this is an IPI related
 *			irq, then this is the mask of the CPUs to which
 *			an IPI can be sent.
 * @effective_affinity:	The effective IRQ affinity on SMP as some irq
 *			chips do not allow multi CPU destinations.
 *			A subset of @affinity.
 * @ipi_offset:		Offset of first IPI target cpu in @affinity. Optional.
 */
struct irq_common_data {
	unsigned int		__private state_use_accessors;
#ifdef CONFIG_NUMA
	unsigned int		node;
#endif
	void			*handler_data;
	struct msi_desc		*msi_desc;
	cpumask_var_t		affinity;
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	cpumask_var_t		effective_affinity;
#endif
#ifdef CONFIG_GENERIC_IRQ_IPI
	unsigned int		ipi_offset;
#endif
};
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
/**
 * struct irq_data - per irq chip data passed down to chip functions
 * @mask:		precomputed bitmask for accessing the chip registers
 * @irq:		interrupt number
 * @hwirq:		hardware interrupt number, local to the interrupt domain
 * @common:		point to data shared by all irqchips
 * @chip:		low level interrupt hardware access
 * @domain:		Interrupt translation domain; responsible for mapping
 *			between hwirq number and linux irq number.
 * @parent_data:	pointer to parent struct irq_data to support hierarchy
 *			irq_domain
 * @chip_data:		platform-specific per-chip private data for the chip
 *			methods, to allow shared chip implementations
 */
struct irq_data {
	u32			mask;
	unsigned int		irq;
	unsigned long		hwirq;
	struct irq_common_data	*common;
	struct irq_chip		*chip;
	struct irq_domain	*domain;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	struct irq_data		*parent_data;
#endif
	void			*chip_data;
};
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
/*
 * Bit masks for irq_common_data.state_use_accessors
 *
 * IRQD_TRIGGER_MASK		- Mask for the trigger type bits
 * IRQD_SETAFFINITY_PENDING	- Affinity setting is pending
 * IRQD_ACTIVATED		- Interrupt has already been activated
 * IRQD_NO_BALANCING		- Balancing disabled for this IRQ
 * IRQD_PER_CPU			- Interrupt is per cpu
 * IRQD_AFFINITY_SET		- Interrupt affinity was set
 * IRQD_LEVEL			- Interrupt is level triggered
 * IRQD_WAKEUP_STATE		- Interrupt is configured for wakeup
 *				  from suspend
 * IRQD_MOVE_PCNTXT		- Interrupt can be moved in process
 *				  context
 * IRQD_IRQ_DISABLED		- Disabled state of the interrupt
 * IRQD_IRQ_MASKED		- Masked state of the interrupt
 * IRQD_IRQ_INPROGRESS		- In progress state of the interrupt
 * IRQD_WAKEUP_ARMED		- Wakeup mode armed
 * IRQD_FORWARDED_TO_VCPU	- The interrupt is forwarded to a VCPU
 * IRQD_AFFINITY_MANAGED	- Affinity is auto-managed by the kernel
 * IRQD_IRQ_STARTED		- Startup state of the interrupt
 * IRQD_MANAGED_SHUTDOWN	- Interrupt was shutdown due to empty affinity
 *				  mask. Applies only to affinity managed irqs.
 * IRQD_SINGLE_TARGET		- IRQ allows only a single affinity target
 * IRQD_DEFAULT_TRIGGER_SET	- Expected trigger already been set
 * IRQD_CAN_RESERVE		- Can use reservation mode
 */
enum {
	IRQD_TRIGGER_MASK		= 0xf,
	IRQD_SETAFFINITY_PENDING	= (1 <<  8),
	IRQD_ACTIVATED			= (1 <<  9),
	IRQD_NO_BALANCING		= (1 << 10),
	IRQD_PER_CPU			= (1 << 11),
	IRQD_AFFINITY_SET		= (1 << 12),
	IRQD_LEVEL			= (1 << 13),
	IRQD_WAKEUP_STATE		= (1 << 14),
	IRQD_MOVE_PCNTXT		= (1 << 15),
	IRQD_IRQ_DISABLED		= (1 << 16),
	IRQD_IRQ_MASKED			= (1 << 17),
	IRQD_IRQ_INPROGRESS		= (1 << 18),
	IRQD_WAKEUP_ARMED		= (1 << 19),
	IRQD_FORWARDED_TO_VCPU		= (1 << 20),
	IRQD_AFFINITY_MANAGED		= (1 << 21),
	IRQD_IRQ_STARTED		= (1 << 22),
	IRQD_MANAGED_SHUTDOWN		= (1 << 23),
	IRQD_SINGLE_TARGET		= (1 << 24),
	IRQD_DEFAULT_TRIGGER_SET	= (1 << 25),
	IRQD_CAN_RESERVE		= (1 << 26),
};
234
/* Access the private state word; only valid inside this header's helpers */
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)

/* True if an affinity change could not be applied yet and is pending */
static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING;
}

static inline bool irqd_is_per_cpu(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_PER_CPU;
}

/* True if neither the per-cpu nor the no-balancing bit excludes balancing */
static inline bool irqd_can_balance(struct irq_data *d)
{
	return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING));
}

static inline bool irqd_affinity_was_set(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_AFFINITY_SET;
}

static inline void irqd_mark_affinity_was_set(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_AFFINITY_SET;
}

static inline bool irqd_trigger_type_was_set(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET;
}

/* Extract the IRQ_TYPE_* trigger bits from the state word */
static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
}

/*
 * Must only be called inside irq_chip.irq_set_type() functions or
 * from the DT/ACPI setup code.
 */
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
	__irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
	__irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
	__irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET;
}
282
static inline bool irqd_is_level_type(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_LEVEL;
}

/*
 * Must only be called of irqchip.irq_set_affinity() or from the DT/ACPI
 * setup code.
 */
static inline void irqd_set_single_target(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_SINGLE_TARGET;
}

static inline bool irqd_is_single_target(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_SINGLE_TARGET;
}
301
/* True if the interrupt is configured as a wakeup source */
static inline bool irqd_is_wakeup_set(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
}

static inline bool irqd_can_move_in_process_context(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_MOVE_PCNTXT;
}

static inline bool irqd_irq_disabled(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_DISABLED;
}

static inline bool irqd_irq_masked(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_MASKED;
}

static inline bool irqd_irq_inprogress(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS;
}

static inline bool irqd_is_wakeup_armed(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
}

/* Forwarding to a VCPU bypasses the host interrupt handling */
static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU;
}

static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU;
}

static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
}
346
static inline bool irqd_affinity_is_managed(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
}

static inline bool irqd_is_activated(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_ACTIVATED;
}

static inline void irqd_set_activated(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_ACTIVATED;
}

static inline void irqd_clr_activated(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_ACTIVATED;
}

static inline bool irqd_is_started(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_IRQ_STARTED;
}

/* True if a managed-affinity interrupt was shut down (e.g. empty mask) */
static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
}

static inline void irqd_set_can_reserve(struct irq_data *d)
{
	__irqd_to_state(d) |= IRQD_CAN_RESERVE;
}

static inline void irqd_clr_can_reserve(struct irq_data *d)
{
	__irqd_to_state(d) &= ~IRQD_CAN_RESERVE;
}

static inline bool irqd_can_reserve(struct irq_data *d)
{
	return __irqd_to_state(d) & IRQD_CAN_RESERVE;
}

/* The accessor macro is private to the helpers above */
#undef __irqd_to_state

/* Translate irq_data to the domain-local hardware interrupt number */
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
	return d->hwirq;
}
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
/**
 * struct irq_chip - hardware interrupt chip descriptor
 *
 * @parent_device:	pointer to parent device for irqchip
 * @name:		name for /proc/interrupts
 * @irq_startup:	start up the interrupt (defaults to ->enable if NULL)
 * @irq_shutdown:	shut down the interrupt (defaults to ->disable if NULL)
 * @irq_enable:		enable the interrupt (defaults to chip->unmask if NULL)
 * @irq_disable:	disable the interrupt
 * @irq_ack:		start of a new interrupt
 * @irq_mask:		mask an interrupt source
 * @irq_mask_ack:	ack and mask an interrupt source
 * @irq_unmask:		unmask an interrupt source
 * @irq_eoi:		end of interrupt
 * @irq_set_affinity:	Set the CPU affinity on SMP machines. If the force
 *			argument is true, it tells the driver to
 *			unconditionally apply the affinity setting. Sanity
 *			checks against the supplied affinity mask are not
 *			required. This is used for CPU hotplug where the
 *			target CPU is not yet set in the cpu_online_mask.
 * @irq_retrigger:	resend an IRQ to the CPU
 * @irq_set_type:	set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @irq_set_wake:	enable/disable power-management wake-on of an IRQ
 * @irq_bus_lock:	function to lock access to slow bus (i2c) chips
 * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
 * @irq_cpu_online:	configure an interrupt source for a secondary CPU
 * @irq_cpu_offline:	un-configure an interrupt source for a secondary CPU
 * @irq_suspend:	function called from core code on suspend once per
 *			chip, when one or more interrupts are installed
 * @irq_resume:		function called from core code on resume once per chip,
 *			when one ore more interrupts are installed
 * @irq_pm_shutdown:	function called from core code on shutdown once per chip
 * @irq_calc_mask:	Optional function to set irq_data.mask for special cases
 * @irq_print_chip:	optional to print special chip info in show_interrupts
 * @irq_request_resources:	optional to request resources before calling
 *				any other callback related to this irq
 * @irq_release_resources:	optional to release resources acquired with
 *				irq_request_resources
 * @irq_compose_msi_msg:	optional to compose message content for MSI
 * @irq_write_msi_msg:	optional to write message content for MSI
 * @irq_get_irqchip_state:	return the internal state of an interrupt
 * @irq_set_irqchip_state:	set the internal state of a interrupt
 * @irq_set_vcpu_affinity:	optional to target a vCPU in a virtual machine
 * @ipi_send_single:	send a single IPI to destination cpus
 * @ipi_send_mask:	send an IPI to destination cpus in cpumask
 * @flags:		chip specific flags
 */
struct irq_chip {
	struct device	*parent_device;
	const char	*name;
	unsigned int	(*irq_startup)(struct irq_data *data);
	void		(*irq_shutdown)(struct irq_data *data);
	void		(*irq_enable)(struct irq_data *data);
	void		(*irq_disable)(struct irq_data *data);

	void		(*irq_ack)(struct irq_data *data);
	void		(*irq_mask)(struct irq_data *data);
	void		(*irq_mask_ack)(struct irq_data *data);
	void		(*irq_unmask)(struct irq_data *data);
	void		(*irq_eoi)(struct irq_data *data);

	int		(*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
	int		(*irq_retrigger)(struct irq_data *data);
	int		(*irq_set_type)(struct irq_data *data, unsigned int flow_type);
	int		(*irq_set_wake)(struct irq_data *data, unsigned int on);

	void		(*irq_bus_lock)(struct irq_data *data);
	void		(*irq_bus_sync_unlock)(struct irq_data *data);

	void		(*irq_cpu_online)(struct irq_data *data);
	void		(*irq_cpu_offline)(struct irq_data *data);

	void		(*irq_suspend)(struct irq_data *data);
	void		(*irq_resume)(struct irq_data *data);
	void		(*irq_pm_shutdown)(struct irq_data *data);

	void		(*irq_calc_mask)(struct irq_data *data);

	void		(*irq_print_chip)(struct irq_data *data, struct seq_file *p);
	int		(*irq_request_resources)(struct irq_data *data);
	void		(*irq_release_resources)(struct irq_data *data);

	void		(*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg);
	void		(*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);

	int		(*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state);
	int		(*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state);

	int		(*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);

	void		(*ipi_send_single)(struct irq_data *data, unsigned int cpu);
	void		(*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);

	unsigned long	flags;
};
494
495
496
497
498
499
500
501
502
503
504
505
506
507
/*
 * irq_chip specific flags
 *
 * IRQCHIP_SET_TYPE_MASKED:	Mask before calling chip.irq_set_type()
 * IRQCHIP_EOI_IF_HANDLED:	Only issue irq_eoi() when irq was handled
 * IRQCHIP_MASK_ON_SUSPEND:	Mask non wake irqs in the suspend path
 * IRQCHIP_ONOFFLINE_ENABLED:	Only call irq_on/off_line callbacks
 *				when irq enabled
 * IRQCHIP_SKIP_SET_WAKE:	Skip chip.irq_set_wake(), for this irq chip
 * IRQCHIP_ONESHOT_SAFE:	One shot does not require mask/unmask
 * IRQCHIP_EOI_THREADED:	Chip requires eoi() on unmask in threaded mode
 * IRQCHIP_SUPPORTS_LEVEL_MSI:	Chip can provide two doorbells for Level MSIs
 */
enum {
	IRQCHIP_SET_TYPE_MASKED		= (1 <<  0),
	IRQCHIP_EOI_IF_HANDLED		= (1 <<  1),
	IRQCHIP_MASK_ON_SUSPEND		= (1 <<  2),
	IRQCHIP_ONOFFLINE_ENABLED	= (1 <<  3),
	IRQCHIP_SKIP_SET_WAKE		= (1 <<  4),
	IRQCHIP_ONESHOT_SAFE		= (1 <<  5),
	IRQCHIP_EOI_THREADED		= (1 <<  6),
	IRQCHIP_SUPPORTS_LEVEL_MSI	= (1 <<  7),
};
518
519#include <linux/irqdesc.h>
520
521
522
523
524#include <asm/hw_irq.h>
525
/* Fallback defaults for architectures which do not provide their own */
#ifndef NR_IRQS_LEGACY
# define NR_IRQS_LEGACY 0
#endif

#ifndef ARCH_IRQ_INIT_FLAGS
# define ARCH_IRQ_INIT_FLAGS	0
#endif

#define IRQ_DEFAULT_INIT_FLAGS	ARCH_IRQ_INIT_FLAGS
535
536struct irqaction;
537extern int setup_irq(unsigned int irq, struct irqaction *new);
538extern void remove_irq(unsigned int irq, struct irqaction *act);
539extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
540extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
541
542extern void irq_cpu_online(void);
543extern void irq_cpu_offline(void);
544extern int irq_set_affinity_locked(struct irq_data *data,
545 const struct cpumask *cpumask, bool force);
546extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
547
548#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_IRQ_MIGRATION)
549extern void irq_migrate_all_off_this_cpu(void);
550extern int irq_affinity_online_cpu(unsigned int cpu);
551#else
552# define irq_affinity_online_cpu NULL
553#endif
554
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
void __irq_move_irq(struct irq_data *data);

/* Apply a pending affinity change; fast path avoids the call entirely */
static inline void irq_move_irq(struct irq_data *data)
{
	if (unlikely(irqd_is_setaffinity_pending(data)))
		__irq_move_irq(data);
}
void irq_move_masked_irq(struct irq_data *data);
void irq_force_complete_move(struct irq_desc *desc);
#else
/* No pending-move support: all of these are no-ops */
static inline void irq_move_irq(struct irq_data *data) { }
static inline void irq_move_masked_irq(struct irq_data *data) { }
static inline void irq_force_complete_move(struct irq_desc *desc) { }
#endif
569
570extern int no_irq_affinity;
571
#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq);
#else
/* Without SW resend support, setting a parent irq is a successful no-op */
static inline int irq_set_parent(int irq, int parent_irq)
{
	return 0;
}
#endif
580
581
582
583
584
585extern void handle_level_irq(struct irq_desc *desc);
586extern void handle_fasteoi_irq(struct irq_desc *desc);
587extern void handle_edge_irq(struct irq_desc *desc);
588extern void handle_edge_eoi_irq(struct irq_desc *desc);
589extern void handle_simple_irq(struct irq_desc *desc);
590extern void handle_untracked_irq(struct irq_desc *desc);
591extern void handle_percpu_irq(struct irq_desc *desc);
592extern void handle_percpu_devid_irq(struct irq_desc *desc);
593extern void handle_bad_irq(struct irq_desc *desc);
594extern void handle_nested_irq(unsigned int irq);
595
596extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
597extern int irq_chip_pm_get(struct irq_data *data);
598extern int irq_chip_pm_put(struct irq_data *data);
599#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
600extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
601extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
602extern void irq_chip_enable_parent(struct irq_data *data);
603extern void irq_chip_disable_parent(struct irq_data *data);
604extern void irq_chip_ack_parent(struct irq_data *data);
605extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
606extern void irq_chip_mask_parent(struct irq_data *data);
607extern void irq_chip_unmask_parent(struct irq_data *data);
608extern void irq_chip_eoi_parent(struct irq_data *data);
609extern int irq_chip_set_affinity_parent(struct irq_data *data,
610 const struct cpumask *dest,
611 bool force);
612extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
613extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
614 void *vcpu_info);
615extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
616#endif
617
618
619extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
620
621
622
623extern int noirqdebug_setup(char *str);
624
625
626extern int can_request_irq(unsigned int irq, unsigned long irqflags);
627
628
629extern struct irq_chip no_irq_chip;
630extern struct irq_chip dummy_irq_chip;
631
632extern void
633irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
634 irq_flow_handler_t handle, const char *name);
635
/* Convenience wrapper: install chip and flow handler without a name */
static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
					    irq_flow_handler_t handle)
{
	irq_set_chip_and_handler_name(irq, chip, handle, NULL);
}
641
642extern int irq_set_percpu_devid(unsigned int irq);
643extern int irq_set_percpu_devid_partition(unsigned int irq,
644 const struct cpumask *affinity);
645extern int irq_get_percpu_devid_partition(unsigned int irq,
646 struct cpumask *affinity);
647
extern void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name);

/* Install a non-chained high level flow handler for an irq */
static inline void
irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__irq_set_handler(irq, handle, 0, NULL);
}

/*
 * Set a highlevel chained flow handler for a given IRQ.
 * (a chained handler is automatically enabled and set to
 *  IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
 */
static inline void
irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__irq_set_handler(irq, handle, 1, NULL);
}
668
669
670
671
672
673
674void
675irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
676 void *data);
677
678void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
679
/* Set irq status flag bits (wrapper around irq_modify_status()) */
static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
{
	irq_modify_status(irq, 0, set);
}

/* Clear irq status flag bits (wrapper around irq_modify_status()) */
static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
{
	irq_modify_status(irq, clr, 0);
}

static inline void irq_set_noprobe(unsigned int irq)
{
	irq_modify_status(irq, 0, IRQ_NOPROBE);
}

static inline void irq_set_probe(unsigned int irq)
{
	irq_modify_status(irq, IRQ_NOPROBE, 0);
}

static inline void irq_set_nothread(unsigned int irq)
{
	irq_modify_status(irq, 0, IRQ_NOTHREAD);
}

static inline void irq_set_thread(unsigned int irq)
{
	irq_modify_status(irq, IRQ_NOTHREAD, 0);
}

/* Set or clear the IRQ_NESTED_THREAD flag depending on @nest */
static inline void irq_set_nested_thread(unsigned int irq, bool nest)
{
	if (nest)
		irq_set_status_flags(irq, IRQ_NESTED_THREAD);
	else
		irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
}

/* Mark an irq as a per-cpu devid interrupt with the usual flag set */
static inline void irq_set_percpu_devid_flags(unsigned int irq)
{
	irq_set_status_flags(irq,
			     IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
			     IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
}
724
725
726extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
727extern int irq_set_handler_data(unsigned int irq, void *data);
728extern int irq_set_chip_data(unsigned int irq, void *data);
729extern int irq_set_irq_type(unsigned int irq, unsigned int type);
730extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
731extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
732 struct msi_desc *entry);
733extern struct irq_data *irq_get_irq_data(unsigned int irq);
734
/* Get the irq chip for an irq number; NULL if the irq does not exist */
static inline struct irq_chip *irq_get_chip(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->chip : NULL;
}

static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
{
	return d->chip;
}

/* Get the chip private data for an irq number; NULL if the irq does not exist */
static inline void *irq_get_chip_data(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->chip_data : NULL;
}

static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
{
	return d->chip_data;
}

/* Get the per-IRQ handler data; NULL if the irq does not exist */
static inline void *irq_get_handler_data(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->common->handler_data : NULL;
}

static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
{
	return d->common->handler_data;
}

/* Get the MSI descriptor; NULL if the irq does not exist */
static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? d->common->msi_desc : NULL;
}

static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
{
	return d->common->msi_desc;
}

/* Get the IRQ_TYPE_* trigger bits; 0 (IRQ_TYPE_NONE) for an invalid irq */
static inline u32 irq_get_trigger_type(unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	return d ? irqd_get_trigger_type(d) : 0;
}

/* NUMA node of the irq descriptor; 0 when NUMA is not configured */
static inline int irq_common_data_get_node(struct irq_common_data *d)
{
#ifdef CONFIG_NUMA
	return d->node;
#else
	return 0;
#endif
}

static inline int irq_data_get_node(struct irq_data *d)
{
	return irq_common_data_get_node(d->common);
}

/* Affinity mask of an irq number; NULL if the irq does not exist */
static inline struct cpumask *irq_get_affinity_mask(int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);

	return d ? d->common->affinity : NULL;
}

static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
	return d->common->affinity;
}
809}
810
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
/* The effective mask is the subset of @affinity the chip actually targets */
static inline
struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
	return d->common->effective_affinity;
}
static inline void irq_data_update_effective_affinity(struct irq_data *d,
						      const struct cpumask *m)
{
	cpumask_copy(d->common->effective_affinity, m);
}
#else
/* No separate effective mask: updates are no-ops, reads alias @affinity */
static inline void irq_data_update_effective_affinity(struct irq_data *d,
						      const struct cpumask *m)
{
}
static inline
struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
	return d->common->affinity;
}
#endif
833
834unsigned int arch_dynirq_lower_bound(unsigned int from);
835
836int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
837 struct module *owner, const struct cpumask *affinity);
838
839int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
840 unsigned int cnt, int node, struct module *owner,
841 const struct cpumask *affinity);
842
843
/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate
 * @node:	Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code. The devm_* variants are
 * device-managed and release the descriptors automatically.
 */
#define irq_alloc_descs(irq, from, cnt, node)	\
	__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)

#define irq_alloc_desc(node)			\
	irq_alloc_descs(-1, 0, 1, node)

#define irq_alloc_desc_at(at, node)		\
	irq_alloc_descs(at, at, 1, node)

#define irq_alloc_desc_from(from, node)		\
	irq_alloc_descs(-1, from, 1, node)

#define irq_alloc_descs_from(from, cnt, node)	\
	irq_alloc_descs(-1, from, cnt, node)

#define devm_irq_alloc_descs(dev, irq, from, cnt, node)		\
	__devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)

#define devm_irq_alloc_desc(dev, node)				\
	devm_irq_alloc_descs(dev, -1, 0, 1, node)

#define devm_irq_alloc_desc_at(dev, at, node)			\
	devm_irq_alloc_descs(dev, at, at, 1, node)

#define devm_irq_alloc_desc_from(dev, from, node)		\
	devm_irq_alloc_descs(dev, -1, from, 1, node)

#define devm_irq_alloc_descs_from(dev, from, cnt, node)		\
	devm_irq_alloc_descs(dev, -1, from, cnt, node)
873
874void irq_free_descs(unsigned int irq, unsigned int cnt);
/* Free a single irq descriptor */
static inline void irq_free_desc(unsigned int irq)
{
	irq_free_descs(irq, 1);
}
879
#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
unsigned int irq_alloc_hwirqs(int cnt, int node);

/* Allocate a single hardware interrupt on @node */
static inline unsigned int irq_alloc_hwirq(int node)
{
	return irq_alloc_hwirqs(1, node);
}
void irq_free_hwirqs(unsigned int from, int cnt);

/* Free a single hardware interrupt */
static inline void irq_free_hwirq(unsigned int irq)
{
	/*
	 * No 'return' here: returning a (void) expression from a void
	 * function is a C constraint violation (C11 6.8.6.4p1).
	 */
	irq_free_hwirqs(irq, 1);
}
int arch_setup_hwirq(unsigned int irq, int node);
void arch_teardown_hwirq(unsigned int irq);
#endif
894
895#ifdef CONFIG_GENERIC_IRQ_LEGACY
896void irq_init_desc(unsigned int irq);
897#endif
898
899
900
901
902
903
904
905
906
907
908
/**
 * struct irq_chip_regs - register offsets for struct irq_chip_generic
 * @enable:	Enable register offset to reg_base
 * @disable:	Disable register offset to reg_base
 * @mask:	Mask register offset to reg_base
 * @ack:	Ack register offset to reg_base
 * @eoi:	Eoi register offset to reg_base
 * @type:	Type configuration register offset to reg_base
 * @polarity:	Polarity configuration register offset to reg_base
 */
struct irq_chip_regs {
	unsigned long		enable;
	unsigned long		disable;
	unsigned long		mask;
	unsigned long		ack;
	unsigned long		eoi;
	unsigned long		type;
	unsigned long		polarity;
};
918
919
920
921
922
923
924
925
926
927
928
929
930
931
/**
 * struct irq_chip_type - Generic interrupt chip instance for a flow type
 * @chip:		The real interrupt chip which provides the callbacks
 * @regs:		Register offsets for this chip type
 * @handler:		Flow handler associated with this chip type
 * @type:		Chip can handle these flow types
 * @mask_cache_priv:	Cached mask register private to the chip type
 * @mask_cache:		Pointer to cached mask register
 *
 * A irq_chip_generic can have several instances of irq_chip_type when
 * it requires different functions and register offsets for different
 * flow types.
 */
struct irq_chip_type {
	struct irq_chip		chip;
	struct irq_chip_regs	regs;
	irq_flow_handler_t	handler;
	u32			type;
	u32			mask_cache_priv;
	u32			*mask_cache;
};
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974struct irq_chip_generic {
975 raw_spinlock_t lock;
976 void __iomem *reg_base;
977 u32 (*reg_readl)(void __iomem *addr);
978 void (*reg_writel)(u32 val, void __iomem *addr);
979 void (*suspend)(struct irq_chip_generic *gc);
980 void (*resume)(struct irq_chip_generic *gc);
981 unsigned int irq_base;
982 unsigned int irq_cnt;
983 u32 mask_cache;
984 u32 type_cache;
985 u32 polarity_cache;
986 u32 wake_enabled;
987 u32 wake_active;
988 unsigned int num_ct;
989 void *private;
990 unsigned long installed;
991 unsigned long unused;
992 struct irq_domain *domain;
993 struct list_head list;
994 struct irq_chip_type chip_types[0];
995};
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
/**
 * enum irq_gc_flags - Initialization flags for generic irq chips
 * @IRQ_GC_INIT_MASK_CACHE:	Initialize the mask_cache by reading mask reg
 * @IRQ_GC_INIT_NESTED_LOCK:	Set the lock class of the irqs to nested for
 *				irq chips which need to call irq_set_wake() on
 *				the parent irq. Usually GPIO implementations
 * @IRQ_GC_MASK_CACHE_PER_TYPE:	Mask cache is chip type private
 * @IRQ_GC_NO_MASK:		Do not calculate irq_data->mask
 * @IRQ_GC_BE_IO:		Use big-endian register accesses (default: LE)
 */
enum irq_gc_flags {
	IRQ_GC_INIT_MASK_CACHE		= 1 << 0,
	IRQ_GC_INIT_NESTED_LOCK		= 1 << 1,
	IRQ_GC_MASK_CACHE_PER_TYPE	= 1 << 2,
	IRQ_GC_NO_MASK			= 1 << 3,
	IRQ_GC_BE_IO			= 1 << 4,
};
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024struct irq_domain_chip_generic {
1025 unsigned int irqs_per_chip;
1026 unsigned int num_chips;
1027 unsigned int irq_flags_to_clear;
1028 unsigned int irq_flags_to_set;
1029 enum irq_gc_flags gc_flags;
1030 struct irq_chip_generic *gc[0];
1031};
1032
1033
1034void irq_gc_noop(struct irq_data *d);
1035void irq_gc_mask_disable_reg(struct irq_data *d);
1036void irq_gc_mask_set_bit(struct irq_data *d);
1037void irq_gc_mask_clr_bit(struct irq_data *d);
1038void irq_gc_unmask_enable_reg(struct irq_data *d);
1039void irq_gc_ack_set_bit(struct irq_data *d);
1040void irq_gc_ack_clr_bit(struct irq_data *d);
1041void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
1042void irq_gc_eoi(struct irq_data *d);
1043int irq_gc_set_wake(struct irq_data *d, unsigned int on);
1044
1045
1046int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
1047 irq_hw_number_t hw_irq);
1048struct irq_chip_generic *
1049irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
1050 void __iomem *reg_base, irq_flow_handler_t handler);
1051void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
1052 enum irq_gc_flags flags, unsigned int clr,
1053 unsigned int set);
1054int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
1055void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
1056 unsigned int clr, unsigned int set);
1057
1058struct irq_chip_generic *
1059devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct,
1060 unsigned int irq_base, void __iomem *reg_base,
1061 irq_flow_handler_t handler);
1062int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc,
1063 u32 msk, enum irq_gc_flags flags,
1064 unsigned int clr, unsigned int set);
1065
1066struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);
1067
1068int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
1069 int num_ct, const char *name,
1070 irq_flow_handler_t handler,
1071 unsigned int clr, unsigned int set,
1072 enum irq_gc_flags flags);
1073
/**
 * irq_alloc_domain_generic_chips - Allocate generic chips for an irq domain
 * @d:			irq domain for which to allocate chips
 * @irqs_per_chip:	Number of interrupts each chip handles (max 32)
 * @num_ct:		Number of irq_chip_type instances associated with each
 *			chip
 * @name:		Name of the irq chip
 * @handler:		Default flow handler associated with these chips
 * @clr:		IRQ_* bits to clear in the mapping function
 * @set:		IRQ_* bits to set in the mapping function
 * @flags:		Flags for initialization of the chips
 */
#define irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,	\
				       handler,	clr, set, flags)	\
({									\
	MAYBE_BUILD_BUG_ON(irqs_per_chip > 32);				\
	__irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,\
					 handler, clr, set, flags);	\
})
1081
/* Free a generic chip allocated with irq_alloc_generic_chip() */
static inline void irq_free_generic_chip(struct irq_chip_generic *gc)
{
	kfree(gc);
}

/* Remove a generic chip from its domain and free it */
static inline void irq_destroy_generic_chip(struct irq_chip_generic *gc,
					    u32 msk, unsigned int clr,
					    unsigned int set)
{
	irq_remove_generic_chip(gc, msk, clr, set);
	irq_free_generic_chip(gc);
}

/* Resolve the irq_chip_type instance which embeds the chip of @d */
static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
{
	return container_of(d->chip, struct irq_chip_type, chip);
}

/* Mask covering the low n interrupt bits; all bits for n >= 32 */
#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)
1101
1102#ifdef CONFIG_SMP
1103static inline void irq_gc_lock(struct irq_chip_generic *gc)
1104{
1105 raw_spin_lock(&gc->lock);
1106}
1107
1108static inline void irq_gc_unlock(struct irq_chip_generic *gc)
1109{
1110 raw_spin_unlock(&gc->lock);
1111}
1112#else
1113static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
1114static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
1115#endif
1116
1117
1118
1119
1120
/*
 * The irqsave variants are for usage in non interrupt code. Do not use
 * them in irq_chip callbacks. Use irq_gc_lock() instead.
 */
#define irq_gc_lock_irqsave(gc, flags)	\
	raw_spin_lock_irqsave(&(gc)->lock, flags)

#define irq_gc_unlock_irqrestore(gc, flags)	\
	raw_spin_unlock_irqrestore(&(gc)->lock, flags)
1126
/* Write a chip register, honoring a chip-provided accessor override */
static inline void irq_reg_writel(struct irq_chip_generic *gc,
				  u32 val, int reg_offset)
{
	if (gc->reg_writel)
		gc->reg_writel(val, gc->reg_base + reg_offset);
	else
		writel(val, gc->reg_base + reg_offset);
}

/* Read a chip register, honoring a chip-provided accessor override */
static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
				int reg_offset)
{
	if (gc->reg_readl)
		return gc->reg_readl(gc->reg_base + reg_offset);
	else
		return readl(gc->reg_base + reg_offset);
}
1144
1145struct irq_matrix;
1146struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
1147 unsigned int alloc_start,
1148 unsigned int alloc_end);
1149void irq_matrix_online(struct irq_matrix *m);
1150void irq_matrix_offline(struct irq_matrix *m);
1151void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
1152int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
1153void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
1154int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
1155void irq_matrix_reserve(struct irq_matrix *m);
1156void irq_matrix_remove_reserved(struct irq_matrix *m);
1157int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
1158 bool reserved, unsigned int *mapped_cpu);
1159void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
1160 unsigned int bit, bool managed);
1161void irq_matrix_assign(struct irq_matrix *m, unsigned int bit);
1162unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown);
1163unsigned int irq_matrix_allocated(struct irq_matrix *m);
1164unsigned int irq_matrix_reserved(struct irq_matrix *m);
1165void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind);
1166
1167
1168#define INVALID_HWIRQ (~0UL)
1169irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
1170int __ipi_send_single(struct irq_desc *desc, unsigned int cpu);
1171int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
1172int ipi_send_single(unsigned int virq, unsigned int cpu);
1173int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
1174
1175#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
1176
1177
1178
1179
1180
1181
1182
1183
1184int __init set_handle_irq(void (*handle_irq)(struct pt_regs *));
1185
1186
1187
1188
1189
1190extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
1191#endif
1192
1193#endif
1194