#ifndef _LINUX_IRQ_H
#define _LINUX_IRQ_H

#include <linux/smp.h>
#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/irqhandler.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <asm/irq.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>

struct seq_file;
struct module;
struct msi_msg;
enum irqchip_irq_state;

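/*
 * IRQ line status.
 *
 * Bits 0-7 hold the trigger type; the IRQ_TYPE_* values match the
 * IRQF_TRIGGER_* bits in <linux/interrupt.h>.  IRQ_TYPE_PROBE flags a
 * trigger probe in progress.
 *
 * The remaining bits are status information which can be modified via
 * irq_set/clear/modify_status_flags():
 *
 * IRQ_LEVEL		- interrupt is level triggered
 * IRQ_PER_CPU		- interrupt is per CPU, protected from affinity changes
 * IRQ_NOPROBE		- interrupt cannot be autoprobed
 * IRQ_NOREQUEST	- interrupt cannot be requested via request_irq()
 * IRQ_NOAUTOEN		- interrupt is not automatically enabled on request/setup
 * IRQ_NO_BALANCING	- interrupt is excluded from affinity balancing
 * IRQ_MOVE_PCNTXT	- interrupt can be migrated from process context
 * IRQ_NESTED_THREAD	- interrupt nests into another threaded handler
 * IRQ_NOTHREAD		- interrupt cannot be threaded
 * IRQ_PER_CPU_DEVID	- dev_id is a per-cpu variable
 * IRQ_IS_POLLED	- always polled; excluded from spurious irq detection
 * IRQ_DISABLE_UNLAZY	- disable lazy irq disable
 */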
enum {
        IRQ_TYPE_NONE           = 0x00000000,
        IRQ_TYPE_EDGE_RISING    = 0x00000001,
        IRQ_TYPE_EDGE_FALLING   = 0x00000002,
        IRQ_TYPE_EDGE_BOTH      = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
        IRQ_TYPE_LEVEL_HIGH     = 0x00000004,
        IRQ_TYPE_LEVEL_LOW      = 0x00000008,
        IRQ_TYPE_LEVEL_MASK     = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
        IRQ_TYPE_SENSE_MASK     = 0x0000000f,
        IRQ_TYPE_DEFAULT        = IRQ_TYPE_SENSE_MASK,

        IRQ_TYPE_PROBE          = 0x00000010,

        IRQ_LEVEL               = (1 << 8),
        IRQ_PER_CPU             = (1 << 9),
        IRQ_NOPROBE             = (1 << 10),
        IRQ_NOREQUEST           = (1 << 11),
        IRQ_NOAUTOEN            = (1 << 12),
        IRQ_NO_BALANCING        = (1 << 13),
        IRQ_MOVE_PCNTXT         = (1 << 14),
        IRQ_NESTED_THREAD       = (1 << 15),
        IRQ_NOTHREAD            = (1 << 16),
        IRQ_PER_CPU_DEVID       = (1 << 17),
        IRQ_IS_POLLED           = (1 << 18),
        IRQ_DISABLE_UNLAZY      = (1 << 19),
};

#define IRQF_MODIFY_MASK        \
        (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
         IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
         IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
         IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)

#define IRQ_NO_BALANCING_MASK   (IRQ_PER_CPU | IRQ_NO_BALANCING)

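/*
 * Return values for chip->irq_set_affinity()
 *
 * IRQ_SET_MASK_OK		- OK, core updates irq_common_data.affinity
 * IRQ_SET_MASK_OK_NOCOPY	- OK, chip did update irq_common_data.affinity
 * IRQ_SET_MASK_OK_DONE		- Same as IRQ_SET_MASK_OK for the core. Special
 *				  code to support stacked irqchips, indicates
 *				  that all descendant irqchips can be skipped.
 */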
enum {
        IRQ_SET_MASK_OK = 0,
        IRQ_SET_MASK_OK_NOCOPY,
        IRQ_SET_MASK_OK_DONE,
};

struct msi_desc;
struct irq_domain;

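/**
 * struct irq_common_data - per irq data shared by all irqchips
 * @state_use_accessors: status information for irq chip functions.
 *			Use accessor functions (irqd_*()) to deal with it
 * @node:		node index useful for balancing
 * @handler_data:	per-IRQ data for the irq_chip methods
 * @msi_desc:		MSI descriptor
 * @affinity:		IRQ affinity on SMP. If this is an IPI related irq,
 *			then this is the mask of the CPUs to which an IPI
 *			can be sent.
 * @effective_affinity:	The effective IRQ affinity on SMP as some irq chips
 *			do not allow multi CPU destinations. A subset of
 *			@affinity.
 * @ipi_offset:		Offset of first IPI target cpu in @affinity. Optional.
 */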
struct irq_common_data {
        unsigned int            __private state_use_accessors;
#ifdef CONFIG_NUMA
        unsigned int            node;
#endif
        void                    *handler_data;
        struct msi_desc         *msi_desc;
        cpumask_var_t           affinity;
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        cpumask_var_t           effective_affinity;
#endif
#ifdef CONFIG_GENERIC_IRQ_IPI
        unsigned int            ipi_offset;
#endif
};

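/**
 * struct irq_data - per irq chip data passed down to chip functions
 * @mask:		precomputed bitmask for accessing the chip registers
 * @irq:		interrupt number
 * @hwirq:		hardware interrupt number, local to the interrupt domain
 * @common:		points to data shared by all irqchips
 * @chip:		low level interrupt hardware access
 * @domain:		interrupt translation domain; responsible for mapping
 *			between the hwirq number and the Linux irq number
 * @parent_data:	pointer to parent struct irq_data to support hierarchy
 *			irq_domain
 * @chip_data:		platform-specific per-chip private data for the chip
 *			methods, to allow shared chip implementations
 */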
struct irq_data {
        u32                     mask;
        unsigned int            irq;
        unsigned long           hwirq;
        struct irq_common_data  *common;
        struct irq_chip         *chip;
        struct irq_domain       *domain;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
        struct irq_data         *parent_data;
#endif
        void                    *chip_data;
};

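/*
 * Bit masks for irq_common_data.state_use_accessors
 *
 * IRQD_TRIGGER_MASK		- Mask for the trigger type bits
 * IRQD_SETAFFINITY_PENDING	- Affinity setting is pending
 * IRQD_ACTIVATED		- Interrupt has already been activated
 * IRQD_NO_BALANCING		- Balancing disabled for this IRQ
 * IRQD_PER_CPU			- Interrupt is per cpu
 * IRQD_AFFINITY_SET		- Interrupt affinity was set
 * IRQD_LEVEL			- Interrupt is level triggered
 * IRQD_WAKEUP_STATE		- Interrupt is configured for wakeup from suspend
 * IRQD_MOVE_PCNTXT		- Interrupt can be moved in process context
 * IRQD_IRQ_DISABLED		- Disabled state of the interrupt
 * IRQD_IRQ_MASKED		- Masked state of the interrupt
 * IRQD_IRQ_INPROGRESS		- In progress state of the interrupt
 * IRQD_WAKEUP_ARMED		- Wakeup mode armed
 * IRQD_FORWARDED_TO_VCPU	- The interrupt is forwarded to a VCPU
 * IRQD_AFFINITY_MANAGED	- Affinity is auto-managed by the kernel
 * IRQD_IRQ_STARTED		- Startup state of the interrupt
 * IRQD_MANAGED_SHUTDOWN	- Interrupt was shutdown due to empty affinity
 *				  mask. Applies only to affinity managed irqs.
 * IRQD_SINGLE_TARGET		- IRQ allows only a single affinity target
 * IRQD_DEFAULT_TRIGGER_SET	- The trigger type has been set
 * IRQD_CAN_RESERVE		- Can use reservation mode
 */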
enum {
        IRQD_TRIGGER_MASK               = 0xf,
        IRQD_SETAFFINITY_PENDING        = (1 << 8),
        IRQD_ACTIVATED                  = (1 << 9),
        IRQD_NO_BALANCING               = (1 << 10),
        IRQD_PER_CPU                    = (1 << 11),
        IRQD_AFFINITY_SET               = (1 << 12),
        IRQD_LEVEL                      = (1 << 13),
        IRQD_WAKEUP_STATE               = (1 << 14),
        IRQD_MOVE_PCNTXT                = (1 << 15),
        IRQD_IRQ_DISABLED               = (1 << 16),
        IRQD_IRQ_MASKED                 = (1 << 17),
        IRQD_IRQ_INPROGRESS             = (1 << 18),
        IRQD_WAKEUP_ARMED               = (1 << 19),
        IRQD_FORWARDED_TO_VCPU          = (1 << 20),
        IRQD_AFFINITY_MANAGED           = (1 << 21),
        IRQD_IRQ_STARTED                = (1 << 22),
        IRQD_MANAGED_SHUTDOWN           = (1 << 23),
        IRQD_SINGLE_TARGET              = (1 << 24),
        IRQD_DEFAULT_TRIGGER_SET        = (1 << 25),
        IRQD_CAN_RESERVE                = (1 << 26),
};

#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)

static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING;
}

static inline bool irqd_is_per_cpu(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_PER_CPU;
}

static inline bool irqd_can_balance(struct irq_data *d)
{
        return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING));
}

static inline bool irqd_affinity_was_set(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_AFFINITY_SET;
}

static inline void irqd_mark_affinity_was_set(struct irq_data *d)
{
        __irqd_to_state(d) |= IRQD_AFFINITY_SET;
}

static inline bool irqd_trigger_type_was_set(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET;
}

static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
}

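/*
 * Must only be called inside irq_chip.irq_set_type() functions or
 * from the DT/ACPI setup code.
 */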
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
        __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
        __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
        __irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET;
}

static inline bool irqd_is_level_type(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_LEVEL;
}

static inline void irqd_set_single_target(struct irq_data *d)
{
        __irqd_to_state(d) |= IRQD_SINGLE_TARGET;
}

static inline bool irqd_is_single_target(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_SINGLE_TARGET;
}

static inline bool irqd_is_wakeup_set(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
}

static inline bool irqd_can_move_in_process_context(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_MOVE_PCNTXT;
}

static inline bool irqd_irq_disabled(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_IRQ_DISABLED;
}

static inline bool irqd_irq_masked(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_IRQ_MASKED;
}

static inline bool irqd_irq_inprogress(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS;
}

static inline bool irqd_is_wakeup_armed(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_WAKEUP_ARMED;
}

static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU;
}

static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d)
{
        __irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU;
}

static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
{
        __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
}

static inline bool irqd_affinity_is_managed(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
}

static inline bool irqd_is_activated(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_ACTIVATED;
}

static inline void irqd_set_activated(struct irq_data *d)
{
        __irqd_to_state(d) |= IRQD_ACTIVATED;
}

static inline void irqd_clr_activated(struct irq_data *d)
{
        __irqd_to_state(d) &= ~IRQD_ACTIVATED;
}

static inline bool irqd_is_started(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_IRQ_STARTED;
}

static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
}

static inline void irqd_set_can_reserve(struct irq_data *d)
{
        __irqd_to_state(d) |= IRQD_CAN_RESERVE;
}

static inline void irqd_clr_can_reserve(struct irq_data *d)
{
        __irqd_to_state(d) &= ~IRQD_CAN_RESERVE;
}

static inline bool irqd_can_reserve(struct irq_data *d)
{
        return __irqd_to_state(d) & IRQD_CAN_RESERVE;
}

#undef __irqd_to_state

static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
        return d->hwirq;
}

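/**
 * struct irq_chip - hardware interrupt chip descriptor
 *
 * @parent_device:	pointer to parent device for irqchip
 * @name:		name for /proc/interrupts
 * @irq_startup:	start up the interrupt (defaults to ->enable if NULL)
 * @irq_shutdown:	shut down the interrupt (defaults to ->disable if NULL)
 * @irq_enable:		enable the interrupt (defaults to ->unmask if NULL)
 * @irq_disable:	disable the interrupt
 * @irq_ack:		start of a new interrupt
 * @irq_mask:		mask an interrupt source
 * @irq_mask_ack:	ack and mask an interrupt source
 * @irq_unmask:		unmask an interrupt source
 * @irq_eoi:		end of interrupt
 * @irq_set_affinity:	set the CPU affinity on SMP machines; if @force is
 *			true the affinity setting is applied unconditionally
 * @irq_retrigger:	resend an IRQ to the CPU
 * @irq_set_type:	set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @irq_set_wake:	enable/disable power-management wake-on of an IRQ
 * @irq_bus_lock:	function to lock access to slow bus (i2c) chips
 * @irq_bus_sync_unlock: function to sync and unlock slow bus (i2c) chips
 * @irq_cpu_online:	configure an interrupt source for a secondary CPU
 * @irq_cpu_offline:	un-configure an interrupt source for a secondary CPU
 * @irq_suspend:	called from core code on suspend, once per chip
 * @irq_resume:		called from core code on resume, once per chip
 * @irq_pm_shutdown:	called from core code on shutdown, once per chip
 * @irq_calc_mask:	optional function to set irq_data.mask for special cases
 * @irq_print_chip:	optional, print special chip info in show_interrupts
 * @irq_request_resources:	optional, request resources before calling
 *				any other callback related to this irq
 * @irq_release_resources:	optional, release resources acquired with
 *				irq_request_resources
 * @irq_compose_msi_msg:	optional, compose message content for MSI
 * @irq_write_msi_msg:	optional, write message content for MSI
 * @irq_get_irqchip_state:	return the internal state of an interrupt
 * @irq_set_irqchip_state:	set the internal state of an interrupt
 * @irq_set_vcpu_affinity:	optional, target a vCPU in a virtual machine
 * @ipi_send_single:	send a single IPI to a destination cpu
 * @ipi_send_mask:	send an IPI to destination cpus in cpumask
 * @flags:		chip specific flags
 */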
struct irq_chip {
        struct device   *parent_device;
        const char      *name;
        unsigned int    (*irq_startup)(struct irq_data *data);
        void            (*irq_shutdown)(struct irq_data *data);
        void            (*irq_enable)(struct irq_data *data);
        void            (*irq_disable)(struct irq_data *data);

        void            (*irq_ack)(struct irq_data *data);
        void            (*irq_mask)(struct irq_data *data);
        void            (*irq_mask_ack)(struct irq_data *data);
        void            (*irq_unmask)(struct irq_data *data);
        void            (*irq_eoi)(struct irq_data *data);

        int             (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
        int             (*irq_retrigger)(struct irq_data *data);
        int             (*irq_set_type)(struct irq_data *data, unsigned int flow_type);
        int             (*irq_set_wake)(struct irq_data *data, unsigned int on);

        void            (*irq_bus_lock)(struct irq_data *data);
        void            (*irq_bus_sync_unlock)(struct irq_data *data);

        void            (*irq_cpu_online)(struct irq_data *data);
        void            (*irq_cpu_offline)(struct irq_data *data);

        void            (*irq_suspend)(struct irq_data *data);
        void            (*irq_resume)(struct irq_data *data);
        void            (*irq_pm_shutdown)(struct irq_data *data);

        void            (*irq_calc_mask)(struct irq_data *data);

        void            (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
        int             (*irq_request_resources)(struct irq_data *data);
        void            (*irq_release_resources)(struct irq_data *data);

        void            (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg);
        void            (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);

        int             (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state);
        int             (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state);

        int             (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);

        void            (*ipi_send_single)(struct irq_data *data, unsigned int cpu);
        void            (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);

        unsigned long   flags;
};

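/*
 * irq_chip specific flags
 *
 * IRQCHIP_SET_TYPE_MASKED:	Mask before calling chip.irq_set_type()
 * IRQCHIP_EOI_IF_HANDLED:	Only issue irq_eoi() when irq was handled
 * IRQCHIP_MASK_ON_SUSPEND:	Mask non wake irqs in the suspend path
 * IRQCHIP_ONOFFLINE_ENABLED:	Only call irq_on/off_line callbacks
 *				when irq enabled
 * IRQCHIP_SKIP_SET_WAKE:	Skip chip.irq_set_wake() for this irq chip
 * IRQCHIP_ONESHOT_SAFE:	One shot does not require mask/unmask
 * IRQCHIP_EOI_THREADED:	Chip requires eoi() on unmask in threaded mode
 */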
enum {
        IRQCHIP_SET_TYPE_MASKED         = (1 << 0),
        IRQCHIP_EOI_IF_HANDLED          = (1 << 1),
        IRQCHIP_MASK_ON_SUSPEND         = (1 << 2),
        IRQCHIP_ONOFFLINE_ENABLED       = (1 << 3),
        IRQCHIP_SKIP_SET_WAKE           = (1 << 4),
        IRQCHIP_ONESHOT_SAFE            = (1 << 5),
        IRQCHIP_EOI_THREADED            = (1 << 6),
};

#include <linux/irqdesc.h>

#include <asm/hw_irq.h>

#ifndef NR_IRQS_LEGACY
# define NR_IRQS_LEGACY 0
#endif

#ifndef ARCH_IRQ_INIT_FLAGS
# define ARCH_IRQ_INIT_FLAGS    0
#endif

#define IRQ_DEFAULT_INIT_FLAGS  ARCH_IRQ_INIT_FLAGS

struct irqaction;
extern int setup_irq(unsigned int irq, struct irqaction *new);
extern void remove_irq(unsigned int irq, struct irqaction *act);
extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);

extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
extern int irq_set_affinity_locked(struct irq_data *data,
                                   const struct cpumask *cpumask, bool force);
extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_IRQ_MIGRATION)
extern void irq_migrate_all_off_this_cpu(void);
extern int irq_affinity_online_cpu(unsigned int cpu);
#else
# define irq_affinity_online_cpu        NULL
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
void irq_move_irq(struct irq_data *data);
void irq_move_masked_irq(struct irq_data *data);
void irq_force_complete_move(struct irq_desc *desc);
#else
static inline void irq_move_irq(struct irq_data *data) { }
static inline void irq_move_masked_irq(struct irq_data *data) { }
static inline void irq_force_complete_move(struct irq_desc *desc) { }
#endif

extern int no_irq_affinity;

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq);
#else
static inline int irq_set_parent(int irq, int parent_irq)
{
        return 0;
}
#endif

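/*
 * Built-in IRQ flow handlers for various IRQ types, callable via
 * desc->handle_irq().  A driver normally installs one of these together
 * with its irq_chip; for example (illustrative sketch only, "my_chip"
 * is a made-up driver-local name):
 *
 *	irq_set_chip_and_handler(virq, &my_chip, handle_level_irq);
 */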
extern void handle_level_irq(struct irq_desc *desc);
extern void handle_fasteoi_irq(struct irq_desc *desc);
extern void handle_edge_irq(struct irq_desc *desc);
extern void handle_edge_eoi_irq(struct irq_desc *desc);
extern void handle_simple_irq(struct irq_desc *desc);
extern void handle_untracked_irq(struct irq_desc *desc);
extern void handle_percpu_irq(struct irq_desc *desc);
extern void handle_percpu_devid_irq(struct irq_desc *desc);
extern void handle_bad_irq(struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);

extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
extern int irq_chip_pm_get(struct irq_data *data);
extern int irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
extern void irq_chip_mask_parent(struct irq_data *data);
extern void irq_chip_unmask_parent(struct irq_data *data);
extern void irq_chip_eoi_parent(struct irq_data *data);
extern int irq_chip_set_affinity_parent(struct irq_data *data,
                                        const struct cpumask *dest,
                                        bool force);
extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
                                             void *vcpu_info);
extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
#endif

extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);

extern int noirqdebug_setup(char *str);

extern int can_request_irq(unsigned int irq, unsigned long irqflags);

extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;

extern void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
                              irq_flow_handler_t handle, const char *name);

static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
                                            irq_flow_handler_t handle)
{
        irq_set_chip_and_handler_name(irq, chip, handle, NULL);
}

extern int irq_set_percpu_devid(unsigned int irq);
extern int irq_set_percpu_devid_partition(unsigned int irq,
                                          const struct cpumask *affinity);
extern int irq_get_percpu_devid_partition(unsigned int irq,
                                          struct cpumask *affinity);

extern void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
                  const char *name);

static inline void
irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
{
        __irq_set_handler(irq, handle, 0, NULL);
}

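/*
 * Set a highlevel chained flow handler for a given IRQ.
 * (a chained handler is automatically enabled and set to
 *  IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
 */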
static inline void
irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
{
        __irq_set_handler(irq, handle, 1, NULL);
}

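/*
 * Set a highlevel chained flow handler and its data for a given IRQ.
 * (a chained handler is automatically enabled and set to
 *  IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
 *
 * A cascaded interrupt controller driver would typically install its
 * demultiplex handler on the parent interrupt like this (illustrative
 * sketch only; "my_gpio_irq_handler" and "my_chip_data" are made-up
 * driver-local names):
 *
 *	irq_set_chained_handler_and_data(parent_irq,
 *					 my_gpio_irq_handler, my_chip_data);
 */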
void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
                                 void *data);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);

static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
{
        irq_modify_status(irq, 0, set);
}

static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
{
        irq_modify_status(irq, clr, 0);
}

static inline void irq_set_noprobe(unsigned int irq)
{
        irq_modify_status(irq, 0, IRQ_NOPROBE);
}

static inline void irq_set_probe(unsigned int irq)
{
        irq_modify_status(irq, IRQ_NOPROBE, 0);
}

static inline void irq_set_nothread(unsigned int irq)
{
        irq_modify_status(irq, 0, IRQ_NOTHREAD);
}

static inline void irq_set_thread(unsigned int irq)
{
        irq_modify_status(irq, IRQ_NOTHREAD, 0);
}

static inline void irq_set_nested_thread(unsigned int irq, bool nest)
{
        if (nest)
                irq_set_status_flags(irq, IRQ_NESTED_THREAD);
        else
                irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
}

static inline void irq_set_percpu_devid_flags(unsigned int irq)
{
        irq_set_status_flags(irq,
                             IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
                             IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
}

extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
extern int irq_set_handler_data(unsigned int irq, void *data);
extern int irq_set_chip_data(unsigned int irq, void *data);
extern int irq_set_irq_type(unsigned int irq, unsigned int type);
extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
                                struct msi_desc *entry);
extern struct irq_data *irq_get_irq_data(unsigned int irq);

static inline struct irq_chip *irq_get_chip(unsigned int irq)
{
        struct irq_data *d = irq_get_irq_data(irq);
        return d ? d->chip : NULL;
}

static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
{
        return d->chip;
}

static inline void *irq_get_chip_data(unsigned int irq)
{
        struct irq_data *d = irq_get_irq_data(irq);
        return d ? d->chip_data : NULL;
}

static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
{
        return d->chip_data;
}

static inline void *irq_get_handler_data(unsigned int irq)
{
        struct irq_data *d = irq_get_irq_data(irq);
        return d ? d->common->handler_data : NULL;
}

static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
{
        return d->common->handler_data;
}

static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
        struct irq_data *d = irq_get_irq_data(irq);
        return d ? d->common->msi_desc : NULL;
}

static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
{
        return d->common->msi_desc;
}

static inline u32 irq_get_trigger_type(unsigned int irq)
{
        struct irq_data *d = irq_get_irq_data(irq);
        return d ? irqd_get_trigger_type(d) : 0;
}

static inline int irq_common_data_get_node(struct irq_common_data *d)
{
#ifdef CONFIG_NUMA
        return d->node;
#else
        return 0;
#endif
}

static inline int irq_data_get_node(struct irq_data *d)
{
        return irq_common_data_get_node(d->common);
}

static inline struct cpumask *irq_get_affinity_mask(int irq)
{
        struct irq_data *d = irq_get_irq_data(irq);

        return d ? d->common->affinity : NULL;
}

static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
        return d->common->affinity;
}

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static inline
struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
        return d->common->effective_affinity;
}
static inline void irq_data_update_effective_affinity(struct irq_data *d,
                                                      const struct cpumask *m)
{
        cpumask_copy(d->common->effective_affinity, m);
}
#else
static inline void irq_data_update_effective_affinity(struct irq_data *d,
                                                      const struct cpumask *m)
{
}
static inline
struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
        return d->common->affinity;
}
#endif

unsigned int arch_dynirq_lower_bound(unsigned int from);

int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
                      struct module *owner, const struct cpumask *affinity);

int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
                           unsigned int cnt, int node, struct module *owner,
                           const struct cpumask *affinity);

#define irq_alloc_descs(irq, from, cnt, node)   \
        __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)

#define irq_alloc_desc(node)                    \
        irq_alloc_descs(-1, 0, 1, node)

#define irq_alloc_desc_at(at, node)             \
        irq_alloc_descs(at, at, 1, node)

#define irq_alloc_desc_from(from, node)         \
        irq_alloc_descs(-1, from, 1, node)

#define irq_alloc_descs_from(from, cnt, node)   \
        irq_alloc_descs(-1, from, cnt, node)

#define devm_irq_alloc_descs(dev, irq, from, cnt, node)         \
        __devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)

#define devm_irq_alloc_desc(dev, node)                          \
        devm_irq_alloc_descs(dev, -1, 0, 1, node)

#define devm_irq_alloc_desc_at(dev, at, node)                   \
        devm_irq_alloc_descs(dev, at, at, 1, node)

#define devm_irq_alloc_desc_from(dev, from, node)               \
        devm_irq_alloc_descs(dev, -1, from, 1, node)

#define devm_irq_alloc_descs_from(dev, from, cnt, node)         \
        devm_irq_alloc_descs(dev, -1, from, cnt, node)

void irq_free_descs(unsigned int irq, unsigned int cnt);
static inline void irq_free_desc(unsigned int irq)
{
        irq_free_descs(irq, 1);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
unsigned int irq_alloc_hwirqs(int cnt, int node);
static inline unsigned int irq_alloc_hwirq(int node)
{
        return irq_alloc_hwirqs(1, node);
}
void irq_free_hwirqs(unsigned int from, int cnt);
static inline void irq_free_hwirq(unsigned int irq)
{
        irq_free_hwirqs(irq, 1);
}
int arch_setup_hwirq(unsigned int irq, int node);
void arch_teardown_hwirq(unsigned int irq);
#endif

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq);
#endif

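/**
 * struct irq_chip_regs - register offsets for struct irq_chip_generic
 * @enable:	Enable register offset to reg_base
 * @disable:	Disable register offset to reg_base
 * @mask:	Mask register offset to reg_base
 * @ack:	Ack register offset to reg_base
 * @eoi:	Eoi register offset to reg_base
 * @type:	Type configuration register offset to reg_base
 * @polarity:	Polarity configuration register offset to reg_base
 */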
struct irq_chip_regs {
        unsigned long           enable;
        unsigned long           disable;
        unsigned long           mask;
        unsigned long           ack;
        unsigned long           eoi;
        unsigned long           type;
        unsigned long           polarity;
};

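/**
 * struct irq_chip_type - Generic interrupt chip instance for a flow type
 * @chip:		The real interrupt chip which provides the callbacks
 * @regs:		Register offsets for this chip
 * @handler:		Flow handler associated with this chip
 * @type:		Chip can handle these flow types
 * @mask_cache_priv:	Cached mask register private to the chip type
 * @mask_cache:		Pointer to cached mask register
 *
 * An irq_chip_generic can have several instances of irq_chip_type when
 * it requires different functions and register offsets for different
 * flow types.
 */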
struct irq_chip_type {
        struct irq_chip         chip;
        struct irq_chip_regs    regs;
        irq_flow_handler_t      handler;
        u32                     type;
        u32                     mask_cache_priv;
        u32                     *mask_cache;
};

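/**
 * struct irq_chip_generic - Generic irq chip data structure
 * @lock:		Lock to protect register and cache data access
 * @reg_base:		Register base address (virtual)
 * @reg_readl:		Alternate I/O accessor (defaults to readl if NULL)
 * @reg_writel:		Alternate I/O accessor (defaults to writel if NULL)
 * @suspend:		Function called from core code on suspend once per chip
 * @resume:		Function called from core code on resume once per chip
 * @irq_base:		Interrupt base nr for this chip
 * @irq_cnt:		Number of interrupts handled by this chip
 * @mask_cache:		Cached mask register shared between all chip types
 * @type_cache:		Cached type register
 * @polarity_cache:	Cached polarity register
 * @wake_enabled:	Interrupt can wakeup from suspend
 * @wake_active:	Interrupt is marked as an wakeup from suspend source
 * @num_ct:		Number of available irq_chip_type instances (usually 1)
 * @private:		Private data for non generic chip callbacks
 * @installed:		bitfield to denote installed interrupts
 * @unused:		bitfield to denote unused interrupts
 * @domain:		irq domain pointer
 * @list:		List head for keeping track of instances
 * @chip_types:		Array of interrupt irq_chip_types
 *
 * Note that irq_chip_generic can have multiple irq_chip_type
 * implementations which can be associated to a particular irq line of
 * an irq_chip_generic instance. That allows sharing and protecting
 * state in an irq_chip_generic instance when different flow mechanisms
 * (level/edge) need to be implemented for it.
 */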
struct irq_chip_generic {
        raw_spinlock_t          lock;
        void __iomem            *reg_base;
        u32                     (*reg_readl)(void __iomem *addr);
        void                    (*reg_writel)(u32 val, void __iomem *addr);
        void                    (*suspend)(struct irq_chip_generic *gc);
        void                    (*resume)(struct irq_chip_generic *gc);
        unsigned int            irq_base;
        unsigned int            irq_cnt;
        u32                     mask_cache;
        u32                     type_cache;
        u32                     polarity_cache;
        u32                     wake_enabled;
        u32                     wake_active;
        unsigned int            num_ct;
        void                    *private;
        unsigned long           installed;
        unsigned long           unused;
        struct irq_domain       *domain;
        struct list_head        list;
        struct irq_chip_type    chip_types[0];
};

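/**
 * enum irq_gc_flags - Initialization flags for generic irq chips
 * @IRQ_GC_INIT_MASK_CACHE:	Initialize the mask_cache by reading mask reg
 * @IRQ_GC_INIT_NESTED_LOCK:	Set the lock class of the irqs to nested for
 *				irq chips which need to call irq_set_wake() on
 *				the parent irq. Usually GPIO implementations
 * @IRQ_GC_MASK_CACHE_PER_TYPE:	Mask cache is chip type private
 * @IRQ_GC_NO_MASK:		Do not calculate irq_data->mask
 * @IRQ_GC_BE_IO:		Use big-endian register accesses (default: LE)
 */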
enum irq_gc_flags {
        IRQ_GC_INIT_MASK_CACHE          = 1 << 0,
        IRQ_GC_INIT_NESTED_LOCK         = 1 << 1,
        IRQ_GC_MASK_CACHE_PER_TYPE      = 1 << 2,
        IRQ_GC_NO_MASK                  = 1 << 3,
        IRQ_GC_BE_IO                    = 1 << 4,
};

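/**
 * struct irq_domain_chip_generic - Generic irq chip data structure for irq domains
 * @irqs_per_chip:	Number of interrupts per chip
 * @num_chips:		Number of chips
 * @irq_flags_to_clear:	IRQ_* flags to clear in the mapping function
 * @irq_flags_to_set:	IRQ_* flags to set in the mapping function
 * @gc_flags:		Generic chip specific setup flags
 * @gc:			Array of pointers to generic interrupt chips
 */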
struct irq_domain_chip_generic {
        unsigned int            irqs_per_chip;
        unsigned int            num_chips;
        unsigned int            irq_flags_to_clear;
        unsigned int            irq_flags_to_set;
        enum irq_gc_flags       gc_flags;
        struct irq_chip_generic *gc[0];
};

void irq_gc_noop(struct irq_data *d);
void irq_gc_mask_disable_reg(struct irq_data *d);
void irq_gc_mask_set_bit(struct irq_data *d);
void irq_gc_mask_clr_bit(struct irq_data *d);
void irq_gc_unmask_enable_reg(struct irq_data *d);
void irq_gc_ack_set_bit(struct irq_data *d);
void irq_gc_ack_clr_bit(struct irq_data *d);
void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
void irq_gc_eoi(struct irq_data *d);
int irq_gc_set_wake(struct irq_data *d, unsigned int on);

int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
                         irq_hw_number_t hw_irq);
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
                       void __iomem *reg_base, irq_flow_handler_t handler);
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
                            enum irq_gc_flags flags, unsigned int clr,
                            unsigned int set);
int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
                             unsigned int clr, unsigned int set);

struct irq_chip_generic *
devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct,
                            unsigned int irq_base, void __iomem *reg_base,
                            irq_flow_handler_t handler);
int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc,
                                u32 msk, enum irq_gc_flags flags,
                                unsigned int clr, unsigned int set);

struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);

int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
                                     int num_ct, const char *name,
                                     irq_flow_handler_t handler,
                                     unsigned int clr, unsigned int set,
                                     enum irq_gc_flags flags);

#define irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name, \
                                       handler, clr, set, flags)       \
({                                                                      \
        MAYBE_BUILD_BUG_ON(irqs_per_chip > 32);                         \
        __irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,\
                                         handler, clr, set, flags);     \
})

static inline void irq_free_generic_chip(struct irq_chip_generic *gc)
{
        kfree(gc);
}

static inline void irq_destroy_generic_chip(struct irq_chip_generic *gc,
                                            u32 msk, unsigned int clr,
                                            unsigned int set)
{
        irq_remove_generic_chip(gc, msk, clr, set);
        irq_free_generic_chip(gc);
}

static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
{
        return container_of(d->chip, struct irq_chip_type, chip);
}

#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)

#ifdef CONFIG_SMP
static inline void irq_gc_lock(struct irq_chip_generic *gc)
{
        raw_spin_lock(&gc->lock);
}

static inline void irq_gc_unlock(struct irq_chip_generic *gc)
{
        raw_spin_unlock(&gc->lock);
}
#else
static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif

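/*
 * The irqsave variants are for usage in non interrupt code. Do not use
 * them in irq_chip callbacks. Use irq_gc_lock() instead.
 */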
#define irq_gc_lock_irqsave(gc, flags)  \
        raw_spin_lock_irqsave(&(gc)->lock, flags)

#define irq_gc_unlock_irqrestore(gc, flags)     \
        raw_spin_unlock_irqrestore(&(gc)->lock, flags)

static inline void irq_reg_writel(struct irq_chip_generic *gc,
                                  u32 val, int reg_offset)
{
        if (gc->reg_writel)
                gc->reg_writel(val, gc->reg_base + reg_offset);
        else
                writel(val, gc->reg_base + reg_offset);
}

static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
                                int reg_offset)
{
        if (gc->reg_readl)
                return gc->reg_readl(gc->reg_base + reg_offset);
        else
                return readl(gc->reg_base + reg_offset);
}

struct irq_matrix;
struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
                                    unsigned int alloc_start,
                                    unsigned int alloc_end);
void irq_matrix_online(struct irq_matrix *m);
void irq_matrix_offline(struct irq_matrix *m);
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
void irq_matrix_reserve(struct irq_matrix *m);
void irq_matrix_remove_reserved(struct irq_matrix *m);
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
                     bool reserved, unsigned int *mapped_cpu);
void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
                     unsigned int bit, bool managed);
void irq_matrix_assign(struct irq_matrix *m, unsigned int bit);
unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown);
unsigned int irq_matrix_allocated(struct irq_matrix *m);
unsigned int irq_matrix_reserved(struct irq_matrix *m);
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind);

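/* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */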
#define INVALID_HWIRQ   (~0UL)
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
int __ipi_send_single(struct irq_desc *desc, unsigned int cpu);
int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
int ipi_send_single(unsigned int virq, unsigned int cpu);
int ipi_send_mask(unsigned int virq, const struct cpumask *dest);

#endif /* _LINUX_IRQ_H */