#ifndef __KVM_ARM_VGIC_NEW_H__
#define __KVM_ARM_VGIC_NEW_H__

#include <linux/irqchip/arm-gic-common.h>

#define PRODUCT_ID_KVM 0x4b
#define IMPLEMENTER_ARM 0x43b

#define VGIC_ADDR_UNDEF (-1)
#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)

#define INTERRUPT_ID_BITS_SPIS 10
#define INTERRUPT_ID_BITS_ITS 16
#define VGIC_PRI_BITS 5

#define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS)

#define VGIC_AFFINITY_0_SHIFT 0
#define VGIC_AFFINITY_0_MASK (0xffUL << VGIC_AFFINITY_0_SHIFT)
#define VGIC_AFFINITY_1_SHIFT 8
#define VGIC_AFFINITY_1_MASK (0xffUL << VGIC_AFFINITY_1_SHIFT)
#define VGIC_AFFINITY_2_SHIFT 16
#define VGIC_AFFINITY_2_MASK (0xffUL << VGIC_AFFINITY_2_SHIFT)
#define VGIC_AFFINITY_3_SHIFT 24
#define VGIC_AFFINITY_3_MASK (0xffUL << VGIC_AFFINITY_3_SHIFT)

#define VGIC_AFFINITY_LEVEL(reg, level) \
        ((((reg) & VGIC_AFFINITY_## level ##_MASK) \
        >> VGIC_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
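/*
 * Userspace encodes the CPU affinity differently from the MPIDR register
 * layout; VGIC_TO_MPIDR() converts the VGIC userspace format into the
 * MPIDR register format by shifting each affinity level into place.
 */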
#define VGIC_TO_MPIDR(val) (VGIC_AFFINITY_LEVEL(val, 0) | \
                            VGIC_AFFINITY_LEVEL(val, 1) | \
                            VGIC_AFFINITY_LEVEL(val, 2) | \
                            VGIC_AFFINITY_LEVEL(val, 3))
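/*
 * Field encoding of the GICv3 CPU interface system register IDs used by the
 * KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS device attributes: the masks below
 * extract the Op0/Op1/CRn/CRm/Op2 fields from an attribute ID.
 */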
#define KVM_REG_ARM_VGIC_SYSREG_OP0_MASK 0x000000000000c000
#define KVM_REG_ARM_VGIC_SYSREG_OP0_SHIFT 14
#define KVM_REG_ARM_VGIC_SYSREG_OP1_MASK 0x0000000000003800
#define KVM_REG_ARM_VGIC_SYSREG_OP1_SHIFT 11
#define KVM_REG_ARM_VGIC_SYSREG_CRN_MASK 0x0000000000000780
#define KVM_REG_ARM_VGIC_SYSREG_CRN_SHIFT 7
#define KVM_REG_ARM_VGIC_SYSREG_CRM_MASK 0x0000000000000078
#define KVM_REG_ARM_VGIC_SYSREG_CRM_SHIFT 3
#define KVM_REG_ARM_VGIC_SYSREG_OP2_MASK 0x0000000000000007
#define KVM_REG_ARM_VGIC_SYSREG_OP2_SHIFT 0

#define KVM_DEV_ARM_VGIC_SYSREG_MASK (KVM_REG_ARM_VGIC_SYSREG_OP0_MASK | \
                                      KVM_REG_ARM_VGIC_SYSREG_OP1_MASK | \
                                      KVM_REG_ARM_VGIC_SYSREG_CRN_MASK | \
                                      KVM_REG_ARM_VGIC_SYSREG_CRM_MASK | \
                                      KVM_REG_ARM_VGIC_SYSREG_OP2_MASK)
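/*
 * Field layout of the ITS ABI save/restore table entries (collection,
 * interrupt translation, device and level-1 table entries), as used when
 * saving and restoring ITS state through the arm-vgic-its KVM device.
 */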
#define KVM_ITS_CTE_VALID_SHIFT 63
#define KVM_ITS_CTE_VALID_MASK BIT_ULL(63)
#define KVM_ITS_CTE_RDBASE_SHIFT 16
#define KVM_ITS_CTE_ICID_MASK GENMASK_ULL(15, 0)
#define KVM_ITS_ITE_NEXT_SHIFT 48
#define KVM_ITS_ITE_PINTID_SHIFT 16
#define KVM_ITS_ITE_PINTID_MASK GENMASK_ULL(47, 16)
#define KVM_ITS_ITE_ICID_MASK GENMASK_ULL(15, 0)
#define KVM_ITS_DTE_VALID_SHIFT 63
#define KVM_ITS_DTE_VALID_MASK BIT_ULL(63)
#define KVM_ITS_DTE_NEXT_SHIFT 49
#define KVM_ITS_DTE_NEXT_MASK GENMASK_ULL(62, 49)
#define KVM_ITS_DTE_ITTADDR_SHIFT 5
#define KVM_ITS_DTE_ITTADDR_MASK GENMASK_ULL(48, 5)
#define KVM_ITS_DTE_SIZE_MASK GENMASK_ULL(4, 0)
#define KVM_ITS_L1E_VALID_MASK BIT_ULL(63)
/* we only support 64 kB translation table page size */
#define KVM_ITS_L1E_ADDR_MASK GENMASK_ULL(51, 16)

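/*
 * 64-bit layout of the KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION attribute:
 * region index in bits [11:0], flags in [15:12], base address bits [51:16]
 * and the redistributor count in [63:52].
 */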
#define KVM_VGIC_V3_RDIST_INDEX_MASK GENMASK_ULL(11, 0)
#define KVM_VGIC_V3_RDIST_FLAGS_MASK GENMASK_ULL(15, 12)
#define KVM_VGIC_V3_RDIST_FLAGS_SHIFT 12
#define KVM_VGIC_V3_RDIST_BASE_MASK GENMASK_ULL(51, 16)
#define KVM_VGIC_V3_RDIST_COUNT_MASK GENMASK_ULL(63, 52)
#define KVM_VGIC_V3_RDIST_COUNT_SHIFT 52

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif

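/*
 * An edge-triggered interrupt is pending only via its pending latch; a
 * level-triggered interrupt is also pending while the (emulated or
 * hardware-sampled) line level is high.
 */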
static inline bool irq_is_pending(struct vgic_irq *irq)
{
        if (irq->config == VGIC_CONFIG_EDGE)
                return irq->pending_latch;
        else
                return irq->pending_latch || irq->line_level;
}

static inline bool vgic_irq_is_mapped_level(struct vgic_irq *irq)
{
        return irq->config == VGIC_CONFIG_LEVEL && irq->hw;
}

static inline int vgic_irq_get_lr_count(struct vgic_irq *irq)
{
        /* Account for the active state as an interrupt */
        if (vgic_irq_is_sgi(irq->intid) && irq->source)
                return hweight8(irq->source) + irq->active;

        return irq_is_pending(irq) || irq->active;
}

static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
{
        return vgic_irq_get_lr_count(irq) > 1;
}
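/*
 * Intermediate representation of the GICH_VMCR / ICH_VMCR_EL2 fields, so
 * that code exporting GIC state to userspace can produce either GICv2 or
 * GICv3 CPU interface registers regardless of the hardware GIC in use.
 */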
struct vgic_vmcr {
        u32 grpen0;
        u32 grpen1;

        u32 ackctl;
        u32 fiqen;
        u32 cbpr;
        u32 eoim;

        u32 abpr;
        u32 bpr;
        u32 pmr;  /* GICC_PMR and ICC_PMR_EL1 priority field format */
};

struct vgic_reg_attr {
        struct kvm_vcpu *vcpu;
        gpa_t addr;
};

int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr);
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr);
const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
                     gpa_t addr, int len);
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
                              u32 intid);
void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq);
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
bool vgic_get_phys_line_level(struct vgic_irq *irq);
void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending);
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active);
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
                           unsigned long flags);
void vgic_kick_vcpus(struct kvm *kvm);

int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
                      phys_addr_t addr, phys_addr_t alignment);

void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
void vgic_v2_set_npie(struct kvm_vcpu *vcpu);
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                         int offset, u32 *val);
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                          int offset, u32 *val);
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v2_enable(struct kvm_vcpu *vcpu);
int vgic_v2_probe(const struct gic_kvm_info *info);
int vgic_v2_map_resources(struct kvm *kvm);
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
                             enum vgic_type);

void vgic_v2_init_lrs(void);
void vgic_v2_load(struct kvm_vcpu *vcpu);
void vgic_v2_put(struct kvm_vcpu *vcpu);
void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);

void vgic_v2_save_state(struct kvm_vcpu *vcpu);
void vgic_v2_restore_state(struct kvm_vcpu *vcpu);

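/*
 * Only LPIs have their struct vgic_irq allocated dynamically and reference
 * counted; interrupts with an intid below VGIC_MIN_LPI live in statically
 * allocated arrays and are never freed, so no reference needs to be taken.
 */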
static inline void vgic_get_irq_kref(struct vgic_irq *irq)
{
        if (irq->intid < VGIC_MIN_LPI)
                return;

        kref_get(&irq->refcount);
}

void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
void vgic_v3_set_npie(struct kvm_vcpu *vcpu);
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_enable(struct kvm_vcpu *vcpu);
int vgic_v3_probe(const struct gic_kvm_info *info);
int vgic_v3_map_resources(struct kvm *kvm);
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq);
int vgic_v3_save_pending_tables(struct kvm *kvm);
int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
bool vgic_v3_check_base(struct kvm *kvm);

void vgic_v3_load(struct kvm_vcpu *vcpu);
void vgic_v3_put(struct kvm_vcpu *vcpu);
void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);

bool vgic_has_its(struct kvm *kvm);
int kvm_vgic_register_its_device(void);
void vgic_enable_lpis(struct kvm_vcpu *vcpu);
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu);
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                         int offset, u32 *val);
int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                           int offset, u32 *val);
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                                u64 id, u64 *val);
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
                                 u64 *reg);
int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                                    u32 intid, u64 *val);
int kvm_register_vgic_device(unsigned long type);
void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
int vgic_lazy_init(struct kvm *kvm);
int vgic_init(struct kvm *kvm);

void vgic_debug_init(struct kvm *kvm);
void vgic_debug_destroy(struct kvm *kvm);

bool lock_all_vcpus(struct kvm *kvm);
void unlock_all_vcpus(struct kvm *kvm);

static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;

        /*
         * num_pri_bits is initialized from the hardware supported value, so
         * it can be relied upon even if the guest has not restored
         * ICC_CTLR_EL1 before the APnR registers are restored.
         */
        switch (cpu_if->num_pri_bits) {
        case 7: return 3;
        case 6: return 1;
        default: return 0;
        }
}

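/*
 * A redistributor region created with count == 0 is the single "legacy"
 * region: it is sized to hold one redistributor per online VCPU and is
 * therefore never reported as full.
 */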
static inline bool
vgic_v3_redist_region_full(struct vgic_redist_region *region)
{
        if (!region->count)
                return false;

        return (region->free_index >= region->count);
}

struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rdregs);

static inline size_t
vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
{
        if (!rdreg->count)
                return atomic_read(&kvm->online_vcpus) * KVM_VGIC_V3_REDIST_SIZE;
        else
                return rdreg->count * KVM_VGIC_V3_REDIST_SIZE;
}

struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
                                                           u32 index);

bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);

static inline bool vgic_dist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
        struct vgic_dist *d = &kvm->arch.vgic;

        return (base + size > d->vgic_dist_base) &&
                (base < d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE);
}

int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr);
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
                         u32 devid, u32 eventid, struct vgic_irq **irq);
struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi);
void vgic_lpi_translation_cache_init(struct kvm *kvm);
void vgic_lpi_translation_cache_destroy(struct kvm *kvm);
void vgic_its_invalidate_cache(struct kvm *kvm);

bool vgic_supports_direct_msis(struct kvm *kvm);
int vgic_v4_init(struct kvm *kvm);
void vgic_v4_teardown(struct kvm *kvm);
int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu);
int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu);

#endif