/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */
#ifndef __KVM_ARM_VGIC_NEW_H__
#define __KVM_ARM_VGIC_NEW_H__

#include <linux/irqchip/arm-gic-common.h>
#include <asm/kvm_mmu.h>

#define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
#define IMPLEMENTER_ARM		0x43b

#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)	((_x) == VGIC_ADDR_UNDEF)

#define INTERRUPT_ID_BITS_SPIS	10
#define INTERRUPT_ID_BITS_ITS	16
#define VGIC_PRI_BITS		5

#define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS)

#define VGIC_AFFINITY_0_SHIFT 0
#define VGIC_AFFINITY_0_MASK (0xffUL << VGIC_AFFINITY_0_SHIFT)
#define VGIC_AFFINITY_1_SHIFT 8
#define VGIC_AFFINITY_1_MASK (0xffUL << VGIC_AFFINITY_1_SHIFT)
#define VGIC_AFFINITY_2_SHIFT 16
#define VGIC_AFFINITY_2_MASK (0xffUL << VGIC_AFFINITY_2_SHIFT)
#define VGIC_AFFINITY_3_SHIFT 24
#define VGIC_AFFINITY_3_MASK (0xffUL << VGIC_AFFINITY_3_SHIFT)

#define VGIC_AFFINITY_LEVEL(reg, level) \
	((((reg) & VGIC_AFFINITY_## level ##_MASK) \
	>> VGIC_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
/*
 * Userspace encodes the affinity differently from the MPIDR format;
 * the macro below converts the vgic userspace format to the MPIDR
 * register format.
 */
#define VGIC_TO_MPIDR(val)	(VGIC_AFFINITY_LEVEL(val, 0) | \
				 VGIC_AFFINITY_LEVEL(val, 1) | \
				 VGIC_AFFINITY_LEVEL(val, 2) | \
				 VGIC_AFFINITY_LEVEL(val, 3))
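
/*
 * Worked example (illustrative): a userspace affinity value of
 * 0x01020304 (Aff3=0x01, Aff2=0x02, Aff1=0x03, Aff0=0x04) yields
 * VGIC_TO_MPIDR(0x01020304) == 0x0000000100020304, i.e. Aff3 moves
 * from bits [31:24] up to the MPIDR Aff3 field at bits [39:32] while
 * Aff2..Aff0 stay in bits [23:0].
 */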

/*
 * As per Documentation/virt/kvm/devices/arm-vgic-v3.rst, the following
 * macros define the CPUREG encoding.
 */
#define KVM_REG_ARM_VGIC_SYSREG_OP0_MASK	0x000000000000c000
#define KVM_REG_ARM_VGIC_SYSREG_OP0_SHIFT	14
#define KVM_REG_ARM_VGIC_SYSREG_OP1_MASK	0x0000000000003800
#define KVM_REG_ARM_VGIC_SYSREG_OP1_SHIFT	11
#define KVM_REG_ARM_VGIC_SYSREG_CRN_MASK	0x0000000000000780
#define KVM_REG_ARM_VGIC_SYSREG_CRN_SHIFT	7
#define KVM_REG_ARM_VGIC_SYSREG_CRM_MASK	0x0000000000000078
#define KVM_REG_ARM_VGIC_SYSREG_CRM_SHIFT	3
#define KVM_REG_ARM_VGIC_SYSREG_OP2_MASK	0x0000000000000007
#define KVM_REG_ARM_VGIC_SYSREG_OP2_SHIFT	0

#define KVM_DEV_ARM_VGIC_SYSREG_MASK (KVM_REG_ARM_VGIC_SYSREG_OP0_MASK | \
				      KVM_REG_ARM_VGIC_SYSREG_OP1_MASK | \
				      KVM_REG_ARM_VGIC_SYSREG_CRN_MASK | \
				      KVM_REG_ARM_VGIC_SYSREG_CRM_MASK | \
				      KVM_REG_ARM_VGIC_SYSREG_OP2_MASK)
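
/*
 * Encoding sketch (assuming the standard aarch64 encoding of
 * ICC_PMR_EL1: op0=3, op1=0, CRn=4, CRm=6, op2=0): the id within
 * KVM_DEV_ARM_VGIC_SYSREG_MASK would be
 *
 *	(3 << KVM_REG_ARM_VGIC_SYSREG_OP0_SHIFT) |
 *	(4 << KVM_REG_ARM_VGIC_SYSREG_CRN_SHIFT) |
 *	(6 << KVM_REG_ARM_VGIC_SYSREG_CRM_SHIFT) == 0xc230
 *
 * and each field can be pulled back out with (id & _MASK) >> _SHIFT.
 */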

/*
 * As per Documentation/virt/kvm/devices/arm-vgic-its.rst, the following
 * macros define the ITS table entry encoding.
 */
#define KVM_ITS_CTE_VALID_SHIFT		63
#define KVM_ITS_CTE_VALID_MASK		BIT_ULL(63)
#define KVM_ITS_CTE_RDBASE_SHIFT	16
#define KVM_ITS_CTE_ICID_MASK		GENMASK_ULL(15, 0)
#define KVM_ITS_ITE_NEXT_SHIFT		48
#define KVM_ITS_ITE_PINTID_SHIFT	16
#define KVM_ITS_ITE_PINTID_MASK		GENMASK_ULL(47, 16)
#define KVM_ITS_ITE_ICID_MASK		GENMASK_ULL(15, 0)
#define KVM_ITS_DTE_VALID_SHIFT		63
#define KVM_ITS_DTE_VALID_MASK		BIT_ULL(63)
#define KVM_ITS_DTE_NEXT_SHIFT		49
#define KVM_ITS_DTE_NEXT_MASK		GENMASK_ULL(62, 49)
#define KVM_ITS_DTE_ITTADDR_SHIFT	5
#define KVM_ITS_DTE_ITTADDR_MASK	GENMASK_ULL(48, 5)
#define KVM_ITS_DTE_SIZE_MASK		GENMASK_ULL(4, 0)
#define KVM_ITS_L1E_VALID_MASK		BIT_ULL(63)
/* we only support 64 kB translation table page size */
#define KVM_ITS_L1E_ADDR_MASK		GENMASK_ULL(51, 16)
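
/*
 * Illustrative composition of a collection table entry (CTE) under
 * this layout (rdbase and icid are hypothetical values):
 *
 *	u64 cte = KVM_ITS_CTE_VALID_MASK |
 *		  ((u64)rdbase << KVM_ITS_CTE_RDBASE_SHIFT) |
 *		  (icid & KVM_ITS_CTE_ICID_MASK);
 *
 * Decoding reverses the steps: test the valid bit, shift out RDBASE,
 * and mask out the ICID.
 */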

#define KVM_VGIC_V3_RDIST_INDEX_MASK	GENMASK_ULL(11, 0)
#define KVM_VGIC_V3_RDIST_FLAGS_MASK	GENMASK_ULL(15, 12)
#define KVM_VGIC_V3_RDIST_FLAGS_SHIFT	12
#define KVM_VGIC_V3_RDIST_BASE_MASK	GENMASK_ULL(51, 16)
#define KVM_VGIC_V3_RDIST_COUNT_MASK	GENMASK_ULL(63, 52)
#define KVM_VGIC_V3_RDIST_COUNT_SHIFT	52
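
/*
 * Minimal decode sketch for a KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION
 * attribute value 'addr' packed with the fields above:
 *
 *	base  = addr & KVM_VGIC_V3_RDIST_BASE_MASK;
 *	count = (addr & KVM_VGIC_V3_RDIST_COUNT_MASK) >>
 *		KVM_VGIC_V3_RDIST_COUNT_SHIFT;
 *	index = addr & KVM_VGIC_V3_RDIST_INDEX_MASK;
 */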

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif
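
/*
 * Typical use (illustrative): assert that a lock is held on debug
 * builds at no cost on production builds, e.g.
 *
 *	DEBUG_SPINLOCK_BUG_ON(!raw_spin_is_locked(&irq->irq_lock));
 */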

static inline u32 vgic_get_implementation_rev(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.vgic.implementation_rev;
}

/* Requires the irq_lock to be held by the caller. */
static inline bool irq_is_pending(struct vgic_irq *irq)
{
	if (irq->config == VGIC_CONFIG_EDGE)
		return irq->pending_latch;
	else
		return irq->pending_latch || irq->line_level;
}

static inline bool vgic_irq_is_mapped_level(struct vgic_irq *irq)
{
	return irq->config == VGIC_CONFIG_LEVEL && irq->hw;
}

static inline int vgic_irq_get_lr_count(struct vgic_irq *irq)
{
	/* Account for the active state as an interrupt */
	if (vgic_irq_is_sgi(irq->intid) && irq->source)
		return hweight8(irq->source) + irq->active;

	return irq_is_pending(irq) || irq->active;
}
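
/*
 * Worked example: an SGI with irq->source == 0b011 (pending from two
 * source CPUs) that is also active occupies hweight8(0b011) + 1 == 3
 * list registers; any other interrupt occupies at most one.
 */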

static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
{
	return vgic_irq_get_lr_count(irq) > 1;
}

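/*
 * Wrapper around kvm_write_guest_lock() that flags the write in
 * dist->table_write_in_progress, so that a vgic/ITS table save writing
 * guest memory without a running vCPU can be recognized as legitimate
 * elsewhere in KVM.
 */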
static inline int vgic_write_guest_lock(struct kvm *kvm, gpa_t gpa,
					const void *data, unsigned long len)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret;

	dist->table_write_in_progress = true;
	ret = kvm_write_guest_lock(kvm, gpa, data, len);
	dist->table_write_in_progress = false;

	return ret;
}

/*
 * This struct provides an intermediate representation of the fields
 * contained in the GICH_VMCR and ICH_VMCR registers, such that code
 * exporting the GIC state to userspace can generate either GICv2 or
 * GICv3 CPU interface registers regardless of the hardware-backed GIC
 * in use.
 */
struct vgic_vmcr {
	u32	grpen0;
	u32	grpen1;

	u32	ackctl;
	u32	fiqen;
	u32	cbpr;
	u32	eoim;

	u32	abpr;
	u32	bpr;
	u32	pmr;  /* Priority mask field in the GICC_PMR and
		       * ICC_PMR_EL1 priority field format */
};

struct vgic_reg_attr {
	struct kvm_vcpu *vcpu;
	gpa_t addr;
};

int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
		       struct vgic_reg_attr *reg_attr);
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
		       struct vgic_reg_attr *reg_attr);
const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len);
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid);
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
bool vgic_get_phys_line_level(struct vgic_irq *irq);
void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending);
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active);
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
			   unsigned long flags);
void vgic_kick_vcpus(struct kvm *kvm);
void vgic_irq_handle_resampling(struct vgic_irq *irq,
				bool lr_deactivated, bool lr_pending);

int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
		       phys_addr_t addr, phys_addr_t alignment,
		       phys_addr_t size);

void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val);
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			  int offset, u32 *val);
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v2_enable(struct kvm_vcpu *vcpu);
int vgic_v2_probe(const struct gic_kvm_info *info);
int vgic_v2_map_resources(struct kvm *kvm);
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type);

void vgic_v2_init_lrs(void);
void vgic_v2_load(struct kvm_vcpu *vcpu);
void vgic_v2_put(struct kvm_vcpu *vcpu);
void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);

void vgic_v2_save_state(struct kvm_vcpu *vcpu);
void vgic_v2_restore_state(struct kvm_vcpu *vcpu);

static inline bool vgic_try_get_irq_kref(struct vgic_irq *irq)
{
	if (!irq)
		return false;

	if (irq->intid < VGIC_MIN_LPI)
		return true;

	return kref_get_unless_zero(&irq->refcount);
}

static inline void vgic_get_irq_kref(struct vgic_irq *irq)
{
	WARN_ON_ONCE(!vgic_try_get_irq_kref(irq));
}
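
/*
 * Minimal usage sketch (illustrative; 'intid' stands for some LPI
 * number): LPI structs are refcounted, so a successful lookup must be
 * balanced with a put once the interrupt is no longer in use.
 *
 *	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid);
 *
 *	if (irq) {
 *		... operate on irq ...
 *		vgic_put_irq(kvm, irq);
 *	}
 */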

void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_enable(struct kvm_vcpu *vcpu);
int vgic_v3_probe(const struct gic_kvm_info *info);
int vgic_v3_map_resources(struct kvm *kvm);
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq);
int vgic_v3_save_pending_tables(struct kvm *kvm);
int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu);
bool vgic_v3_check_base(struct kvm *kvm);

void vgic_v3_load(struct kvm_vcpu *vcpu);
void vgic_v3_put(struct kvm_vcpu *vcpu);
void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);

bool vgic_has_its(struct kvm *kvm);
int kvm_vgic_register_its_device(void);
void vgic_enable_lpis(struct kvm_vcpu *vcpu);
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu);
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val);
int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			   int offset, u32 *val);
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu,
				struct kvm_device_attr *attr, bool is_write);
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
				    u32 intid, u32 *val);
int kvm_register_vgic_device(unsigned long type);
void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
int vgic_lazy_init(struct kvm *kvm);
int vgic_init(struct kvm *kvm);

void vgic_debug_init(struct kvm *kvm);
void vgic_debug_destroy(struct kvm *kvm);

static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;

	/*
	 * num_pri_bits is initialized with the HW-supported value.
	 * We can safely rely on num_pri_bits even if the VM has not
	 * restored ICC_CTLR_EL1 before restoring the APnR registers.
	 */
	switch (cpu_if->num_pri_bits) {
	case 7: return 3;
	case 6: return 1;
	default: return 0;
	}
}
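
/*
 * Worked example: 7 priority bits imply four active priority registers
 * per group (ICH_AP0R0..3), hence a max index of 3; the minimum of
 * 5 bits leaves only ICH_AP0R0, hence index 0.
 */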

static inline bool
vgic_v3_redist_region_full(struct vgic_redist_region *region)
{
	if (!region->count)
		return false;

	return (region->free_index >= region->count);
}

struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rdregs);

static inline size_t
vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
{
	if (!rdreg->count)
		return atomic_read(&kvm->online_vcpus) * KVM_VGIC_V3_REDIST_SIZE;
	else
		return rdreg->count * KVM_VGIC_V3_REDIST_SIZE;
}
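
/*
 * Example (assuming the usual 2 * 64K redistributor frame, i.e.
 * KVM_VGIC_V3_REDIST_SIZE == 0x20000): an rdreg with count == 4 spans
 * 0x80000 bytes, while count == 0 denotes a region sized for all
 * currently online vCPUs.
 */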

struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
							   u32 index);
void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg);

bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);

static inline bool vgic_dist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
	struct vgic_dist *d = &kvm->arch.vgic;

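	/*
	 * Classic overlap test between the half-open intervals
	 * [base, base + size) and [dist_base, dist_base + DIST_SIZE).
	 */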
	return (base + size > d->vgic_dist_base) &&
		(base < d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE);
}

bool vgic_lpis_enabled(struct kvm_vcpu *vcpu);
int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr);
int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
			 u32 devid, u32 eventid, struct vgic_irq **irq);
struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi);
void vgic_lpi_translation_cache_init(struct kvm *kvm);
void vgic_lpi_translation_cache_destroy(struct kvm *kvm);
void vgic_its_invalidate_cache(struct kvm *kvm);

/* GICv4.1 MMIO interface */
int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq);
int vgic_its_invall(struct kvm_vcpu *vcpu);

bool vgic_supports_direct_msis(struct kvm *kvm);
int vgic_v4_init(struct kvm *kvm);
void vgic_v4_teardown(struct kvm *kvm);
void vgic_v4_configure_vsgis(struct kvm *kvm);
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);

#endif /* __KVM_ARM_VGIC_NEW_H__ */