1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * VFIO platform devices interrupt handling |
4 | * |
5 | * Copyright (C) 2013 - Virtual Open Systems |
6 | * Author: Antonios Motakis <a.motakis@virtualopensystems.com> |
7 | */ |
8 | |
9 | #include <linux/eventfd.h> |
10 | #include <linux/interrupt.h> |
11 | #include <linux/slab.h> |
12 | #include <linux/types.h> |
13 | #include <linux/vfio.h> |
14 | #include <linux/irq.h> |
15 | |
16 | #include "vfio_platform_private.h" |
17 | |
18 | static void vfio_platform_mask(struct vfio_platform_irq *irq_ctx) |
19 | { |
20 | unsigned long flags; |
21 | |
22 | spin_lock_irqsave(&irq_ctx->lock, flags); |
23 | |
24 | if (!irq_ctx->masked) { |
25 | disable_irq_nosync(irq: irq_ctx->hwirq); |
26 | irq_ctx->masked = true; |
27 | } |
28 | |
29 | spin_unlock_irqrestore(lock: &irq_ctx->lock, flags); |
30 | } |
31 | |
/* virqfd callback: mask the interrupt described by the opaque context. */
static int vfio_platform_mask_handler(void *opaque, void *unused)
{
	vfio_platform_mask((struct vfio_platform_irq *)opaque);
	return 0;
}
40 | |
41 | static int vfio_platform_set_irq_mask(struct vfio_platform_device *vdev, |
42 | unsigned index, unsigned start, |
43 | unsigned count, uint32_t flags, |
44 | void *data) |
45 | { |
46 | if (start != 0 || count != 1) |
47 | return -EINVAL; |
48 | |
49 | if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE)) |
50 | return -EINVAL; |
51 | |
52 | if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { |
53 | int32_t fd = *(int32_t *)data; |
54 | |
55 | if (fd >= 0) |
56 | return vfio_virqfd_enable(opaque: (void *) &vdev->irqs[index], |
57 | handler: vfio_platform_mask_handler, |
58 | NULL, NULL, |
59 | pvirqfd: &vdev->irqs[index].mask, fd); |
60 | |
61 | vfio_virqfd_disable(pvirqfd: &vdev->irqs[index].mask); |
62 | return 0; |
63 | } |
64 | |
65 | if (flags & VFIO_IRQ_SET_DATA_NONE) { |
66 | vfio_platform_mask(irq_ctx: &vdev->irqs[index]); |
67 | |
68 | } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { |
69 | uint8_t mask = *(uint8_t *)data; |
70 | |
71 | if (mask) |
72 | vfio_platform_mask(irq_ctx: &vdev->irqs[index]); |
73 | } |
74 | |
75 | return 0; |
76 | } |
77 | |
78 | static void vfio_platform_unmask(struct vfio_platform_irq *irq_ctx) |
79 | { |
80 | unsigned long flags; |
81 | |
82 | spin_lock_irqsave(&irq_ctx->lock, flags); |
83 | |
84 | if (irq_ctx->masked) { |
85 | enable_irq(irq: irq_ctx->hwirq); |
86 | irq_ctx->masked = false; |
87 | } |
88 | |
89 | spin_unlock_irqrestore(lock: &irq_ctx->lock, flags); |
90 | } |
91 | |
/* virqfd callback: unmask the interrupt described by the opaque context. */
static int vfio_platform_unmask_handler(void *opaque, void *unused)
{
	vfio_platform_unmask((struct vfio_platform_irq *)opaque);
	return 0;
}
100 | |
101 | static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev, |
102 | unsigned index, unsigned start, |
103 | unsigned count, uint32_t flags, |
104 | void *data) |
105 | { |
106 | if (start != 0 || count != 1) |
107 | return -EINVAL; |
108 | |
109 | if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE)) |
110 | return -EINVAL; |
111 | |
112 | if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { |
113 | int32_t fd = *(int32_t *)data; |
114 | |
115 | if (fd >= 0) |
116 | return vfio_virqfd_enable(opaque: (void *) &vdev->irqs[index], |
117 | handler: vfio_platform_unmask_handler, |
118 | NULL, NULL, |
119 | pvirqfd: &vdev->irqs[index].unmask, |
120 | fd); |
121 | |
122 | vfio_virqfd_disable(pvirqfd: &vdev->irqs[index].unmask); |
123 | return 0; |
124 | } |
125 | |
126 | if (flags & VFIO_IRQ_SET_DATA_NONE) { |
127 | vfio_platform_unmask(irq_ctx: &vdev->irqs[index]); |
128 | |
129 | } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { |
130 | uint8_t unmask = *(uint8_t *)data; |
131 | |
132 | if (unmask) |
133 | vfio_platform_unmask(irq_ctx: &vdev->irqs[index]); |
134 | } |
135 | |
136 | return 0; |
137 | } |
138 | |
139 | static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id) |
140 | { |
141 | struct vfio_platform_irq *irq_ctx = dev_id; |
142 | unsigned long flags; |
143 | int ret = IRQ_NONE; |
144 | |
145 | spin_lock_irqsave(&irq_ctx->lock, flags); |
146 | |
147 | if (!irq_ctx->masked) { |
148 | ret = IRQ_HANDLED; |
149 | |
150 | /* automask maskable interrupts */ |
151 | disable_irq_nosync(irq: irq_ctx->hwirq); |
152 | irq_ctx->masked = true; |
153 | } |
154 | |
155 | spin_unlock_irqrestore(lock: &irq_ctx->lock, flags); |
156 | |
157 | if (ret == IRQ_HANDLED) |
158 | eventfd_signal(ctx: irq_ctx->trigger, n: 1); |
159 | |
160 | return ret; |
161 | } |
162 | |
163 | static irqreturn_t vfio_irq_handler(int irq, void *dev_id) |
164 | { |
165 | struct vfio_platform_irq *irq_ctx = dev_id; |
166 | |
167 | eventfd_signal(ctx: irq_ctx->trigger, n: 1); |
168 | |
169 | return IRQ_HANDLED; |
170 | } |
171 | |
172 | static int vfio_set_trigger(struct vfio_platform_device *vdev, int index, |
173 | int fd, irq_handler_t handler) |
174 | { |
175 | struct vfio_platform_irq *irq = &vdev->irqs[index]; |
176 | struct eventfd_ctx *trigger; |
177 | int ret; |
178 | |
179 | if (irq->trigger) { |
180 | irq_clear_status_flags(irq: irq->hwirq, clr: IRQ_NOAUTOEN); |
181 | free_irq(irq->hwirq, irq); |
182 | kfree(objp: irq->name); |
183 | eventfd_ctx_put(ctx: irq->trigger); |
184 | irq->trigger = NULL; |
185 | } |
186 | |
187 | if (fd < 0) /* Disable only */ |
188 | return 0; |
189 | irq->name = kasprintf(GFP_KERNEL_ACCOUNT, fmt: "vfio-irq[%d](%s)" , |
190 | irq->hwirq, vdev->name); |
191 | if (!irq->name) |
192 | return -ENOMEM; |
193 | |
194 | trigger = eventfd_ctx_fdget(fd); |
195 | if (IS_ERR(ptr: trigger)) { |
196 | kfree(objp: irq->name); |
197 | return PTR_ERR(ptr: trigger); |
198 | } |
199 | |
200 | irq->trigger = trigger; |
201 | |
202 | irq_set_status_flags(irq: irq->hwirq, set: IRQ_NOAUTOEN); |
203 | ret = request_irq(irq: irq->hwirq, handler, flags: 0, name: irq->name, dev: irq); |
204 | if (ret) { |
205 | kfree(objp: irq->name); |
206 | eventfd_ctx_put(ctx: trigger); |
207 | irq->trigger = NULL; |
208 | return ret; |
209 | } |
210 | |
211 | if (!irq->masked) |
212 | enable_irq(irq: irq->hwirq); |
213 | |
214 | return 0; |
215 | } |
216 | |
217 | static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev, |
218 | unsigned index, unsigned start, |
219 | unsigned count, uint32_t flags, |
220 | void *data) |
221 | { |
222 | struct vfio_platform_irq *irq = &vdev->irqs[index]; |
223 | irq_handler_t handler; |
224 | |
225 | if (vdev->irqs[index].flags & VFIO_IRQ_INFO_AUTOMASKED) |
226 | handler = vfio_automasked_irq_handler; |
227 | else |
228 | handler = vfio_irq_handler; |
229 | |
230 | if (!count && (flags & VFIO_IRQ_SET_DATA_NONE)) |
231 | return vfio_set_trigger(vdev, index, fd: -1, handler); |
232 | |
233 | if (start != 0 || count != 1) |
234 | return -EINVAL; |
235 | |
236 | if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { |
237 | int32_t fd = *(int32_t *)data; |
238 | |
239 | return vfio_set_trigger(vdev, index, fd, handler); |
240 | } |
241 | |
242 | if (flags & VFIO_IRQ_SET_DATA_NONE) { |
243 | handler(irq->hwirq, irq); |
244 | |
245 | } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { |
246 | uint8_t trigger = *(uint8_t *)data; |
247 | |
248 | if (trigger) |
249 | handler(irq->hwirq, irq); |
250 | } |
251 | |
252 | return 0; |
253 | } |
254 | |
255 | int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev, |
256 | uint32_t flags, unsigned index, unsigned start, |
257 | unsigned count, void *data) |
258 | { |
259 | int (*func)(struct vfio_platform_device *vdev, unsigned index, |
260 | unsigned start, unsigned count, uint32_t flags, |
261 | void *data) = NULL; |
262 | |
263 | switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { |
264 | case VFIO_IRQ_SET_ACTION_MASK: |
265 | func = vfio_platform_set_irq_mask; |
266 | break; |
267 | case VFIO_IRQ_SET_ACTION_UNMASK: |
268 | func = vfio_platform_set_irq_unmask; |
269 | break; |
270 | case VFIO_IRQ_SET_ACTION_TRIGGER: |
271 | func = vfio_platform_set_irq_trigger; |
272 | break; |
273 | } |
274 | |
275 | if (!func) |
276 | return -ENOTTY; |
277 | |
278 | return func(vdev, index, start, count, flags, data); |
279 | } |
280 | |
281 | int vfio_platform_irq_init(struct vfio_platform_device *vdev) |
282 | { |
283 | int cnt = 0, i; |
284 | |
285 | while (vdev->get_irq(vdev, cnt) >= 0) |
286 | cnt++; |
287 | |
288 | vdev->irqs = kcalloc(n: cnt, size: sizeof(struct vfio_platform_irq), |
289 | GFP_KERNEL_ACCOUNT); |
290 | if (!vdev->irqs) |
291 | return -ENOMEM; |
292 | |
293 | for (i = 0; i < cnt; i++) { |
294 | int hwirq = vdev->get_irq(vdev, i); |
295 | |
296 | if (hwirq < 0) |
297 | goto err; |
298 | |
299 | spin_lock_init(&vdev->irqs[i].lock); |
300 | |
301 | vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD; |
302 | |
303 | if (irq_get_trigger_type(irq: hwirq) & IRQ_TYPE_LEVEL_MASK) |
304 | vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE |
305 | | VFIO_IRQ_INFO_AUTOMASKED; |
306 | |
307 | vdev->irqs[i].count = 1; |
308 | vdev->irqs[i].hwirq = hwirq; |
309 | vdev->irqs[i].masked = false; |
310 | } |
311 | |
312 | vdev->num_irqs = cnt; |
313 | |
314 | return 0; |
315 | err: |
316 | kfree(objp: vdev->irqs); |
317 | return -EINVAL; |
318 | } |
319 | |
320 | void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev) |
321 | { |
322 | int i; |
323 | |
324 | for (i = 0; i < vdev->num_irqs; i++) |
325 | vfio_set_trigger(vdev, index: i, fd: -1, NULL); |
326 | |
327 | vdev->num_irqs = 0; |
328 | kfree(objp: vdev->irqs); |
329 | } |
330 | |