1 | /* |
2 | * 8259 interrupt controller emulation |
3 | * |
4 | * Copyright (c) 2003-2004 Fabrice Bellard |
5 | * Copyright (c) 2007 Intel Corporation |
6 | * Copyright 2009 Red Hat, Inc. and/or its affiliates. |
7 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
9 | * of this software and associated documentation files (the "Software"), to deal |
10 | * in the Software without restriction, including without limitation the rights |
11 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
12 | * copies of the Software, and to permit persons to whom the Software is |
13 | * furnished to do so, subject to the following conditions: |
14 | * |
15 | * The above copyright notice and this permission notice shall be included in |
16 | * all copies or substantial portions of the Software. |
17 | * |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
21 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
22 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
23 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN |
24 | * THE SOFTWARE. |
25 | * Authors: |
26 | * Yaozu (Eddie) Dong <Eddie.dong@intel.com> |
27 | * Port from Qemu. |
28 | */ |
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
30 | |
31 | #include <linux/mm.h> |
32 | #include <linux/slab.h> |
33 | #include <linux/bitops.h> |
34 | #include "irq.h" |
35 | |
36 | #include <linux/kvm_host.h> |
37 | #include "trace.h" |
38 | |
39 | #define pr_pic_unimpl(fmt, ...) \ |
40 | pr_err_ratelimited("pic: " fmt, ## __VA_ARGS__) |
41 | |
42 | static void pic_irq_request(struct kvm *kvm, int level); |
43 | |
44 | static void pic_lock(struct kvm_pic *s) |
45 | __acquires(&s->lock) |
46 | { |
47 | spin_lock(lock: &s->lock); |
48 | } |
49 | |
50 | static void pic_unlock(struct kvm_pic *s) |
51 | __releases(&s->lock) |
52 | { |
53 | bool wakeup = s->wakeup_needed; |
54 | struct kvm_vcpu *vcpu; |
55 | unsigned long i; |
56 | |
57 | s->wakeup_needed = false; |
58 | |
59 | spin_unlock(lock: &s->lock); |
60 | |
61 | if (wakeup) { |
62 | kvm_for_each_vcpu(i, vcpu, s->kvm) { |
63 | if (kvm_apic_accept_pic_intr(vcpu)) { |
64 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
65 | kvm_vcpu_kick(vcpu); |
66 | return; |
67 | } |
68 | } |
69 | } |
70 | } |
71 | |
72 | static void pic_clear_isr(struct kvm_kpic_state *s, int irq) |
73 | { |
74 | s->isr &= ~(1 << irq); |
75 | if (s != &s->pics_state->pics[0]) |
76 | irq += 8; |
77 | /* |
78 | * We are dropping lock while calling ack notifiers since ack |
79 | * notifier callbacks for assigned devices call into PIC recursively. |
80 | * Other interrupt may be delivered to PIC while lock is dropped but |
81 | * it should be safe since PIC state is already updated at this stage. |
82 | */ |
83 | pic_unlock(s: s->pics_state); |
84 | kvm_notify_acked_irq(kvm: s->pics_state->kvm, SELECT_PIC(irq), pin: irq); |
85 | pic_lock(s: s->pics_state); |
86 | } |
87 | |
88 | /* |
89 | * set irq level. If an edge is detected, then the IRR is set to 1 |
90 | */ |
91 | static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level) |
92 | { |
93 | int mask, ret = 1; |
94 | mask = 1 << irq; |
95 | if (s->elcr & mask) /* level triggered */ |
96 | if (level) { |
97 | ret = !(s->irr & mask); |
98 | s->irr |= mask; |
99 | s->last_irr |= mask; |
100 | } else { |
101 | s->irr &= ~mask; |
102 | s->last_irr &= ~mask; |
103 | } |
104 | else /* edge triggered */ |
105 | if (level) { |
106 | if ((s->last_irr & mask) == 0) { |
107 | ret = !(s->irr & mask); |
108 | s->irr |= mask; |
109 | } |
110 | s->last_irr |= mask; |
111 | } else |
112 | s->last_irr &= ~mask; |
113 | |
114 | return (s->imr & mask) ? -1 : ret; |
115 | } |
116 | |
117 | /* |
118 | * return the highest priority found in mask (highest = smallest |
119 | * number). Return 8 if no irq |
120 | */ |
121 | static inline int get_priority(struct kvm_kpic_state *s, int mask) |
122 | { |
123 | int priority; |
124 | if (mask == 0) |
125 | return 8; |
126 | priority = 0; |
127 | while ((mask & (1 << ((priority + s->priority_add) & 7))) == 0) |
128 | priority++; |
129 | return priority; |
130 | } |
131 | |
132 | /* |
133 | * return the pic wanted interrupt. return -1 if none |
134 | */ |
135 | static int pic_get_irq(struct kvm_kpic_state *s) |
136 | { |
137 | int mask, cur_priority, priority; |
138 | |
139 | mask = s->irr & ~s->imr; |
140 | priority = get_priority(s, mask); |
141 | if (priority == 8) |
142 | return -1; |
143 | /* |
144 | * compute current priority. If special fully nested mode on the |
145 | * master, the IRQ coming from the slave is not taken into account |
146 | * for the priority computation. |
147 | */ |
148 | mask = s->isr; |
149 | if (s->special_fully_nested_mode && s == &s->pics_state->pics[0]) |
150 | mask &= ~(1 << 2); |
151 | cur_priority = get_priority(s, mask); |
152 | if (priority < cur_priority) |
153 | /* |
154 | * higher priority found: an irq should be generated |
155 | */ |
156 | return (priority + s->priority_add) & 7; |
157 | else |
158 | return -1; |
159 | } |
160 | |
161 | /* |
162 | * raise irq to CPU if necessary. must be called every time the active |
163 | * irq may change |
164 | */ |
165 | static void pic_update_irq(struct kvm_pic *s) |
166 | { |
167 | int irq2, irq; |
168 | |
169 | irq2 = pic_get_irq(s: &s->pics[1]); |
170 | if (irq2 >= 0) { |
171 | /* |
172 | * if irq request by slave pic, signal master PIC |
173 | */ |
174 | pic_set_irq1(s: &s->pics[0], irq: 2, level: 1); |
175 | pic_set_irq1(s: &s->pics[0], irq: 2, level: 0); |
176 | } |
177 | irq = pic_get_irq(s: &s->pics[0]); |
178 | pic_irq_request(kvm: s->kvm, level: irq >= 0); |
179 | } |
180 | |
/* Locked wrapper: recompute and propagate the PIC output line. */
void kvm_pic_update_irq(struct kvm_pic *s)
{
	pic_lock(s);
	pic_update_irq(s);
	pic_unlock(s);
}
187 | |
188 | int kvm_pic_set_irq(struct kvm_pic *s, int irq, int irq_source_id, int level) |
189 | { |
190 | int ret, irq_level; |
191 | |
192 | BUG_ON(irq < 0 || irq >= PIC_NUM_PINS); |
193 | |
194 | pic_lock(s); |
195 | irq_level = __kvm_irq_line_state(irq_state: &s->irq_states[irq], |
196 | irq_source_id, level); |
197 | ret = pic_set_irq1(s: &s->pics[irq >> 3], irq: irq & 7, level: irq_level); |
198 | pic_update_irq(s); |
199 | trace_kvm_pic_set_irq(chip: irq >> 3, pin: irq & 7, elcr: s->pics[irq >> 3].elcr, |
200 | imr: s->pics[irq >> 3].imr, coalesced: ret == 0); |
201 | pic_unlock(s); |
202 | |
203 | return ret; |
204 | } |
205 | |
/* Drop @irq_source_id's asserted level from every pin's source bitmap. */
void kvm_pic_clear_all(struct kvm_pic *s, int irq_source_id)
{
	int i;

	pic_lock(s);
	for (i = 0; i < PIC_NUM_PINS; i++)
		__clear_bit(irq_source_id, &s->irq_states[i]);
	pic_unlock(s);
}
215 | |
216 | /* |
217 | * acknowledge interrupt 'irq' |
218 | */ |
219 | static inline void pic_intack(struct kvm_kpic_state *s, int irq) |
220 | { |
221 | s->isr |= 1 << irq; |
222 | /* |
223 | * We don't clear a level sensitive interrupt here |
224 | */ |
225 | if (!(s->elcr & (1 << irq))) |
226 | s->irr &= ~(1 << irq); |
227 | |
228 | if (s->auto_eoi) { |
229 | if (s->rotate_on_auto_eoi) |
230 | s->priority_add = (irq + 1) & 7; |
231 | pic_clear_isr(s, irq); |
232 | } |
233 | |
234 | } |
235 | |
236 | int kvm_pic_read_irq(struct kvm *kvm) |
237 | { |
238 | int irq, irq2, intno; |
239 | struct kvm_pic *s = kvm->arch.vpic; |
240 | |
241 | s->output = 0; |
242 | |
243 | pic_lock(s); |
244 | irq = pic_get_irq(s: &s->pics[0]); |
245 | if (irq >= 0) { |
246 | pic_intack(s: &s->pics[0], irq); |
247 | if (irq == 2) { |
248 | irq2 = pic_get_irq(s: &s->pics[1]); |
249 | if (irq2 >= 0) |
250 | pic_intack(s: &s->pics[1], irq: irq2); |
251 | else |
252 | /* |
253 | * spurious IRQ on slave controller |
254 | */ |
255 | irq2 = 7; |
256 | intno = s->pics[1].irq_base + irq2; |
257 | } else |
258 | intno = s->pics[0].irq_base + irq; |
259 | } else { |
260 | /* |
261 | * spurious IRQ on host controller |
262 | */ |
263 | irq = 7; |
264 | intno = s->pics[0].irq_base + irq; |
265 | } |
266 | pic_update_irq(s); |
267 | pic_unlock(s); |
268 | |
269 | return intno; |
270 | } |
271 | |
/*
 * Reset one PIC chip to its post-ICW1 state. Edge-triggered requests
 * still in IRR are acked so their notifiers fire, but only if some vCPU
 * actually accepts PIC interrupts (otherwise nothing was ever injected).
 */
static void kvm_pic_reset(struct kvm_kpic_state *s)
{
	int irq;
	unsigned long i;
	struct kvm_vcpu *vcpu;
	/* edge-triggered requests that were pending before the reset */
	u8 edge_irr = s->irr & ~s->elcr;
	bool found = false;

	s->last_irr = 0;
	/* level-triggered lines stay asserted across a reset */
	s->irr &= s->elcr;
	s->imr = 0;
	s->priority_add = 0;
	s->special_mask = 0;
	s->read_reg_select = 0;
	if (!s->init4) {
		/* ICW4 absent: these default to off */
		s->special_fully_nested_mode = 0;
		s->auto_eoi = 0;
	}
	/* next data-port write is interpreted as ICW2 */
	s->init_state = 1;

	kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
		if (kvm_apic_accept_pic_intr(vcpu)) {
			found = true;
			break;
		}


	if (!found)
		return;

	/* ack any edge irqs that were dropped from IRR above */
	for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
		if (edge_irr & (1 << irq))
			pic_clear_isr(s, irq);
}
306 | |
307 | static void pic_ioport_write(void *opaque, u32 addr, u32 val) |
308 | { |
309 | struct kvm_kpic_state *s = opaque; |
310 | int priority, cmd, irq; |
311 | |
312 | addr &= 1; |
313 | if (addr == 0) { |
314 | if (val & 0x10) { |
315 | s->init4 = val & 1; |
316 | if (val & 0x02) |
317 | pr_pic_unimpl("single mode not supported" ); |
318 | if (val & 0x08) |
319 | pr_pic_unimpl( |
320 | "level sensitive irq not supported" ); |
321 | kvm_pic_reset(s); |
322 | } else if (val & 0x08) { |
323 | if (val & 0x04) |
324 | s->poll = 1; |
325 | if (val & 0x02) |
326 | s->read_reg_select = val & 1; |
327 | if (val & 0x40) |
328 | s->special_mask = (val >> 5) & 1; |
329 | } else { |
330 | cmd = val >> 5; |
331 | switch (cmd) { |
332 | case 0: |
333 | case 4: |
334 | s->rotate_on_auto_eoi = cmd >> 2; |
335 | break; |
336 | case 1: /* end of interrupt */ |
337 | case 5: |
338 | priority = get_priority(s, mask: s->isr); |
339 | if (priority != 8) { |
340 | irq = (priority + s->priority_add) & 7; |
341 | if (cmd == 5) |
342 | s->priority_add = (irq + 1) & 7; |
343 | pic_clear_isr(s, irq); |
344 | pic_update_irq(s: s->pics_state); |
345 | } |
346 | break; |
347 | case 3: |
348 | irq = val & 7; |
349 | pic_clear_isr(s, irq); |
350 | pic_update_irq(s: s->pics_state); |
351 | break; |
352 | case 6: |
353 | s->priority_add = (val + 1) & 7; |
354 | pic_update_irq(s: s->pics_state); |
355 | break; |
356 | case 7: |
357 | irq = val & 7; |
358 | s->priority_add = (irq + 1) & 7; |
359 | pic_clear_isr(s, irq); |
360 | pic_update_irq(s: s->pics_state); |
361 | break; |
362 | default: |
363 | break; /* no operation */ |
364 | } |
365 | } |
366 | } else |
367 | switch (s->init_state) { |
368 | case 0: { /* normal mode */ |
369 | u8 imr_diff = s->imr ^ val, |
370 | off = (s == &s->pics_state->pics[0]) ? 0 : 8; |
371 | s->imr = val; |
372 | for (irq = 0; irq < PIC_NUM_PINS/2; irq++) |
373 | if (imr_diff & (1 << irq)) |
374 | kvm_fire_mask_notifiers( |
375 | kvm: s->pics_state->kvm, |
376 | SELECT_PIC(irq + off), |
377 | pin: irq + off, |
378 | mask: !!(s->imr & (1 << irq))); |
379 | pic_update_irq(s: s->pics_state); |
380 | break; |
381 | } |
382 | case 1: |
383 | s->irq_base = val & 0xf8; |
384 | s->init_state = 2; |
385 | break; |
386 | case 2: |
387 | if (s->init4) |
388 | s->init_state = 3; |
389 | else |
390 | s->init_state = 0; |
391 | break; |
392 | case 3: |
393 | s->special_fully_nested_mode = (val >> 4) & 1; |
394 | s->auto_eoi = (val >> 1) & 1; |
395 | s->init_state = 0; |
396 | break; |
397 | } |
398 | } |
399 | |
400 | static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1) |
401 | { |
402 | int ret; |
403 | |
404 | ret = pic_get_irq(s); |
405 | if (ret >= 0) { |
406 | if (addr1 >> 7) { |
407 | s->pics_state->pics[0].isr &= ~(1 << 2); |
408 | s->pics_state->pics[0].irr &= ~(1 << 2); |
409 | } |
410 | s->irr &= ~(1 << ret); |
411 | pic_clear_isr(s, irq: ret); |
412 | if (addr1 >> 7 || ret != 2) |
413 | pic_update_irq(s: s->pics_state); |
414 | /* Bit 7 is 1, means there's an interrupt */ |
415 | ret |= 0x80; |
416 | } else { |
417 | /* Bit 7 is 0, means there's no interrupt */ |
418 | ret = 0x07; |
419 | pic_update_irq(s: s->pics_state); |
420 | } |
421 | |
422 | return ret; |
423 | } |
424 | |
425 | static u32 pic_ioport_read(void *opaque, u32 addr) |
426 | { |
427 | struct kvm_kpic_state *s = opaque; |
428 | int ret; |
429 | |
430 | if (s->poll) { |
431 | ret = pic_poll_read(s, addr1: addr); |
432 | s->poll = 0; |
433 | } else |
434 | if ((addr & 1) == 0) |
435 | if (s->read_reg_select) |
436 | ret = s->isr; |
437 | else |
438 | ret = s->irr; |
439 | else |
440 | ret = s->imr; |
441 | return ret; |
442 | } |
443 | |
/* Write the edge/level control register; elcr_mask keeps fixed-edge pins edge. */
static void elcr_ioport_write(void *opaque, u32 val)
{
	struct kvm_kpic_state *s = opaque;
	s->elcr = val & s->elcr_mask;
}
449 | |
/* Read back the edge/level control register. */
static u32 elcr_ioport_read(void *opaque)
{
	struct kvm_kpic_state *s = opaque;
	return s->elcr;
}
455 | |
456 | static int picdev_write(struct kvm_pic *s, |
457 | gpa_t addr, int len, const void *val) |
458 | { |
459 | unsigned char data = *(unsigned char *)val; |
460 | |
461 | if (len != 1) { |
462 | pr_pic_unimpl("non byte write\n" ); |
463 | return 0; |
464 | } |
465 | switch (addr) { |
466 | case 0x20: |
467 | case 0x21: |
468 | pic_lock(s); |
469 | pic_ioport_write(opaque: &s->pics[0], addr, val: data); |
470 | pic_unlock(s); |
471 | break; |
472 | case 0xa0: |
473 | case 0xa1: |
474 | pic_lock(s); |
475 | pic_ioport_write(opaque: &s->pics[1], addr, val: data); |
476 | pic_unlock(s); |
477 | break; |
478 | case 0x4d0: |
479 | case 0x4d1: |
480 | pic_lock(s); |
481 | elcr_ioport_write(opaque: &s->pics[addr & 1], val: data); |
482 | pic_unlock(s); |
483 | break; |
484 | default: |
485 | return -EOPNOTSUPP; |
486 | } |
487 | return 0; |
488 | } |
489 | |
490 | static int picdev_read(struct kvm_pic *s, |
491 | gpa_t addr, int len, void *val) |
492 | { |
493 | unsigned char *data = (unsigned char *)val; |
494 | |
495 | if (len != 1) { |
496 | memset(val, 0, len); |
497 | pr_pic_unimpl("non byte read\n" ); |
498 | return 0; |
499 | } |
500 | switch (addr) { |
501 | case 0x20: |
502 | case 0x21: |
503 | case 0xa0: |
504 | case 0xa1: |
505 | pic_lock(s); |
506 | *data = pic_ioport_read(opaque: &s->pics[addr >> 7], addr); |
507 | pic_unlock(s); |
508 | break; |
509 | case 0x4d0: |
510 | case 0x4d1: |
511 | pic_lock(s); |
512 | *data = elcr_ioport_read(opaque: &s->pics[addr & 1]); |
513 | pic_unlock(s); |
514 | break; |
515 | default: |
516 | return -EOPNOTSUPP; |
517 | } |
518 | return 0; |
519 | } |
520 | |
/*
 * kvm_io_device trampolines: recover the owning kvm_pic from the
 * embedded device struct via container_of() and forward to the common
 * picdev_read()/picdev_write() dispatchers.
 */
static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	return picdev_write(container_of(dev, struct kvm_pic, dev_master),
			    addr, len, val);
}

static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	return picdev_read(container_of(dev, struct kvm_pic, dev_master),
			   addr, len, val);
}

static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, const void *val)
{
	return picdev_write(container_of(dev, struct kvm_pic, dev_slave),
			    addr, len, val);
}

static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, int len, void *val)
{
	return picdev_read(container_of(dev, struct kvm_pic, dev_slave),
			   addr, len, val);
}

static int picdev_elcr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, int len, const void *val)
{
	return picdev_write(container_of(dev, struct kvm_pic, dev_elcr),
			    addr, len, val);
}

static int picdev_elcr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			    gpa_t addr, int len, void *val)
{
	return picdev_read(container_of(dev, struct kvm_pic, dev_elcr),
			   addr, len, val);
}
562 | |
563 | /* |
564 | * callback when PIC0 irq status changed |
565 | */ |
/*
 * callback when PIC0 irq status changed
 *
 * wakeup_needed is set whenever output was previously low (any caller
 * holding the PIC lock); the wakeup itself happens later in pic_unlock().
 */
static void pic_irq_request(struct kvm *kvm, int level)
{
	struct kvm_pic *s = kvm->arch.vpic;

	if (!s->output)
		s->wakeup_needed = true;
	s->output = level;
}
574 | |
/* kvm_io_bus ops tables binding each emulated port range to its trampolines. */
static const struct kvm_io_device_ops picdev_master_ops = {
	.read = picdev_master_read,
	.write = picdev_master_write,
};

static const struct kvm_io_device_ops picdev_slave_ops = {
	.read = picdev_slave_read,
	.write = picdev_slave_write,
};

static const struct kvm_io_device_ops picdev_elcr_ops = {
	.read = picdev_elcr_read,
	.write = picdev_elcr_write,
};
589 | |
590 | int kvm_pic_init(struct kvm *kvm) |
591 | { |
592 | struct kvm_pic *s; |
593 | int ret; |
594 | |
595 | s = kzalloc(size: sizeof(struct kvm_pic), GFP_KERNEL_ACCOUNT); |
596 | if (!s) |
597 | return -ENOMEM; |
598 | spin_lock_init(&s->lock); |
599 | s->kvm = kvm; |
600 | s->pics[0].elcr_mask = 0xf8; |
601 | s->pics[1].elcr_mask = 0xde; |
602 | s->pics[0].pics_state = s; |
603 | s->pics[1].pics_state = s; |
604 | |
605 | /* |
606 | * Initialize PIO device |
607 | */ |
608 | kvm_iodevice_init(dev: &s->dev_master, ops: &picdev_master_ops); |
609 | kvm_iodevice_init(dev: &s->dev_slave, ops: &picdev_slave_ops); |
610 | kvm_iodevice_init(dev: &s->dev_elcr, ops: &picdev_elcr_ops); |
611 | mutex_lock(&kvm->slots_lock); |
612 | ret = kvm_io_bus_register_dev(kvm, bus_idx: KVM_PIO_BUS, addr: 0x20, len: 2, |
613 | dev: &s->dev_master); |
614 | if (ret < 0) |
615 | goto fail_unlock; |
616 | |
617 | ret = kvm_io_bus_register_dev(kvm, bus_idx: KVM_PIO_BUS, addr: 0xa0, len: 2, dev: &s->dev_slave); |
618 | if (ret < 0) |
619 | goto fail_unreg_2; |
620 | |
621 | ret = kvm_io_bus_register_dev(kvm, bus_idx: KVM_PIO_BUS, addr: 0x4d0, len: 2, dev: &s->dev_elcr); |
622 | if (ret < 0) |
623 | goto fail_unreg_1; |
624 | |
625 | mutex_unlock(lock: &kvm->slots_lock); |
626 | |
627 | kvm->arch.vpic = s; |
628 | |
629 | return 0; |
630 | |
631 | fail_unreg_1: |
632 | kvm_io_bus_unregister_dev(kvm, bus_idx: KVM_PIO_BUS, dev: &s->dev_slave); |
633 | |
634 | fail_unreg_2: |
635 | kvm_io_bus_unregister_dev(kvm, bus_idx: KVM_PIO_BUS, dev: &s->dev_master); |
636 | |
637 | fail_unlock: |
638 | mutex_unlock(lock: &kvm->slots_lock); |
639 | |
640 | kfree(objp: s); |
641 | |
642 | return ret; |
643 | } |
644 | |
645 | void kvm_pic_destroy(struct kvm *kvm) |
646 | { |
647 | struct kvm_pic *vpic = kvm->arch.vpic; |
648 | |
649 | if (!vpic) |
650 | return; |
651 | |
652 | mutex_lock(&kvm->slots_lock); |
653 | kvm_io_bus_unregister_dev(kvm: vpic->kvm, bus_idx: KVM_PIO_BUS, dev: &vpic->dev_master); |
654 | kvm_io_bus_unregister_dev(kvm: vpic->kvm, bus_idx: KVM_PIO_BUS, dev: &vpic->dev_slave); |
655 | kvm_io_bus_unregister_dev(kvm: vpic->kvm, bus_idx: KVM_PIO_BUS, dev: &vpic->dev_elcr); |
656 | mutex_unlock(lock: &kvm->slots_lock); |
657 | |
658 | kvm->arch.vpic = NULL; |
659 | kfree(objp: vpic); |
660 | } |
661 | |