1 | /* |
2 | * Copyright 2014 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | */ |
22 | |
23 | #include <linux/mm_types.h> |
24 | #include <linux/slab.h> |
25 | #include <linux/types.h> |
26 | #include <linux/sched/signal.h> |
27 | #include <linux/sched/mm.h> |
28 | #include <linux/uaccess.h> |
29 | #include <linux/mman.h> |
30 | #include <linux/memory.h> |
31 | #include "kfd_priv.h" |
32 | #include "kfd_events.h" |
33 | #include "kfd_iommu.h" |
34 | #include <linux/device.h> |
35 | |
36 | /* |
37 | * Wrapper around wait_queue_entry_t |
38 | */ |
39 | struct kfd_event_waiter { |
40 | wait_queue_entry_t wait; |
41 | struct kfd_event *event; /* Event to wait for */ |
42 | bool activated; /* Becomes true when event is signaled */ |
43 | }; |
44 | |
45 | /* |
46 | * Each signal event needs a 64-bit signal slot where the signaler will write |
47 | * a 1 before sending an interrupt. (This is needed because some interrupts |
48 | * do not contain enough spare data bits to identify an event.) |
49 | * We get whole pages and map them to the process VA. |
50 | * Individual signal events use their event_id as slot index. |
51 | */ |
52 | struct kfd_signal_page { |
53 | uint64_t *kernel_address; |
54 | uint64_t __user *user_address; |
55 | bool need_to_free_pages; |
56 | }; |
57 | |
58 | |
59 | static uint64_t *page_slots(struct kfd_signal_page *page) |
60 | { |
61 | return page->kernel_address; |
62 | } |
63 | |
64 | static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p) |
65 | { |
66 | void *backing_store; |
67 | struct kfd_signal_page *page; |
68 | |
69 | page = kzalloc(sizeof(*page), GFP_KERNEL); |
70 | if (!page) |
71 | return NULL; |
72 | |
73 | backing_store = (void *) __get_free_pages(GFP_KERNEL, |
74 | get_order(KFD_SIGNAL_EVENT_LIMIT * 8)); |
75 | if (!backing_store) |
76 | goto fail_alloc_signal_store; |
77 | |
78 | /* Initialize all events to unsignaled */ |
79 | memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT, |
80 | KFD_SIGNAL_EVENT_LIMIT * 8); |
81 | |
82 | page->kernel_address = backing_store; |
83 | page->need_to_free_pages = true; |
	pr_debug("Allocated new event signal page at %p, for process %p\n",
		 page, p);
86 | |
87 | return page; |
88 | |
89 | fail_alloc_signal_store: |
90 | kfree(page); |
91 | return NULL; |
92 | } |
93 | |
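/*
 * Allocate an event ID and the matching signal slot for a signal event.
 * The process signal page is allocated lazily on first use.
 */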
94 | static int allocate_event_notification_slot(struct kfd_process *p, |
95 | struct kfd_event *ev) |
96 | { |
97 | int id; |
98 | |
99 | if (!p->signal_page) { |
100 | p->signal_page = allocate_signal_page(p); |
101 | if (!p->signal_page) |
102 | return -ENOMEM; |
103 | /* Oldest user mode expects 256 event slots */ |
104 | p->signal_mapped_size = 256*8; |
105 | } |
106 | |
107 | /* |
 * Compatibility with old user mode: Only use signal slots that
 * user mode has mapped, which may be less than
 * KFD_SIGNAL_EVENT_LIMIT. This also allows future increases
 * of the event limit without breaking user mode.
112 | */ |
113 | id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8, |
114 | GFP_KERNEL); |
115 | if (id < 0) |
116 | return id; |
117 | |
118 | ev->event_id = id; |
119 | page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT; |
120 | |
121 | return 0; |
122 | } |
123 | |
124 | /* |
125 | * Assumes that p->event_mutex is held and of course that p is not going |
126 | * away (current or locked). |
127 | */ |
128 | static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id) |
129 | { |
130 | return idr_find(&p->event_idr, id); |
131 | } |
132 | |
133 | /** |
134 | * lookup_signaled_event_by_partial_id - Lookup signaled event from partial ID |
135 | * @p: Pointer to struct kfd_process |
136 | * @id: ID to look up |
137 | * @bits: Number of valid bits in @id |
138 | * |
139 | * Finds the first signaled event with a matching partial ID. If no |
140 | * matching signaled event is found, returns NULL. In that case the |
141 | * caller should assume that the partial ID is invalid and do an |
 * exhaustive search of all signaled events.
143 | * |
144 | * If multiple events with the same partial ID signal at the same |
145 | * time, they will be found one interrupt at a time, not necessarily |
146 | * in the same order the interrupts occurred. As long as the number of |
147 | * interrupts is correct, all signaled events will be seen by the |
148 | * driver. |
149 | */ |
150 | static struct kfd_event *lookup_signaled_event_by_partial_id( |
151 | struct kfd_process *p, uint32_t id, uint32_t bits) |
152 | { |
153 | struct kfd_event *ev; |
154 | |
155 | if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT) |
156 | return NULL; |
157 | |
158 | /* Fast path for the common case that @id is not a partial ID |
159 | * and we only need a single lookup. |
160 | */ |
161 | if (bits > 31 || (1U << bits) >= KFD_SIGNAL_EVENT_LIMIT) { |
162 | if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT) |
163 | return NULL; |
164 | |
165 | return idr_find(&p->event_idr, id); |
166 | } |
167 | |
168 | /* General case for partial IDs: Iterate over all matching IDs |
169 | * and find the first one that has signaled. |
170 | */ |
171 | for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) { |
172 | if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT) |
173 | continue; |
174 | |
175 | ev = idr_find(&p->event_idr, id); |
176 | } |
177 | |
178 | return ev; |
179 | } |
180 | |
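/*
 * Set up a signal event: reserve a signal slot (subject to the mapped
 * slot limit) and record the slot's user-mode address for the event.
 */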
181 | static int create_signal_event(struct file *devkfd, |
182 | struct kfd_process *p, |
183 | struct kfd_event *ev) |
184 | { |
185 | int ret; |
186 | |
187 | if (p->signal_mapped_size && |
188 | p->signal_event_count == p->signal_mapped_size / 8) { |
189 | if (!p->signal_event_limit_reached) { |
			pr_warn("Signal event wasn't created because limit was reached\n");
191 | p->signal_event_limit_reached = true; |
192 | } |
193 | return -ENOSPC; |
194 | } |
195 | |
196 | ret = allocate_event_notification_slot(p, ev); |
197 | if (ret) { |
		pr_warn("Signal event wasn't created because out of kernel memory\n");
199 | return ret; |
200 | } |
201 | |
202 | p->signal_event_count++; |
203 | |
204 | ev->user_signal_address = &p->signal_page->user_address[ev->event_id]; |
	pr_debug("Signal event number %zu created with id %d, address %p\n",
206 | p->signal_event_count, ev->event_id, |
207 | ev->user_signal_address); |
208 | |
209 | return 0; |
210 | } |
211 | |
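/* Non-signal events only need an event ID from the non-signal ID range. */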
212 | static int create_other_event(struct kfd_process *p, struct kfd_event *ev) |
213 | { |
	/* Cast KFD_LAST_NONSIGNAL_EVENT_ID to uint32_t. This allows an
	 * intentional integer overflow to -1 without a compiler
	 * warning. idr_alloc treats a negative value as "maximum
	 * signed integer".
	 */
219 | int id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID, |
220 | (uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1, |
221 | GFP_KERNEL); |
222 | |
223 | if (id < 0) |
224 | return id; |
225 | ev->event_id = id; |
226 | |
227 | return 0; |
228 | } |
229 | |
230 | void kfd_event_init_process(struct kfd_process *p) |
231 | { |
232 | mutex_init(&p->event_mutex); |
233 | idr_init(&p->event_idr); |
234 | p->signal_page = NULL; |
235 | p->signal_event_count = 0; |
236 | } |
237 | |
238 | static void destroy_event(struct kfd_process *p, struct kfd_event *ev) |
239 | { |
240 | struct kfd_event_waiter *waiter; |
241 | |
242 | /* Wake up pending waiters. They will return failure */ |
243 | list_for_each_entry(waiter, &ev->wq.head, wait.entry) |
244 | waiter->event = NULL; |
245 | wake_up_all(&ev->wq); |
246 | |
247 | if (ev->type == KFD_EVENT_TYPE_SIGNAL || |
248 | ev->type == KFD_EVENT_TYPE_DEBUG) |
249 | p->signal_event_count--; |
250 | |
251 | idr_remove(&p->event_idr, ev->event_id); |
252 | kfree(ev); |
253 | } |
254 | |
255 | static void destroy_events(struct kfd_process *p) |
256 | { |
257 | struct kfd_event *ev; |
258 | uint32_t id; |
259 | |
260 | idr_for_each_entry(&p->event_idr, ev, id) |
261 | destroy_event(p, ev); |
262 | idr_destroy(&p->event_idr); |
263 | } |
264 | |
265 | /* |
266 | * We assume that the process is being destroyed and there is no need to |
267 | * unmap the pages or keep bookkeeping data in order. |
268 | */ |
269 | static void shutdown_signal_page(struct kfd_process *p) |
270 | { |
271 | struct kfd_signal_page *page = p->signal_page; |
272 | |
273 | if (page) { |
274 | if (page->need_to_free_pages) |
275 | free_pages((unsigned long)page->kernel_address, |
276 | get_order(KFD_SIGNAL_EVENT_LIMIT * 8)); |
277 | kfree(page); |
278 | } |
279 | } |
280 | |
281 | void kfd_event_free_process(struct kfd_process *p) |
282 | { |
283 | destroy_events(p); |
284 | shutdown_signal_page(p); |
285 | } |
286 | |
287 | static bool event_can_be_gpu_signaled(const struct kfd_event *ev) |
288 | { |
289 | return ev->type == KFD_EVENT_TYPE_SIGNAL || |
290 | ev->type == KFD_EVENT_TYPE_DEBUG; |
291 | } |
292 | |
293 | static bool event_can_be_cpu_signaled(const struct kfd_event *ev) |
294 | { |
295 | return ev->type == KFD_EVENT_TYPE_SIGNAL; |
296 | } |
297 | |
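/*
 * Adopt an externally allocated buffer as the process signal page. The
 * buffer is not freed by KFD on process teardown (need_to_free_pages
 * stays false).
 */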
298 | int kfd_event_page_set(struct kfd_process *p, void *kernel_address, |
299 | uint64_t size) |
300 | { |
301 | struct kfd_signal_page *page; |
302 | |
303 | if (p->signal_page) |
304 | return -EBUSY; |
305 | |
306 | page = kzalloc(sizeof(*page), GFP_KERNEL); |
307 | if (!page) |
308 | return -ENOMEM; |
309 | |
310 | /* Initialize all events to unsignaled */ |
311 | memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT, |
312 | KFD_SIGNAL_EVENT_LIMIT * 8); |
313 | |
314 | page->kernel_address = kernel_address; |
315 | |
316 | p->signal_page = page; |
317 | p->signal_mapped_size = size; |
318 | |
319 | return 0; |
320 | } |
321 | |
322 | int kfd_event_create(struct file *devkfd, struct kfd_process *p, |
323 | uint32_t event_type, bool auto_reset, uint32_t node_id, |
324 | uint32_t *event_id, uint32_t *event_trigger_data, |
325 | uint64_t *event_page_offset, uint32_t *event_slot_index) |
326 | { |
327 | int ret = 0; |
328 | struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL); |
329 | |
330 | if (!ev) |
331 | return -ENOMEM; |
332 | |
333 | ev->type = event_type; |
334 | ev->auto_reset = auto_reset; |
335 | ev->signaled = false; |
336 | |
337 | init_waitqueue_head(&ev->wq); |
338 | |
339 | *event_page_offset = 0; |
340 | |
341 | mutex_lock(&p->event_mutex); |
342 | |
343 | switch (event_type) { |
344 | case KFD_EVENT_TYPE_SIGNAL: |
345 | case KFD_EVENT_TYPE_DEBUG: |
346 | ret = create_signal_event(devkfd, p, ev); |
347 | if (!ret) { |
348 | *event_page_offset = KFD_MMAP_TYPE_EVENTS; |
349 | *event_page_offset <<= PAGE_SHIFT; |
350 | *event_slot_index = ev->event_id; |
351 | } |
352 | break; |
353 | default: |
354 | ret = create_other_event(p, ev); |
355 | break; |
356 | } |
357 | |
358 | if (!ret) { |
359 | *event_id = ev->event_id; |
360 | *event_trigger_data = ev->event_id; |
361 | } else { |
362 | kfree(ev); |
363 | } |
364 | |
365 | mutex_unlock(&p->event_mutex); |
366 | |
367 | return ret; |
368 | } |
369 | |
370 | /* Assumes that p is current. */ |
371 | int kfd_event_destroy(struct kfd_process *p, uint32_t event_id) |
372 | { |
373 | struct kfd_event *ev; |
374 | int ret = 0; |
375 | |
376 | mutex_lock(&p->event_mutex); |
377 | |
378 | ev = lookup_event_by_id(p, event_id); |
379 | |
380 | if (ev) |
381 | destroy_event(p, ev); |
382 | else |
383 | ret = -EINVAL; |
384 | |
385 | mutex_unlock(&p->event_mutex); |
386 | return ret; |
387 | } |
388 | |
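/* Mark an event as signaled and wake up all waiters queued on it. */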
389 | static void set_event(struct kfd_event *ev) |
390 | { |
391 | struct kfd_event_waiter *waiter; |
392 | |
393 | /* Auto reset if the list is non-empty and we're waking |
394 | * someone. waitqueue_active is safe here because we're |
395 | * protected by the p->event_mutex, which is also held when |
396 | * updating the wait queues in kfd_wait_on_events. |
397 | */ |
398 | ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq); |
399 | |
400 | list_for_each_entry(waiter, &ev->wq.head, wait.entry) |
401 | waiter->activated = true; |
402 | |
403 | wake_up_all(&ev->wq); |
404 | } |
405 | |
406 | /* Assumes that p is current. */ |
407 | int kfd_set_event(struct kfd_process *p, uint32_t event_id) |
408 | { |
409 | int ret = 0; |
410 | struct kfd_event *ev; |
411 | |
412 | mutex_lock(&p->event_mutex); |
413 | |
414 | ev = lookup_event_by_id(p, event_id); |
415 | |
416 | if (ev && event_can_be_cpu_signaled(ev)) |
417 | set_event(ev); |
418 | else |
419 | ret = -EINVAL; |
420 | |
421 | mutex_unlock(&p->event_mutex); |
422 | return ret; |
423 | } |
424 | |
425 | static void reset_event(struct kfd_event *ev) |
426 | { |
427 | ev->signaled = false; |
428 | } |
429 | |
430 | /* Assumes that p is current. */ |
431 | int kfd_reset_event(struct kfd_process *p, uint32_t event_id) |
432 | { |
433 | int ret = 0; |
434 | struct kfd_event *ev; |
435 | |
436 | mutex_lock(&p->event_mutex); |
437 | |
438 | ev = lookup_event_by_id(p, event_id); |
439 | |
440 | if (ev && event_can_be_cpu_signaled(ev)) |
441 | reset_event(ev); |
442 | else |
443 | ret = -EINVAL; |
444 | |
445 | mutex_unlock(&p->event_mutex); |
446 | return ret; |
447 | |
448 | } |
449 | |
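/* Reset the event's signal slot to the unsignaled state so the event
 * can be signaled again.
 */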
450 | static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev) |
451 | { |
452 | page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT; |
453 | } |
454 | |
455 | static void set_event_from_interrupt(struct kfd_process *p, |
456 | struct kfd_event *ev) |
457 | { |
458 | if (ev && event_can_be_gpu_signaled(ev)) { |
459 | acknowledge_signal(p, ev); |
460 | set_event(ev); |
461 | } |
462 | } |
463 | |
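/*
 * Signal events in response to an event interrupt. @partial_id carries
 * @valid_id_bits significant bits of the event ID; if the lookup by
 * partial ID fails, all signal slots are scanned for signaled events.
 */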
464 | void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id, |
465 | uint32_t valid_id_bits) |
466 | { |
467 | struct kfd_event *ev = NULL; |
468 | |
469 | /* |
470 | * Because we are called from arbitrary context (workqueue) as opposed |
471 | * to process context, kfd_process could attempt to exit while we are |
472 | * running so the lookup function increments the process ref count. |
473 | */ |
474 | struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); |
475 | |
476 | if (!p) |
477 | return; /* Presumably process exited. */ |
478 | |
479 | mutex_lock(&p->event_mutex); |
480 | |
481 | if (valid_id_bits) |
482 | ev = lookup_signaled_event_by_partial_id(p, partial_id, |
483 | valid_id_bits); |
484 | if (ev) { |
485 | set_event_from_interrupt(p, ev); |
486 | } else if (p->signal_page) { |
487 | /* |
488 | * Partial ID lookup failed. Assume that the event ID |
489 | * in the interrupt payload was invalid and do an |
490 | * exhaustive search of signaled events. |
491 | */ |
492 | uint64_t *slots = page_slots(p->signal_page); |
493 | uint32_t id; |
494 | |
495 | if (valid_id_bits) |
			pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
497 | partial_id, valid_id_bits); |
498 | |
499 | if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64) { |
500 | /* With relatively few events, it's faster to |
501 | * iterate over the event IDR |
502 | */ |
503 | idr_for_each_entry(&p->event_idr, ev, id) { |
504 | if (id >= KFD_SIGNAL_EVENT_LIMIT) |
505 | break; |
506 | |
507 | if (slots[id] != UNSIGNALED_EVENT_SLOT) |
508 | set_event_from_interrupt(p, ev); |
509 | } |
510 | } else { |
511 | /* With relatively many events, it's faster to |
512 | * iterate over the signal slots and lookup |
513 | * only signaled events from the IDR. |
514 | */ |
515 | for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++) |
516 | if (slots[id] != UNSIGNALED_EVENT_SLOT) { |
517 | ev = lookup_event_by_id(p, id); |
518 | set_event_from_interrupt(p, ev); |
519 | } |
520 | } |
521 | } |
522 | |
523 | mutex_unlock(&p->event_mutex); |
524 | kfd_unref_process(p); |
525 | } |
526 | |
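/* Allocate and initialize one waiter entry per event to be waited on. */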
527 | static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events) |
528 | { |
529 | struct kfd_event_waiter *event_waiters; |
530 | uint32_t i; |
531 | |
532 | event_waiters = kmalloc_array(num_events, |
533 | sizeof(struct kfd_event_waiter), |
534 | GFP_KERNEL); |
535 | |
536 | for (i = 0; (event_waiters) && (i < num_events) ; i++) { |
537 | init_wait(&event_waiters[i].wait); |
538 | event_waiters[i].activated = false; |
539 | } |
540 | |
541 | return event_waiters; |
542 | } |
543 | |
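/*
 * Look up the event and latch its current signaled state into the
 * waiter. An already-signaled auto-reset event is consumed here.
 */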
544 | static int init_event_waiter_get_status(struct kfd_process *p, |
545 | struct kfd_event_waiter *waiter, |
546 | uint32_t event_id) |
547 | { |
548 | struct kfd_event *ev = lookup_event_by_id(p, event_id); |
549 | |
550 | if (!ev) |
551 | return -EINVAL; |
552 | |
553 | waiter->event = ev; |
554 | waiter->activated = ev->signaled; |
555 | ev->signaled = ev->signaled && !ev->auto_reset; |
556 | |
557 | return 0; |
558 | } |
559 | |
560 | static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter) |
561 | { |
562 | struct kfd_event *ev = waiter->event; |
563 | |
564 | /* Only add to the wait list if we actually need to |
565 | * wait on this event. |
566 | */ |
567 | if (!waiter->activated) |
568 | add_wait_queue(&ev->wq, &waiter->wait); |
569 | } |
570 | |
571 | /* test_event_condition - Test condition of events being waited for |
572 | * @all: Return completion only if all events have signaled |
573 | * @num_events: Number of events to wait for |
574 | * @event_waiters: Array of event waiters, one per event |
575 | * |
576 | * Returns KFD_IOC_WAIT_RESULT_COMPLETE if all (or one) event(s) have |
577 | * signaled. Returns KFD_IOC_WAIT_RESULT_TIMEOUT if no (or not all) |
578 | * events have signaled. Returns KFD_IOC_WAIT_RESULT_FAIL if any of |
579 | * the events have been destroyed. |
580 | */ |
581 | static uint32_t test_event_condition(bool all, uint32_t num_events, |
582 | struct kfd_event_waiter *event_waiters) |
583 | { |
584 | uint32_t i; |
585 | uint32_t activated_count = 0; |
586 | |
587 | for (i = 0; i < num_events; i++) { |
588 | if (!event_waiters[i].event) |
589 | return KFD_IOC_WAIT_RESULT_FAIL; |
590 | |
591 | if (event_waiters[i].activated) { |
592 | if (!all) |
593 | return KFD_IOC_WAIT_RESULT_COMPLETE; |
594 | |
595 | activated_count++; |
596 | } |
597 | } |
598 | |
599 | return activated_count == num_events ? |
600 | KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT; |
601 | } |
602 | |
603 | /* |
604 | * Copy event specific data, if defined. |
605 | * Currently only memory exception events have additional data to copy to user |
606 | */ |
607 | static int copy_signaled_event_data(uint32_t num_events, |
608 | struct kfd_event_waiter *event_waiters, |
609 | struct kfd_event_data __user *data) |
610 | { |
611 | struct kfd_hsa_memory_exception_data *src; |
612 | struct kfd_hsa_memory_exception_data __user *dst; |
613 | struct kfd_event_waiter *waiter; |
614 | struct kfd_event *event; |
615 | uint32_t i; |
616 | |
617 | for (i = 0; i < num_events; i++) { |
618 | waiter = &event_waiters[i]; |
619 | event = waiter->event; |
620 | if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) { |
621 | dst = &data[i].memory_exception_data; |
622 | src = &event->memory_exception_data; |
623 | if (copy_to_user(dst, src, |
624 | sizeof(struct kfd_hsa_memory_exception_data))) |
625 | return -EFAULT; |
626 | } |
627 | } |
628 | |
629 | return 0; |
630 | |
631 | } |
632 | |
633 | |
634 | |
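/*
 * Convert the user-supplied timeout in milliseconds to jiffies,
 * mapping the special immediate and infinite values accordingly.
 */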
635 | static long user_timeout_to_jiffies(uint32_t user_timeout_ms) |
636 | { |
637 | if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE) |
638 | return 0; |
639 | |
640 | if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE) |
641 | return MAX_SCHEDULE_TIMEOUT; |
642 | |
643 | /* |
644 | * msecs_to_jiffies interprets all values above 2^31-1 as infinite, |
645 | * but we consider them finite. |
646 | * This hack is wrong, but nobody is likely to notice. |
647 | */ |
648 | user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF); |
649 | |
650 | return msecs_to_jiffies(user_timeout_ms) + 1; |
651 | } |
652 | |
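/* Remove all waiters from their events' wait queues and free the array. */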
653 | static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters) |
654 | { |
655 | uint32_t i; |
656 | |
657 | for (i = 0; i < num_events; i++) |
658 | if (waiters[i].event) |
659 | remove_wait_queue(&waiters[i].event->wq, |
660 | &waiters[i].wait); |
661 | |
662 | kfree(waiters); |
663 | } |
664 | |
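/*
 * Wait for one (or all, if @all is set) of @num_events events to signal,
 * until @user_timeout_ms expires or a signal interrupts the wait. The
 * outcome is returned through @wait_result.
 */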
665 | int kfd_wait_on_events(struct kfd_process *p, |
666 | uint32_t num_events, void __user *data, |
667 | bool all, uint32_t user_timeout_ms, |
668 | uint32_t *wait_result) |
669 | { |
670 | struct kfd_event_data __user *events = |
671 | (struct kfd_event_data __user *) data; |
672 | uint32_t i; |
673 | int ret = 0; |
674 | |
675 | struct kfd_event_waiter *event_waiters = NULL; |
676 | long timeout = user_timeout_to_jiffies(user_timeout_ms); |
677 | |
678 | event_waiters = alloc_event_waiters(num_events); |
679 | if (!event_waiters) { |
680 | ret = -ENOMEM; |
681 | goto out; |
682 | } |
683 | |
684 | mutex_lock(&p->event_mutex); |
685 | |
686 | for (i = 0; i < num_events; i++) { |
687 | struct kfd_event_data event_data; |
688 | |
689 | if (copy_from_user(&event_data, &events[i], |
690 | sizeof(struct kfd_event_data))) { |
691 | ret = -EFAULT; |
692 | goto out_unlock; |
693 | } |
694 | |
695 | ret = init_event_waiter_get_status(p, &event_waiters[i], |
696 | event_data.event_id); |
697 | if (ret) |
698 | goto out_unlock; |
699 | } |
700 | |
701 | /* Check condition once. */ |
702 | *wait_result = test_event_condition(all, num_events, event_waiters); |
703 | if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) { |
704 | ret = copy_signaled_event_data(num_events, |
705 | event_waiters, events); |
706 | goto out_unlock; |
707 | } else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) { |
708 | /* This should not happen. Events shouldn't be |
709 | * destroyed while we're holding the event_mutex |
710 | */ |
711 | goto out_unlock; |
712 | } |
713 | |
714 | /* Add to wait lists if we need to wait. */ |
715 | for (i = 0; i < num_events; i++) |
716 | init_event_waiter_add_to_waitlist(&event_waiters[i]); |
717 | |
718 | mutex_unlock(&p->event_mutex); |
719 | |
720 | while (true) { |
721 | if (fatal_signal_pending(current)) { |
722 | ret = -EINTR; |
723 | break; |
724 | } |
725 | |
726 | if (signal_pending(current)) { |
727 | /* |
728 | * This is wrong when a nonzero, non-infinite timeout |
729 | * is specified. We need to use |
730 | * ERESTARTSYS_RESTARTBLOCK, but struct restart_block |
731 | * contains a union with data for each user and it's |
732 | * in generic kernel code that I don't want to |
733 | * touch yet. |
734 | */ |
735 | ret = -ERESTARTSYS; |
736 | break; |
737 | } |
738 | |
739 | /* Set task state to interruptible sleep before |
740 | * checking wake-up conditions. A concurrent wake-up |
741 | * will put the task back into runnable state. In that |
742 | * case schedule_timeout will not put the task to |
743 | * sleep and we'll get a chance to re-check the |
744 | * updated conditions almost immediately. Otherwise, |
745 | * this race condition would lead to a soft hang or a |
746 | * very long sleep. |
747 | */ |
748 | set_current_state(TASK_INTERRUPTIBLE); |
749 | |
750 | *wait_result = test_event_condition(all, num_events, |
751 | event_waiters); |
752 | if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT) |
753 | break; |
754 | |
755 | if (timeout <= 0) |
756 | break; |
757 | |
758 | timeout = schedule_timeout(timeout); |
759 | } |
760 | __set_current_state(TASK_RUNNING); |
761 | |
762 | /* copy_signaled_event_data may sleep. So this has to happen |
763 | * after the task state is set back to RUNNING. |
764 | */ |
765 | if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) |
766 | ret = copy_signaled_event_data(num_events, |
767 | event_waiters, events); |
768 | |
769 | mutex_lock(&p->event_mutex); |
770 | out_unlock: |
771 | free_waiters(num_events, event_waiters); |
772 | mutex_unlock(&p->event_mutex); |
773 | out: |
774 | if (ret) |
775 | *wait_result = KFD_IOC_WAIT_RESULT_FAIL; |
776 | else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL) |
777 | ret = -EIO; |
778 | |
779 | return ret; |
780 | } |
781 | |
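/* Map the process signal page into user space for signal slot access. */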
782 | int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma) |
783 | { |
784 | unsigned long pfn; |
785 | struct kfd_signal_page *page; |
786 | int ret; |
787 | |
788 | /* check required size doesn't exceed the allocated size */ |
789 | if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) < |
790 | get_order(vma->vm_end - vma->vm_start)) { |
		pr_err("Event page mmap requested illegal size\n");
792 | return -EINVAL; |
793 | } |
794 | |
795 | page = p->signal_page; |
796 | if (!page) { |
797 | /* Probably KFD bug, but mmap is user-accessible. */ |
		pr_debug("Signal page could not be found\n");
799 | return -EINVAL; |
800 | } |
801 | |
802 | pfn = __pa(page->kernel_address); |
803 | pfn >>= PAGE_SHIFT; |
804 | |
805 | vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
806 | | VM_DONTDUMP | VM_PFNMAP; |
807 | |
	pr_debug("Mapping signal page\n");
	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
	pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
	pr_debug("     pfn                 == 0x%016lX\n", pfn);
	pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
	pr_debug("     size                == 0x%08lX\n",
		 vma->vm_end - vma->vm_start);
815 | |
816 | page->user_address = (uint64_t __user *)vma->vm_start; |
817 | |
	/* Map the page into the user process address space */
819 | ret = remap_pfn_range(vma, vma->vm_start, pfn, |
820 | vma->vm_end - vma->vm_start, vma->vm_page_prot); |
821 | if (!ret) |
822 | p->signal_mapped_size = vma->vm_end - vma->vm_start; |
823 | |
824 | return ret; |
825 | } |
826 | |
827 | /* |
828 | * Assumes that p->event_mutex is held and of course |
829 | * that p is not going away (current or locked). |
830 | */ |
831 | static void lookup_events_by_type_and_signal(struct kfd_process *p, |
832 | int type, void *event_data) |
833 | { |
834 | struct kfd_hsa_memory_exception_data *ev_data; |
835 | struct kfd_event *ev; |
836 | uint32_t id; |
837 | bool send_signal = true; |
838 | |
839 | ev_data = (struct kfd_hsa_memory_exception_data *) event_data; |
840 | |
841 | id = KFD_FIRST_NONSIGNAL_EVENT_ID; |
842 | idr_for_each_entry_continue(&p->event_idr, ev, id) |
843 | if (ev->type == type) { |
844 | send_signal = false; |
845 | dev_dbg(kfd_device, |
				"Event found: id %X type %d",
847 | ev->event_id, ev->type); |
848 | set_event(ev); |
849 | if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data) |
850 | ev->memory_exception_data = *ev_data; |
851 | } |
852 | |
853 | if (type == KFD_EVENT_TYPE_MEMORY) { |
854 | dev_warn(kfd_device, |
			 "Sending SIGSEGV to HSA Process with PID %d ",
856 | p->lead_thread->pid); |
857 | send_sig(SIGSEGV, p->lead_thread, 0); |
858 | } |
859 | |
	/* Send SIGTERM if no event of type "type" has been found */
861 | if (send_signal) { |
862 | if (send_sigterm) { |
863 | dev_warn(kfd_device, |
				 "Sending SIGTERM to HSA Process with PID %d ",
865 | p->lead_thread->pid); |
866 | send_sig(SIGTERM, p->lead_thread, 0); |
867 | } else { |
868 | dev_err(kfd_device, |
				"HSA Process (PID %d) got unhandled exception",
870 | p->lead_thread->pid); |
871 | } |
872 | } |
873 | } |
874 | |
875 | #ifdef KFD_SUPPORT_IOMMU_V2 |
876 | void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid, |
877 | unsigned long address, bool is_write_requested, |
878 | bool is_execute_requested) |
879 | { |
880 | struct kfd_hsa_memory_exception_data memory_exception_data; |
881 | struct vm_area_struct *vma; |
882 | |
883 | /* |
884 | * Because we are called from arbitrary context (workqueue) as opposed |
885 | * to process context, kfd_process could attempt to exit while we are |
886 | * running so the lookup function increments the process ref count. |
887 | */ |
888 | struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); |
889 | struct mm_struct *mm; |
890 | |
891 | if (!p) |
892 | return; /* Presumably process exited. */ |
893 | |
894 | /* Take a safe reference to the mm_struct, which may otherwise |
895 | * disappear even while the kfd_process is still referenced. |
896 | */ |
897 | mm = get_task_mm(p->lead_thread); |
898 | if (!mm) { |
899 | kfd_unref_process(p); |
900 | return; /* Process is exiting */ |
901 | } |
902 | |
903 | memset(&memory_exception_data, 0, sizeof(memory_exception_data)); |
904 | |
905 | down_read(&mm->mmap_sem); |
906 | vma = find_vma(mm, address); |
907 | |
908 | memory_exception_data.gpu_id = dev->id; |
909 | memory_exception_data.va = address; |
910 | /* Set failure reason */ |
911 | memory_exception_data.failure.NotPresent = 1; |
912 | memory_exception_data.failure.NoExecute = 0; |
913 | memory_exception_data.failure.ReadOnly = 0; |
914 | if (vma && address >= vma->vm_start) { |
915 | memory_exception_data.failure.NotPresent = 0; |
916 | |
917 | if (is_write_requested && !(vma->vm_flags & VM_WRITE)) |
918 | memory_exception_data.failure.ReadOnly = 1; |
919 | else |
920 | memory_exception_data.failure.ReadOnly = 0; |
921 | |
922 | if (is_execute_requested && !(vma->vm_flags & VM_EXEC)) |
923 | memory_exception_data.failure.NoExecute = 1; |
924 | else |
925 | memory_exception_data.failure.NoExecute = 0; |
926 | } |
927 | |
928 | up_read(&mm->mmap_sem); |
929 | mmput(mm); |
930 | |
	pr_debug("notpresent %d, noexecute %d, readonly %d\n",
932 | memory_exception_data.failure.NotPresent, |
933 | memory_exception_data.failure.NoExecute, |
934 | memory_exception_data.failure.ReadOnly); |
935 | |
936 | /* Workaround on Raven to not kill the process when memory is freed |
937 | * before IOMMU is able to finish processing all the excessive PPRs |
938 | */ |
939 | if (dev->device_info->asic_family != CHIP_RAVEN) { |
940 | mutex_lock(&p->event_mutex); |
941 | |
942 | /* Lookup events by type and signal them */ |
943 | lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY, |
944 | &memory_exception_data); |
945 | |
946 | mutex_unlock(&p->event_mutex); |
947 | } |
948 | |
949 | kfd_unref_process(p); |
950 | } |
951 | #endif /* KFD_SUPPORT_IOMMU_V2 */ |
952 | |
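/* Signal all HW-exception events of the process identified by @pasid. */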
953 | void kfd_signal_hw_exception_event(unsigned int pasid) |
954 | { |
955 | /* |
956 | * Because we are called from arbitrary context (workqueue) as opposed |
957 | * to process context, kfd_process could attempt to exit while we are |
958 | * running so the lookup function increments the process ref count. |
959 | */ |
960 | struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); |
961 | |
962 | if (!p) |
963 | return; /* Presumably process exited. */ |
964 | |
965 | mutex_lock(&p->event_mutex); |
966 | |
967 | /* Lookup events by type and signal them */ |
968 | lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL); |
969 | |
970 | mutex_unlock(&p->event_mutex); |
971 | kfd_unref_process(p); |
972 | } |
973 | |
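/*
 * Signal all memory events of the process identified by @pasid with
 * exception data describing a GPU VM fault.
 */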
974 | void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid, |
975 | struct kfd_vm_fault_info *info) |
976 | { |
977 | struct kfd_event *ev; |
978 | uint32_t id; |
979 | struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); |
980 | struct kfd_hsa_memory_exception_data memory_exception_data; |
981 | |
982 | if (!p) |
983 | return; /* Presumably process exited. */ |
984 | memset(&memory_exception_data, 0, sizeof(memory_exception_data)); |
985 | memory_exception_data.gpu_id = dev->id; |
986 | memory_exception_data.failure.imprecise = 1; |
987 | /* Set failure reason */ |
988 | if (info) { |
989 | memory_exception_data.va = (info->page_addr) << PAGE_SHIFT; |
990 | memory_exception_data.failure.NotPresent = |
991 | info->prot_valid ? 1 : 0; |
992 | memory_exception_data.failure.NoExecute = |
993 | info->prot_exec ? 1 : 0; |
994 | memory_exception_data.failure.ReadOnly = |
995 | info->prot_write ? 1 : 0; |
996 | memory_exception_data.failure.imprecise = 0; |
997 | } |
998 | mutex_lock(&p->event_mutex); |
999 | |
1000 | id = KFD_FIRST_NONSIGNAL_EVENT_ID; |
1001 | idr_for_each_entry_continue(&p->event_idr, ev, id) |
1002 | if (ev->type == KFD_EVENT_TYPE_MEMORY) { |
1003 | ev->memory_exception_data = memory_exception_data; |
1004 | set_event(ev); |
1005 | } |
1006 | |
1007 | mutex_unlock(&p->event_mutex); |
1008 | kfd_unref_process(p); |
1009 | } |
1010 | |
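/*
 * After a full GPU reset, signal the HW-exception events of all
 * processes to report that GPU memory contents were lost.
 */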
1011 | void kfd_signal_reset_event(struct kfd_dev *dev) |
1012 | { |
1013 | struct kfd_hsa_hw_exception_data hw_exception_data; |
1014 | struct kfd_process *p; |
1015 | struct kfd_event *ev; |
1016 | unsigned int temp; |
1017 | uint32_t id, idx; |
1018 | |
1019 | /* Whole gpu reset caused by GPU hang and memory is lost */ |
1020 | memset(&hw_exception_data, 0, sizeof(hw_exception_data)); |
1021 | hw_exception_data.gpu_id = dev->id; |
1022 | hw_exception_data.memory_lost = 1; |
1023 | |
1024 | idx = srcu_read_lock(&kfd_processes_srcu); |
1025 | hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { |
1026 | mutex_lock(&p->event_mutex); |
1027 | id = KFD_FIRST_NONSIGNAL_EVENT_ID; |
1028 | idr_for_each_entry_continue(&p->event_idr, ev, id) |
1029 | if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) { |
1030 | ev->hw_exception_data = hw_exception_data; |
1031 | set_event(ev); |
1032 | } |
1033 | mutex_unlock(&p->event_mutex); |
1034 | } |
1035 | srcu_read_unlock(&kfd_processes_srcu, idx); |
1036 | } |
1037 | |