// SPDX-License-Identifier: GPL-2.0
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_KMAP_LOCAL
static inline int kmap_local_calc_idx(int idx)
{
	return idx + KM_MAX_IDX * smp_processor_id();
}

#ifndef arch_kmap_local_map_idx
#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
#endif
#endif /* CONFIG_KMAP_LOCAL */

/*
 * Virtual_count is not a pure "count".
 *   0 means that it is not mapped, and has not been mapped
 *     since a TLB flush - it is usable.
 *   1 means that there are no users, but it has been mapped
 *     since the last TLB flush - so we can't use it.
 *   n means that there are (n-1) current users of it.
 */
#ifdef CONFIG_HIGHMEM

/*
 * Architectures with aliasing data caches may define the following family
 * of helper functions in their asm/highmem.h to control the cache color of
 * virtual addresses where physical memory pages are mapped by kmap.
 */
#ifndef get_pkmap_color

/*
 * Determine color of virtual address where the page should be mapped.
 */
static inline unsigned int get_pkmap_color(struct page *page)
{
	return 0;
}
#define get_pkmap_color get_pkmap_color

/*
 * Get next index for mapping inside PKMAP region for page with given color.
 */
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	static unsigned int last_pkmap_nr;

	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
	return last_pkmap_nr;
}

/*
 * Determine if page index inside PKMAP region (pkmap_nr) of given color
 * has wrapped around PKMAP region end. When this happens an attempt to
 * flush all unused PKMAP slots is made.
 */
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr == 0;
}
/*
 * Get the number of PKMAP entries of the given color. If no free slot is
 * found after checking that many entries, kmap will sleep waiting for
 * someone to call kunmap and free a PKMAP slot.
 */
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP;
}

/*
 * Get head of a wait queue for PKMAP entries of the given color.
 * Wait queues for different mapping colors should be independent to avoid
 * unnecessary wakeups caused by freeing of slots of other colors.
 */
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

	return &pkmap_map_wait;
}
#endif
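
/*
 * Illustrative sketch only (not kernel code): an architecture with a
 * virtually indexed, aliasing data cache could override the defaults above
 * from its asm/highmem.h so that a page is only mapped at virtual addresses
 * matching its physical cache color. A hypothetical two-color variant:
 *
 *	static inline unsigned int get_pkmap_color(struct page *page)
 *	{
 *		return page_to_pfn(page) & 1;
 *	}
 *	#define get_pkmap_color get_pkmap_color
 *
 * The remaining helpers (get_next_pkmap_nr() etc.) would then have to
 * confine their search to PKMAP slots of the requested color.
 */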

atomic_long_t _totalhigh_pages __read_mostly;
EXPORT_SYMBOL(_totalhigh_pages);

unsigned int __nr_free_highpages(void)
{
	struct zone *zone;
	unsigned int pages = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	return pages;
}

static int pkmap_count[LAST_PKMAP];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the IRQ disabling out of the locking in that case to avoid potentially
 * useless overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()		spin_lock_irq(&kmap_lock)
#define unlock_kmap()		spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)	spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)	spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()		spin_lock(&kmap_lock)
#define unlock_kmap()		spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)	\
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)	\
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif

struct page *__kmap_to_page(void *vaddr)
{
	unsigned long base = (unsigned long) vaddr & PAGE_MASK;
	struct kmap_ctrl *kctrl = &current->kmap_ctrl;
	unsigned long addr = (unsigned long)vaddr;
	int i;

	/* kmap() mappings */
	if (WARN_ON_ONCE(addr >= PKMAP_ADDR(0) &&
			 addr < PKMAP_ADDR(LAST_PKMAP)))
		return pte_page(ptep_get(&pkmap_page_table[PKMAP_NR(addr)]));

	/* kmap_local_page() mappings */
	if (WARN_ON_ONCE(base >= __fix_to_virt(FIX_KMAP_END) &&
			 base < __fix_to_virt(FIX_KMAP_BEGIN))) {
		for (i = 0; i < kctrl->idx; i++) {
			unsigned long base_addr;
			int idx;

			idx = arch_kmap_local_map_idx(i, pte_pfn(kctrl->pteval[i]));
			base_addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

			if (base_addr == base)
				return pte_page(kctrl->pteval[i]);
		}
	}

	return virt_to_page(vaddr);
}
EXPORT_SYMBOL(__kmap_to_page);

static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;
		pte_t ptent;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		ptent = ptep_get(&pkmap_page_table[i]);
		BUG_ON(pte_none(ptent));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(ptent);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

void __kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}

static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;
	unsigned int last_pkmap_nr;
	unsigned int color = get_pkmap_color(page);

start:
	count = get_pkmap_entries_count(color);
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = get_next_pkmap_nr(color);
		if (no_more_pkmaps(last_pkmap_nr, color)) {
			flush_all_zero_pkmaps();
			count = get_pkmap_entries_count(color);
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep for somebody else to unmap their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);
			wait_queue_head_t *pkmap_map_wait =
				get_pkmap_wait_queue_head(color);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_high);
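
/*
 * Usage sketch (illustrative only; example_zero_highpage() is a made-up
 * helper): callers normally reach kmap_high() through the kmap() wrapper
 * and must pair every map with a kunmap() on the same page:
 *
 *	static void example_zero_highpage(struct page *page)
 *	{
 *		void *vaddr = kmap(page);
 *
 *		memset(vaddr, 0, PAGE_SIZE);
 *		kunmap(page);
 *	}
 *
 * kmap() may sleep, so this is only valid in process context.
 */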

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists. If and only if a non-NULL address is returned then a
 * matching call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *) vaddr;
}
#endif
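
/*
 * Usage sketch (illustrative only; flush_example_range() is a made-up
 * arch helper): on architectures defining ARCH_NEEDS_KMAP_HIGH_GET, code
 * which must not sleep can pin an already existing mapping:
 *
 *	void *vaddr = kmap_high_get(page);
 *
 *	if (vaddr) {
 *		flush_example_range(vaddr, PAGE_SIZE);
 *		kunmap_high(page);
 *	}
 *
 * The matching kunmap_high() is required if and only if a non-NULL
 * address was returned.
 */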

/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;
	unsigned int color = get_pkmap_color(page);
	wait_queue_head_t *pkmap_map_wait;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock. As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock. Simply
		 * test if the queue is empty.
		 */
		pkmap_map_wait = get_pkmap_wait_queue_head(color);
		need_wakeup = waitqueue_active(pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(pkmap_map_wait);
}
EXPORT_SYMBOL(kunmap_high);
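
/**
 * zero_user_segments - zero two byte ranges within a (possibly compound) page
 * @page: &struct page containing the ranges
 * @start1: byte offset of the first range, relative to the start of @page
 * @end1: first byte offset past the end of the first range
 * @start2: byte offset of the second range
 * @end2: first byte offset past the end of the second range
 *
 * Both ranges must lie within page_size(page); an empty range
 * (start >= end) is ignored. Highmem subpages are mapped and unmapped on
 * demand with kmap_local_page().
 */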
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (start1 >= end1)
		start1 = end1 = 0;
	if (start2 >= end2)
		start2 = end2 = 0;

	for (i = 0; i < compound_nr(page); i++) {
		void *kaddr = NULL;

		if (start1 >= PAGE_SIZE) {
			start1 -= PAGE_SIZE;
			end1 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end1, PAGE_SIZE);

			if (end1 > start1) {
				kaddr = kmap_local_page(page + i);
				memset(kaddr + start1, 0, this_end - start1);
			}
			end1 -= this_end;
			start1 = 0;
		}

		if (start2 >= PAGE_SIZE) {
			start2 -= PAGE_SIZE;
			end2 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end2, PAGE_SIZE);

			if (end2 > start2) {
				if (!kaddr)
					kaddr = kmap_local_page(page + i);
				memset(kaddr + start2, 0, this_end - start2);
			}
			end2 -= this_end;
			start2 = 0;
		}

		if (kaddr) {
			kunmap_local(kaddr);
			flush_dcache_page(page + i);
		}

		if (!end1 && !end2)
			break;
	}

	BUG_ON((start1 | start2 | end1 | end2) != 0);
}
EXPORT_SYMBOL(zero_user_segments);
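
/*
 * Usage sketch (illustrative only): a typical caller zeroes everything in
 * a page except the byte range [from, from + len) by passing the two
 * surrounding segments:
 *
 *	zero_user_segments(page, 0, from, from + len, PAGE_SIZE);
 */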
#endif /* CONFIG_HIGHMEM */

#ifdef CONFIG_KMAP_LOCAL

#include <asm/kmap_size.h>

/*
 * With DEBUG_KMAP_LOCAL the stack depth is doubled and every second
 * slot is unused which acts as a guard page
 */
#ifdef CONFIG_DEBUG_KMAP_LOCAL
# define KM_INCR	2
#else
# define KM_INCR	1
#endif

static inline int kmap_local_idx_push(void)
{
	WARN_ON_ONCE(in_hardirq() && !irqs_disabled());
	current->kmap_ctrl.idx += KM_INCR;
	BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX);
	return current->kmap_ctrl.idx - 1;
}

static inline int kmap_local_idx(void)
{
	return current->kmap_ctrl.idx - 1;
}

static inline void kmap_local_idx_pop(void)
{
	current->kmap_ctrl.idx -= KM_INCR;
	BUG_ON(current->kmap_ctrl.idx < 0);
}

#ifndef arch_kmap_local_post_map
# define arch_kmap_local_post_map(vaddr, pteval)	do { } while (0)
#endif

#ifndef arch_kmap_local_pre_unmap
# define arch_kmap_local_pre_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_post_unmap
# define arch_kmap_local_post_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_unmap_idx
#define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_high_get
static inline void *arch_kmap_local_high_get(struct page *page)
{
	return NULL;
}
#endif

#ifndef arch_kmap_local_set_pte
#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)	\
	set_pte_at(mm, vaddr, ptep, ptev)
#endif

/* Unmap a local mapping which was obtained by kmap_high_get() */
static inline bool kmap_high_unmap_local(unsigned long vaddr)
{
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		kunmap_high(pte_page(ptep_get(&pkmap_page_table[PKMAP_NR(vaddr)])));
		return true;
	}
#endif
	return false;
}

static pte_t *__kmap_pte;

static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
{
	if (IS_ENABLED(CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY))
		/*
		 * Set by the arch if __kmap_pte[-idx] does not produce
		 * the correct entry.
		 */
		return virt_to_kpte(vaddr);
	if (!__kmap_pte)
		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	return &__kmap_pte[-idx];
}

void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	pte_t pteval, *kmap_pte;
	unsigned long vaddr;
	int idx;

	/*
	 * Disable migration so resulting virtual address is stable
	 * across preemption.
	 */
	migrate_disable();
	preempt_disable();
	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	kmap_pte = kmap_get_pte(vaddr, idx);
	BUG_ON(!pte_none(ptep_get(kmap_pte)));
	pteval = pfn_pte(pfn, prot);
	arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval);
	arch_kmap_local_post_map(vaddr, pteval);
	current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
	preempt_enable();

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);

void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	void *kmap;

	/*
	 * To broaden the usage of the actual kmap_local() machinery always map
	 * pages when debugging is enabled and the architecture has no problems
	 * with alias mappings.
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) && !PageHighMem(page))
		return page_address(page);

	/* Try kmap_high_get() if architecture has it enabled */
	kmap = arch_kmap_local_high_get(page);
	if (kmap)
		return kmap;

	return __kmap_local_pfn_prot(page_to_pfn(page), prot);
}
EXPORT_SYMBOL(__kmap_local_page_prot);
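
/*
 * Usage sketch (illustrative only; example_copy_highpage() is a made-up
 * helper): the usual entry point is kmap_local_page(). The mappings are
 * per-task and index-based, so they must be released in reverse (stack)
 * order:
 *
 *	static void example_copy_highpage(struct page *dst, struct page *src)
 *	{
 *		void *vdst = kmap_local_page(dst);
 *		void *vsrc = kmap_local_page(src);
 *
 *		memcpy(vdst, vsrc, PAGE_SIZE);
 *		kunmap_local(vsrc);
 *		kunmap_local(vdst);
 *	}
 */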

void kunmap_local_indexed(const void *vaddr)
{
	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
	pte_t *kmap_pte;
	int idx;

	if (addr < __fix_to_virt(FIX_KMAP_END) ||
	    addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP)) {
			/* This _should_ never happen! See above. */
			WARN_ON_ONCE(1);
			return;
		}
		/*
		 * Handle mappings which were obtained by kmap_high_get()
		 * first as the virtual address of such mappings is below
		 * PAGE_OFFSET. Warn for all other addresses which are in
		 * the user space part of the virtual address space.
		 */
		if (!kmap_high_unmap_local(addr))
			WARN_ON_ONCE(addr < PAGE_OFFSET);
		return;
	}

	preempt_disable();
	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	kmap_pte = kmap_get_pte(addr, idx);
	arch_kmap_local_pre_unmap(addr);
	pte_clear(&init_mm, addr, kmap_pte);
	arch_kmap_local_post_unmap(addr);
	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
	kmap_local_idx_pop();
	preempt_enable();
	migrate_enable();
}
EXPORT_SYMBOL(kunmap_local_indexed);

/*
 * Invoked before switch_to(). This is safe even if an interrupt which
 * needs a kmap_local happens during or after clearing the maps, because
 * task::kmap_ctrl.idx is not modified by the unmapping code, so a nested
 * kmap_local will use the next unused index and restore that index on
 * unmap. The already cleared kmaps of the outgoing task are irrelevant
 * because the interrupt context does not know about them. The same applies
 * when scheduling back in for an interrupt which happens before the
 * restore is complete.
 */
void __kmap_local_sched_out(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte;
	int i;

	/* Clear kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guard */
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
			WARN_ON_ONCE(pte_val(pteval) != 0);
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/*
		 * This is a horrible hack for XTENSA to calculate the
		 * coloured PTE index. Uses the PFN encoded into the pteval
		 * and the map index calculation because the actual mapped
		 * virtual address is not stored in task::kmap_ctrl.
		 * For any sane architecture this is optimized out.
		 */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));

		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		kmap_pte = kmap_get_pte(addr, idx);
		arch_kmap_local_pre_unmap(addr);
		pte_clear(&init_mm, addr, kmap_pte);
		arch_kmap_local_post_unmap(addr);
	}
}

void __kmap_local_sched_in(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte;
	int i;

	/* Restore kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guard */
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
			WARN_ON_ONCE(pte_val(pteval) != 0);
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/* See comment in __kmap_local_sched_out() */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		kmap_pte = kmap_get_pte(addr, idx);
		set_pte_at(&init_mm, addr, kmap_pte, pteval);
		arch_kmap_local_post_map(addr, pteval);
	}
}

void kmap_local_fork(struct task_struct *tsk)
{
	if (WARN_ON_ONCE(tsk->kmap_ctrl.idx))
		memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl));
}

#endif

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

static struct page_address_map page_address_maps[LAST_PKMAP];

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}
EXPORT_SYMBOL(page_address);
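
/*
 * Note (illustrative only): page_address() merely reports an existing
 * mapping; it never creates one. A highmem page which is not currently
 * mapped yields NULL:
 *
 *	if (!page_address(page))
 *		pr_debug("pfn %lu has no kernel mapping\n", page_to_pfn(page));
 */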

/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				break;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
}

void __init page_address_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */