1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * linux/mm/nommu.c |
4 | * |
 * Replacement code for mm functions to support CPUs that don't
6 | * have any form of memory management unit (thus no virtual memory). |
7 | * |
8 | * See Documentation/admin-guide/mm/nommu-mmap.rst |
9 | * |
10 | * Copyright (c) 2004-2008 David Howells <dhowells@redhat.com> |
11 | * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com> |
12 | * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org> |
13 | * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com> |
14 | * Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org> |
15 | */ |
16 | |
17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
18 | |
19 | #include <linux/export.h> |
20 | #include <linux/mm.h> |
21 | #include <linux/sched/mm.h> |
22 | #include <linux/mman.h> |
23 | #include <linux/swap.h> |
24 | #include <linux/file.h> |
25 | #include <linux/highmem.h> |
26 | #include <linux/pagemap.h> |
27 | #include <linux/slab.h> |
28 | #include <linux/vmalloc.h> |
29 | #include <linux/backing-dev.h> |
30 | #include <linux/compiler.h> |
31 | #include <linux/mount.h> |
32 | #include <linux/personality.h> |
33 | #include <linux/security.h> |
34 | #include <linux/syscalls.h> |
35 | #include <linux/audit.h> |
36 | #include <linux/printk.h> |
37 | |
38 | #include <linux/uaccess.h> |
39 | #include <linux/uio.h> |
40 | #include <asm/tlb.h> |
41 | #include <asm/tlbflush.h> |
42 | #include <asm/mmu_context.h> |
43 | #include "internal.h" |
44 | |
45 | void *high_memory; |
46 | EXPORT_SYMBOL(high_memory); |
47 | struct page *mem_map; |
48 | unsigned long max_mapnr; |
49 | EXPORT_SYMBOL(max_mapnr); |
50 | unsigned long highest_memmap_pfn; |
51 | int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS; |
52 | int heap_stack_gap = 0; |
53 | |
54 | atomic_long_t mmap_pages_allocated; |
55 | |
56 | EXPORT_SYMBOL(mem_map); |
57 | |
58 | /* list of mapped, potentially shareable regions */ |
59 | static struct kmem_cache *vm_region_jar; |
60 | struct rb_root nommu_region_tree = RB_ROOT; |
61 | DECLARE_RWSEM(nommu_region_sem); |
62 | |
63 | const struct vm_operations_struct generic_file_vm_ops = { |
64 | }; |
65 | |
66 | /* |
67 | * Return the total memory allocated for this pointer, not |
68 | * just what the caller asked for. |
69 | * |
70 | * Doesn't have to be accurate, i.e. may have races. |
71 | */ |
72 | unsigned int kobjsize(const void *objp) |
73 | { |
74 | struct page *page; |
75 | |
76 | /* |
77 | * If the object we have should not have ksize performed on it, |
78 | * return size of 0 |
79 | */ |
80 | if (!objp || !virt_addr_valid(objp)) |
81 | return 0; |
82 | |
	page = virt_to_head_page(objp);
84 | |
85 | /* |
86 | * If the allocator sets PageSlab, we know the pointer came from |
87 | * kmalloc(). |
88 | */ |
89 | if (PageSlab(page)) |
90 | return ksize(objp); |
91 | |
92 | /* |
93 | * If it's not a compound page, see if we have a matching VMA |
94 | * region. This test is intentionally done in reverse order, |
95 | * so if there's no VMA, we still fall through and hand back |
96 | * PAGE_SIZE for 0-order pages. |
97 | */ |
98 | if (!PageCompound(page)) { |
99 | struct vm_area_struct *vma; |
100 | |
		vma = find_vma(current->mm, (unsigned long)objp);
102 | if (vma) |
103 | return vma->vm_end - vma->vm_start; |
104 | } |
105 | |
106 | /* |
107 | * The ksize() function is only guaranteed to work for pointers |
108 | * returned by kmalloc(). So handle arbitrary pointers here. |
109 | */ |
110 | return page_size(page); |
111 | } |
112 | |
113 | /** |
114 | * follow_pfn - look up PFN at a user virtual address |
115 | * @vma: memory mapping |
116 | * @address: user virtual address |
117 | * @pfn: location to store found PFN |
118 | * |
119 | * Only IO mappings and raw PFN mappings are allowed. |
120 | * |
121 | * Returns zero and the pfn at @pfn on success, -ve otherwise. |
122 | */ |
123 | int follow_pfn(struct vm_area_struct *vma, unsigned long address, |
124 | unsigned long *pfn) |
125 | { |
126 | if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) |
127 | return -EINVAL; |
128 | |
129 | *pfn = address >> PAGE_SHIFT; |
130 | return 0; |
131 | } |
132 | EXPORT_SYMBOL(follow_pfn); |
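
/*
 * Illustrative sketch (assumes a driver-created VM_IO/VM_PFNMAP VMA):
 * with no MMU there is no page-table walk, so the PFN is derived
 * directly from the virtual address, which equals the physical one.
 *
 *	unsigned long pfn;
 *
 *	if (!follow_pfn(vma, addr, &pfn))
 *		pr_info("addr %#lx -> pfn %#lx\n", addr, pfn);
 */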
133 | |
134 | LIST_HEAD(vmap_area_list); |
135 | |
136 | void vfree(const void *addr) |
137 | { |
	kfree(addr);
139 | } |
140 | EXPORT_SYMBOL(vfree); |
141 | |
142 | void *__vmalloc(unsigned long size, gfp_t gfp_mask) |
143 | { |
144 | /* |
145 | * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc() |
146 | * returns only a logical address. |
147 | */ |
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
149 | } |
150 | EXPORT_SYMBOL(__vmalloc); |
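
/*
 * A minimal usage sketch: under NOMMU every vmalloc() variant below is
 * backed by a single physically contiguous kmalloc(), so large requests
 * can fail on a fragmented system even when plenty of memory is free.
 *
 *	void *buf = vmalloc(64 * 1024);	// really a kmalloc()
 *
 *	if (buf)
 *		vfree(buf);		// forwards to kfree()
 */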
151 | |
152 | void *__vmalloc_node_range(unsigned long size, unsigned long align, |
153 | unsigned long start, unsigned long end, gfp_t gfp_mask, |
154 | pgprot_t prot, unsigned long vm_flags, int node, |
155 | const void *caller) |
156 | { |
157 | return __vmalloc(size, gfp_mask); |
158 | } |
159 | |
160 | void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, |
161 | int node, const void *caller) |
162 | { |
163 | return __vmalloc(size, gfp_mask); |
164 | } |
165 | |
166 | static void *__vmalloc_user_flags(unsigned long size, gfp_t flags) |
167 | { |
168 | void *ret; |
169 | |
170 | ret = __vmalloc(size, flags); |
171 | if (ret) { |
172 | struct vm_area_struct *vma; |
173 | |
174 | mmap_write_lock(current->mm); |
		vma = find_vma(current->mm, (unsigned long)ret);
176 | if (vma) |
177 | vm_flags_set(vma, VM_USERMAP); |
178 | mmap_write_unlock(current->mm); |
179 | } |
180 | |
181 | return ret; |
182 | } |
183 | |
184 | void *vmalloc_user(unsigned long size) |
185 | { |
186 | return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO); |
187 | } |
188 | EXPORT_SYMBOL(vmalloc_user); |
189 | |
190 | struct page *vmalloc_to_page(const void *addr) |
191 | { |
192 | return virt_to_page(addr); |
193 | } |
194 | EXPORT_SYMBOL(vmalloc_to_page); |
195 | |
196 | unsigned long vmalloc_to_pfn(const void *addr) |
197 | { |
198 | return page_to_pfn(virt_to_page(addr)); |
199 | } |
200 | EXPORT_SYMBOL(vmalloc_to_pfn); |
201 | |
202 | long vread_iter(struct iov_iter *iter, const char *addr, size_t count) |
203 | { |
204 | /* Don't allow overflow */ |
205 | if ((unsigned long) addr + count < count) |
206 | count = -(unsigned long) addr; |
207 | |
	return copy_to_iter(addr, count, iter);
209 | } |
210 | |
211 | /* |
212 | * vmalloc - allocate virtually contiguous memory |
213 | * |
214 | * @size: allocation size |
215 | * |
216 | * Allocate enough pages to cover @size from the page level |
217 | * allocator and map them into contiguous kernel virtual space. |
218 | * |
219 | * For tight control over page level allocator and protection flags |
220 | * use __vmalloc() instead. |
221 | */ |
222 | void *vmalloc(unsigned long size) |
223 | { |
224 | return __vmalloc(size, GFP_KERNEL); |
225 | } |
226 | EXPORT_SYMBOL(vmalloc); |
227 | |
228 | void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc); |
229 | |
230 | /* |
231 | * vzalloc - allocate virtually contiguous memory with zero fill |
232 | * |
233 | * @size: allocation size |
234 | * |
235 | * Allocate enough pages to cover @size from the page level |
236 | * allocator and map them into contiguous kernel virtual space. |
237 | * The memory allocated is set to zero. |
238 | * |
239 | * For tight control over page level allocator and protection flags |
240 | * use __vmalloc() instead. |
241 | */ |
242 | void *vzalloc(unsigned long size) |
243 | { |
244 | return __vmalloc(size, GFP_KERNEL | __GFP_ZERO); |
245 | } |
246 | EXPORT_SYMBOL(vzalloc); |
247 | |
248 | /** |
249 | * vmalloc_node - allocate memory on a specific node |
250 | * @size: allocation size |
251 | * @node: numa node |
252 | * |
253 | * Allocate enough pages to cover @size from the page level |
254 | * allocator and map them into contiguous kernel virtual space. |
255 | * |
256 | * For tight control over page level allocator and protection flags |
257 | * use __vmalloc() instead. |
258 | */ |
259 | void *vmalloc_node(unsigned long size, int node) |
260 | { |
261 | return vmalloc(size); |
262 | } |
263 | EXPORT_SYMBOL(vmalloc_node); |
264 | |
265 | /** |
266 | * vzalloc_node - allocate memory on a specific node with zero fill |
267 | * @size: allocation size |
268 | * @node: numa node |
269 | * |
270 | * Allocate enough pages to cover @size from the page level |
271 | * allocator and map them into contiguous kernel virtual space. |
272 | * The memory allocated is set to zero. |
273 | * |
274 | * For tight control over page level allocator and protection flags |
275 | * use __vmalloc() instead. |
276 | */ |
277 | void *vzalloc_node(unsigned long size, int node) |
278 | { |
279 | return vzalloc(size); |
280 | } |
281 | EXPORT_SYMBOL(vzalloc_node); |
282 | |
283 | /** |
284 | * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) |
285 | * @size: allocation size |
286 | * |
287 | * Allocate enough 32bit PA addressable pages to cover @size from the |
288 | * page level allocator and map them into contiguous kernel virtual space. |
289 | */ |
290 | void *vmalloc_32(unsigned long size) |
291 | { |
292 | return __vmalloc(size, GFP_KERNEL); |
293 | } |
294 | EXPORT_SYMBOL(vmalloc_32); |
295 | |
296 | /** |
297 | * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory |
298 | * @size: allocation size |
299 | * |
300 | * The resulting memory area is 32bit addressable and zeroed so it can be |
301 | * mapped to userspace without leaking data. |
302 | * |
303 | * VM_USERMAP is set on the corresponding VMA so that subsequent calls to |
304 | * remap_vmalloc_range() are permissible. |
305 | */ |
306 | void *vmalloc_32_user(unsigned long size) |
307 | { |
308 | /* |
309 | * We'll have to sort out the ZONE_DMA bits for 64-bit, |
310 | * but for now this can simply use vmalloc_user() directly. |
311 | */ |
312 | return vmalloc_user(size); |
313 | } |
314 | EXPORT_SYMBOL(vmalloc_32_user); |
315 | |
316 | void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot) |
317 | { |
318 | BUG(); |
319 | return NULL; |
320 | } |
321 | EXPORT_SYMBOL(vmap); |
322 | |
323 | void vunmap(const void *addr) |
324 | { |
325 | BUG(); |
326 | } |
327 | EXPORT_SYMBOL(vunmap); |
328 | |
329 | void *vm_map_ram(struct page **pages, unsigned int count, int node) |
330 | { |
331 | BUG(); |
332 | return NULL; |
333 | } |
334 | EXPORT_SYMBOL(vm_map_ram); |
335 | |
336 | void vm_unmap_ram(const void *mem, unsigned int count) |
337 | { |
338 | BUG(); |
339 | } |
340 | EXPORT_SYMBOL(vm_unmap_ram); |
341 | |
342 | void vm_unmap_aliases(void) |
343 | { |
344 | } |
345 | EXPORT_SYMBOL_GPL(vm_unmap_aliases); |
346 | |
347 | void free_vm_area(struct vm_struct *area) |
348 | { |
349 | BUG(); |
350 | } |
351 | EXPORT_SYMBOL_GPL(free_vm_area); |
352 | |
353 | int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, |
354 | struct page *page) |
355 | { |
356 | return -EINVAL; |
357 | } |
358 | EXPORT_SYMBOL(vm_insert_page); |
359 | |
360 | int vm_map_pages(struct vm_area_struct *vma, struct page **pages, |
361 | unsigned long num) |
362 | { |
363 | return -EINVAL; |
364 | } |
365 | EXPORT_SYMBOL(vm_map_pages); |
366 | |
367 | int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, |
368 | unsigned long num) |
369 | { |
370 | return -EINVAL; |
371 | } |
372 | EXPORT_SYMBOL(vm_map_pages_zero); |
373 | |
374 | /* |
375 | * sys_brk() for the most part doesn't need the global kernel |
376 | * lock, except when an application is doing something nasty |
377 | * like trying to un-brk an area that has already been mapped |
 * to a regular file. In this case, the unmapping will need
379 | * to invoke file system routines that need the global lock. |
380 | */ |
381 | SYSCALL_DEFINE1(brk, unsigned long, brk) |
382 | { |
383 | struct mm_struct *mm = current->mm; |
384 | |
385 | if (brk < mm->start_brk || brk > mm->context.end_brk) |
386 | return mm->brk; |
387 | |
388 | if (mm->brk == brk) |
389 | return mm->brk; |
390 | |
391 | /* |
392 | * Always allow shrinking brk |
393 | */ |
394 | if (brk <= mm->brk) { |
395 | mm->brk = brk; |
396 | return brk; |
397 | } |
398 | |
399 | /* |
400 | * Ok, looks good - let it rip. |
401 | */ |
	flush_icache_user_range(mm->brk, brk);
403 | return mm->brk = brk; |
404 | } |
405 | |
406 | /* |
407 | * initialise the percpu counter for VM and region record slabs |
408 | */ |
409 | void __init mmap_init(void) |
410 | { |
411 | int ret; |
412 | |
413 | ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); |
414 | VM_BUG_ON(ret); |
415 | vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT); |
416 | } |
417 | |
418 | /* |
419 | * validate the region tree |
420 | * - the caller must hold the region lock |
421 | */ |
422 | #ifdef CONFIG_DEBUG_NOMMU_REGIONS |
423 | static noinline void validate_nommu_regions(void) |
424 | { |
425 | struct vm_region *region, *last; |
426 | struct rb_node *p, *lastp; |
427 | |
428 | lastp = rb_first(&nommu_region_tree); |
429 | if (!lastp) |
430 | return; |
431 | |
432 | last = rb_entry(lastp, struct vm_region, vm_rb); |
433 | BUG_ON(last->vm_end <= last->vm_start); |
434 | BUG_ON(last->vm_top < last->vm_end); |
435 | |
436 | while ((p = rb_next(lastp))) { |
437 | region = rb_entry(p, struct vm_region, vm_rb); |
438 | last = rb_entry(lastp, struct vm_region, vm_rb); |
439 | |
440 | BUG_ON(region->vm_end <= region->vm_start); |
441 | BUG_ON(region->vm_top < region->vm_end); |
442 | BUG_ON(region->vm_start < last->vm_top); |
443 | |
444 | lastp = p; |
445 | } |
446 | } |
447 | #else |
448 | static void validate_nommu_regions(void) |
449 | { |
450 | } |
451 | #endif |
452 | |
453 | /* |
454 | * add a region into the global tree |
455 | */ |
456 | static void add_nommu_region(struct vm_region *region) |
457 | { |
458 | struct vm_region *pregion; |
459 | struct rb_node **p, *parent; |
460 | |
461 | validate_nommu_regions(); |
462 | |
463 | parent = NULL; |
464 | p = &nommu_region_tree.rb_node; |
465 | while (*p) { |
466 | parent = *p; |
467 | pregion = rb_entry(parent, struct vm_region, vm_rb); |
468 | if (region->vm_start < pregion->vm_start) |
469 | p = &(*p)->rb_left; |
470 | else if (region->vm_start > pregion->vm_start) |
471 | p = &(*p)->rb_right; |
472 | else if (pregion == region) |
473 | return; |
474 | else |
475 | BUG(); |
476 | } |
477 | |
	rb_link_node(&region->vm_rb, parent, p);
479 | rb_insert_color(®ion->vm_rb, &nommu_region_tree); |
480 | |
481 | validate_nommu_regions(); |
482 | } |
483 | |
484 | /* |
485 | * delete a region from the global tree |
486 | */ |
487 | static void delete_nommu_region(struct vm_region *region) |
488 | { |
489 | BUG_ON(!nommu_region_tree.rb_node); |
490 | |
491 | validate_nommu_regions(); |
492 | rb_erase(®ion->vm_rb, &nommu_region_tree); |
493 | validate_nommu_regions(); |
494 | } |
495 | |
496 | /* |
497 | * free a contiguous series of pages |
498 | */ |
499 | static void free_page_series(unsigned long from, unsigned long to) |
500 | { |
501 | for (; from < to; from += PAGE_SIZE) { |
502 | struct page *page = virt_to_page((void *)from); |
503 | |
		atomic_long_dec(&mmap_pages_allocated);
505 | put_page(page); |
506 | } |
507 | } |
508 | |
509 | /* |
510 | * release a reference to a region |
511 | * - the caller must hold the region semaphore for writing, which this releases |
512 | * - the region may not have been added to the tree yet, in which case vm_top |
513 | * will equal vm_start |
514 | */ |
515 | static void __put_nommu_region(struct vm_region *region) |
516 | __releases(nommu_region_sem) |
517 | { |
518 | BUG_ON(!nommu_region_tree.rb_node); |
519 | |
520 | if (--region->vm_usage == 0) { |
521 | if (region->vm_top > region->vm_start) |
522 | delete_nommu_region(region); |
		up_write(&nommu_region_sem);
524 | |
525 | if (region->vm_file) |
526 | fput(region->vm_file); |
527 | |
528 | /* IO memory and memory shared directly out of the pagecache |
529 | * from ramfs/tmpfs mustn't be released here */ |
530 | if (region->vm_flags & VM_MAPPED_COPY) |
			free_page_series(region->vm_start, region->vm_top);
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
535 | } |
536 | } |
537 | |
538 | /* |
539 | * release a reference to a region |
540 | */ |
541 | static void put_nommu_region(struct vm_region *region) |
542 | { |
	down_write(&nommu_region_sem);
544 | __put_nommu_region(region); |
545 | } |
546 | |
547 | static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm) |
548 | { |
549 | vma->vm_mm = mm; |
550 | |
551 | /* add the VMA to the mapping */ |
552 | if (vma->vm_file) { |
553 | struct address_space *mapping = vma->vm_file->f_mapping; |
554 | |
555 | i_mmap_lock_write(mapping); |
556 | flush_dcache_mmap_lock(mapping); |
		vma_interval_tree_insert(vma, &mapping->i_mmap);
558 | flush_dcache_mmap_unlock(mapping); |
559 | i_mmap_unlock_write(mapping); |
560 | } |
561 | } |
562 | |
563 | static void cleanup_vma_from_mm(struct vm_area_struct *vma) |
564 | { |
565 | vma->vm_mm->map_count--; |
566 | /* remove the VMA from the mapping */ |
567 | if (vma->vm_file) { |
568 | struct address_space *mapping; |
569 | mapping = vma->vm_file->f_mapping; |
570 | |
571 | i_mmap_lock_write(mapping); |
572 | flush_dcache_mmap_lock(mapping); |
		vma_interval_tree_remove(vma, &mapping->i_mmap);
574 | flush_dcache_mmap_unlock(mapping); |
575 | i_mmap_unlock_write(mapping); |
576 | } |
577 | } |
578 | |
579 | /* |
580 | * delete a VMA from its owning mm_struct and address space |
581 | */ |
582 | static int delete_vma_from_mm(struct vm_area_struct *vma) |
583 | { |
584 | VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start); |
585 | |
	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
	if (vma_iter_prealloc(&vmi, vma)) {
		pr_warn("Allocation of vma tree for process %d failed\n",
589 | current->pid); |
590 | return -ENOMEM; |
591 | } |
592 | cleanup_vma_from_mm(vma); |
593 | |
594 | /* remove from the MM's tree and list */ |
	vma_iter_clear(&vmi);
596 | return 0; |
597 | } |
598 | /* |
599 | * destroy a VMA record |
600 | */ |
601 | static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) |
602 | { |
603 | if (vma->vm_ops && vma->vm_ops->close) |
604 | vma->vm_ops->close(vma); |
605 | if (vma->vm_file) |
606 | fput(vma->vm_file); |
	put_nommu_region(vma->vm_region);
608 | vm_area_free(vma); |
609 | } |
610 | |
611 | struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, |
612 | unsigned long start_addr, |
613 | unsigned long end_addr) |
614 | { |
615 | unsigned long index = start_addr; |
616 | |
617 | mmap_assert_locked(mm); |
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
619 | } |
620 | EXPORT_SYMBOL(find_vma_intersection); |
621 | |
622 | /* |
623 | * look up the first VMA in which addr resides, NULL if none |
624 | * - should be called with mm->mmap_lock at least held readlocked |
625 | */ |
626 | struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) |
627 | { |
628 | VMA_ITERATOR(vmi, mm, addr); |
629 | |
	return vma_iter_load(&vmi);
631 | } |
632 | EXPORT_SYMBOL(find_vma); |
633 | |
634 | /* |
635 | * At least xtensa ends up having protection faults even with no |
 * MMU. No stack expansion, at least.
637 | */ |
638 | struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, |
639 | unsigned long addr, struct pt_regs *regs) |
640 | { |
641 | struct vm_area_struct *vma; |
642 | |
643 | mmap_read_lock(mm); |
644 | vma = vma_lookup(mm, addr); |
645 | if (!vma) |
646 | mmap_read_unlock(mm); |
647 | return vma; |
648 | } |
649 | |
650 | /* |
651 | * expand a stack to a given address |
652 | * - not supported under NOMMU conditions |
653 | */ |
654 | int expand_stack_locked(struct vm_area_struct *vma, unsigned long addr) |
655 | { |
656 | return -ENOMEM; |
657 | } |
658 | |
659 | struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) |
660 | { |
661 | mmap_read_unlock(mm); |
662 | return NULL; |
663 | } |
664 | |
665 | /* |
 * look up the first VMA that exactly matches addr
667 | * - should be called with mm->mmap_lock at least held readlocked |
668 | */ |
669 | static struct vm_area_struct *find_vma_exact(struct mm_struct *mm, |
670 | unsigned long addr, |
671 | unsigned long len) |
672 | { |
673 | struct vm_area_struct *vma; |
674 | unsigned long end = addr + len; |
675 | VMA_ITERATOR(vmi, mm, addr); |
676 | |
	vma = vma_iter_load(&vmi);
678 | if (!vma) |
679 | return NULL; |
680 | if (vma->vm_start != addr) |
681 | return NULL; |
682 | if (vma->vm_end != end) |
683 | return NULL; |
684 | |
685 | return vma; |
686 | } |
687 | |
688 | /* |
689 | * determine whether a mapping should be permitted and, if so, what sort of |
690 | * mapping we're capable of supporting |
691 | */ |
692 | static int validate_mmap_request(struct file *file, |
693 | unsigned long addr, |
694 | unsigned long len, |
695 | unsigned long prot, |
696 | unsigned long flags, |
697 | unsigned long pgoff, |
698 | unsigned long *_capabilities) |
699 | { |
700 | unsigned long capabilities, rlen; |
701 | int ret; |
702 | |
703 | /* do the simple checks first */ |
704 | if (flags & MAP_FIXED) |
705 | return -EINVAL; |
706 | |
707 | if ((flags & MAP_TYPE) != MAP_PRIVATE && |
708 | (flags & MAP_TYPE) != MAP_SHARED) |
709 | return -EINVAL; |
710 | |
711 | if (!len) |
712 | return -EINVAL; |
713 | |
714 | /* Careful about overflows.. */ |
715 | rlen = PAGE_ALIGN(len); |
716 | if (!rlen || rlen > TASK_SIZE) |
717 | return -ENOMEM; |
718 | |
719 | /* offset overflow? */ |
720 | if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff) |
721 | return -EOVERFLOW; |
722 | |
723 | if (file) { |
724 | /* files must support mmap */ |
725 | if (!file->f_op->mmap) |
726 | return -ENODEV; |
727 | |
728 | /* work out if what we've got could possibly be shared |
729 | * - we support chardevs that provide their own "memory" |
730 | * - we support files/blockdevs that are memory backed |
731 | */ |
732 | if (file->f_op->mmap_capabilities) { |
733 | capabilities = file->f_op->mmap_capabilities(file); |
734 | } else { |
735 | /* no explicit capabilities set, so assume some |
736 | * defaults */ |
			switch (file_inode(file)->i_mode & S_IFMT) {
738 | case S_IFREG: |
739 | case S_IFBLK: |
740 | capabilities = NOMMU_MAP_COPY; |
741 | break; |
742 | |
743 | case S_IFCHR: |
744 | capabilities = |
745 | NOMMU_MAP_DIRECT | |
746 | NOMMU_MAP_READ | |
747 | NOMMU_MAP_WRITE; |
748 | break; |
749 | |
750 | default: |
751 | return -EINVAL; |
752 | } |
753 | } |
754 | |
755 | /* eliminate any capabilities that we can't support on this |
756 | * device */ |
757 | if (!file->f_op->get_unmapped_area) |
758 | capabilities &= ~NOMMU_MAP_DIRECT; |
759 | if (!(file->f_mode & FMODE_CAN_READ)) |
760 | capabilities &= ~NOMMU_MAP_COPY; |
761 | |
762 | /* The file shall have been opened with read permission. */ |
763 | if (!(file->f_mode & FMODE_READ)) |
764 | return -EACCES; |
765 | |
766 | if (flags & MAP_SHARED) { |
767 | /* do checks for writing, appending and locking */ |
768 | if ((prot & PROT_WRITE) && |
769 | !(file->f_mode & FMODE_WRITE)) |
770 | return -EACCES; |
771 | |
772 | if (IS_APPEND(file_inode(file)) && |
773 | (file->f_mode & FMODE_WRITE)) |
774 | return -EACCES; |
775 | |
776 | if (!(capabilities & NOMMU_MAP_DIRECT)) |
777 | return -ENODEV; |
778 | |
779 | /* we mustn't privatise shared mappings */ |
780 | capabilities &= ~NOMMU_MAP_COPY; |
781 | } else { |
782 | /* we're going to read the file into private memory we |
783 | * allocate */ |
784 | if (!(capabilities & NOMMU_MAP_COPY)) |
785 | return -ENODEV; |
786 | |
787 | /* we don't permit a private writable mapping to be |
788 | * shared with the backing device */ |
789 | if (prot & PROT_WRITE) |
790 | capabilities &= ~NOMMU_MAP_DIRECT; |
791 | } |
792 | |
793 | if (capabilities & NOMMU_MAP_DIRECT) { |
794 | if (((prot & PROT_READ) && !(capabilities & NOMMU_MAP_READ)) || |
795 | ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) || |
796 | ((prot & PROT_EXEC) && !(capabilities & NOMMU_MAP_EXEC)) |
797 | ) { |
798 | capabilities &= ~NOMMU_MAP_DIRECT; |
799 | if (flags & MAP_SHARED) { |
800 | pr_warn("MAP_SHARED not completely supported on !MMU\n" ); |
801 | return -EINVAL; |
802 | } |
803 | } |
804 | } |
805 | |
806 | /* handle executable mappings and implied executable |
807 | * mappings */ |
		if (path_noexec(&file->f_path)) {
809 | if (prot & PROT_EXEC) |
810 | return -EPERM; |
811 | } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) { |
812 | /* handle implication of PROT_EXEC by PROT_READ */ |
813 | if (current->personality & READ_IMPLIES_EXEC) { |
814 | if (capabilities & NOMMU_MAP_EXEC) |
815 | prot |= PROT_EXEC; |
816 | } |
817 | } else if ((prot & PROT_READ) && |
818 | (prot & PROT_EXEC) && |
819 | !(capabilities & NOMMU_MAP_EXEC) |
820 | ) { |
821 | /* backing file is not executable, try to copy */ |
822 | capabilities &= ~NOMMU_MAP_DIRECT; |
823 | } |
824 | } else { |
825 | /* anonymous mappings are always memory backed and can be |
826 | * privately mapped |
827 | */ |
828 | capabilities = NOMMU_MAP_COPY; |
829 | |
830 | /* handle PROT_EXEC implication by PROT_READ */ |
831 | if ((prot & PROT_READ) && |
832 | (current->personality & READ_IMPLIES_EXEC)) |
833 | prot |= PROT_EXEC; |
834 | } |
835 | |
836 | /* allow the security API to have its say */ |
837 | ret = security_mmap_addr(addr); |
838 | if (ret < 0) |
839 | return ret; |
840 | |
841 | /* looks okay */ |
842 | *_capabilities = capabilities; |
843 | return 0; |
844 | } |
845 | |
846 | /* |
847 | * we've determined that we can make the mapping, now translate what we |
848 | * now know into VMA flags |
849 | */ |
850 | static unsigned long determine_vm_flags(struct file *file, |
851 | unsigned long prot, |
852 | unsigned long flags, |
853 | unsigned long capabilities) |
854 | { |
855 | unsigned long vm_flags; |
856 | |
	vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);
858 | |
859 | if (!file) { |
860 | /* |
861 | * MAP_ANONYMOUS. MAP_SHARED is mapped to MAP_PRIVATE, because |
862 | * there is no fork(). |
863 | */ |
864 | vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; |
865 | } else if (flags & MAP_PRIVATE) { |
866 | /* MAP_PRIVATE file mapping */ |
867 | if (capabilities & NOMMU_MAP_DIRECT) |
868 | vm_flags |= (capabilities & NOMMU_VMFLAGS); |
869 | else |
870 | vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; |
871 | |
872 | if (!(prot & PROT_WRITE) && !current->ptrace) |
873 | /* |
874 | * R/O private file mapping which cannot be used to |
875 | * modify memory, especially also not via active ptrace |
876 | * (e.g., set breakpoints) or later by upgrading |
877 | * permissions (no mprotect()). We can try overlaying |
878 | * the file mapping, which will work e.g., on chardevs, |
			 * ramfs/tmpfs/shmfs and romfs/cramfs.
880 | */ |
881 | vm_flags |= VM_MAYOVERLAY; |
882 | } else { |
883 | /* MAP_SHARED file mapping: NOMMU_MAP_DIRECT is set. */ |
884 | vm_flags |= VM_SHARED | VM_MAYSHARE | |
885 | (capabilities & NOMMU_VMFLAGS); |
886 | } |
887 | |
888 | return vm_flags; |
889 | } |
890 | |
891 | /* |
892 | * set up a shared mapping on a file (the driver or filesystem provides and |
893 | * pins the storage) |
894 | */ |
895 | static int do_mmap_shared_file(struct vm_area_struct *vma) |
896 | { |
897 | int ret; |
898 | |
	ret = call_mmap(vma->vm_file, vma);
900 | if (ret == 0) { |
901 | vma->vm_region->vm_top = vma->vm_region->vm_end; |
902 | return 0; |
903 | } |
904 | if (ret != -ENOSYS) |
905 | return ret; |
906 | |
907 | /* getting -ENOSYS indicates that direct mmap isn't possible (as |
908 | * opposed to tried but failed) so we can only give a suitable error as |
909 | * it's not possible to make a private copy if MAP_SHARED was given */ |
910 | return -ENODEV; |
911 | } |
912 | |
913 | /* |
914 | * set up a private mapping or an anonymous shared mapping |
915 | */ |
916 | static int do_mmap_private(struct vm_area_struct *vma, |
917 | struct vm_region *region, |
918 | unsigned long len, |
919 | unsigned long capabilities) |
920 | { |
921 | unsigned long total, point; |
922 | void *base; |
923 | int ret, order; |
924 | |
925 | /* |
926 | * Invoke the file's mapping function so that it can keep track of |
927 | * shared mappings on devices or memory. VM_MAYOVERLAY will be set if |
928 | * it may attempt to share, which will make is_nommu_shared_mapping() |
929 | * happy. |
930 | */ |
931 | if (capabilities & NOMMU_MAP_DIRECT) { |
		ret = call_mmap(vma->vm_file, vma);
933 | /* shouldn't return success if we're not sharing */ |
934 | if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags))) |
935 | ret = -ENOSYS; |
936 | if (ret == 0) { |
937 | vma->vm_region->vm_top = vma->vm_region->vm_end; |
938 | return 0; |
939 | } |
940 | if (ret != -ENOSYS) |
941 | return ret; |
942 | |
943 | /* getting an ENOSYS error indicates that direct mmap isn't |
944 | * possible (as opposed to tried but failed) so we'll try to |
945 | * make a private copy of the data and map that instead */ |
946 | } |
947 | |
948 | |
949 | /* allocate some memory to hold the mapping |
950 | * - note that this may not return a page-aligned address if the object |
951 | * we're allocating is smaller than a page |
952 | */ |
	order = get_order(len);
954 | total = 1 << order; |
955 | point = len >> PAGE_SHIFT; |
956 | |
957 | /* we don't want to allocate a power-of-2 sized page set */ |
958 | if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) |
959 | total = point; |
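
	/*
	 * Worked example: a len of 5 pages gives order = 3 and total = 8;
	 * with the default trim threshold of 1, total - point = 3 trips
	 * the check above, total drops back to 5, and alloc_pages_exact()
	 * below hands the excess tail of the power-of-2 block back to the
	 * page allocator.
	 */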
960 | |
	base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
962 | if (!base) |
963 | goto enomem; |
964 | |
	atomic_long_add(total, &mmap_pages_allocated);
966 | |
	vm_flags_set(vma, VM_MAPPED_COPY);
968 | region->vm_flags = vma->vm_flags; |
969 | region->vm_start = (unsigned long) base; |
970 | region->vm_end = region->vm_start + len; |
971 | region->vm_top = region->vm_start + (total << PAGE_SHIFT); |
972 | |
973 | vma->vm_start = region->vm_start; |
974 | vma->vm_end = region->vm_start + len; |
975 | |
976 | if (vma->vm_file) { |
977 | /* read the contents of a file into the copy */ |
978 | loff_t fpos; |
979 | |
980 | fpos = vma->vm_pgoff; |
981 | fpos <<= PAGE_SHIFT; |
982 | |
983 | ret = kernel_read(vma->vm_file, base, len, &fpos); |
984 | if (ret < 0) |
985 | goto error_free; |
986 | |
987 | /* clear the last little bit */ |
988 | if (ret < len) |
989 | memset(base + ret, 0, len - ret); |
990 | |
991 | } else { |
992 | vma_set_anonymous(vma); |
993 | } |
994 | |
995 | return 0; |
996 | |
997 | error_free: |
	free_page_series(region->vm_start, region->vm_top);
999 | region->vm_start = vma->vm_start = 0; |
1000 | region->vm_end = vma->vm_end = 0; |
1001 | region->vm_top = 0; |
1002 | return ret; |
1003 | |
1004 | enomem: |
1005 | pr_err("Allocation of length %lu from process %d (%s) failed\n" , |
1006 | len, current->pid, current->comm); |
1007 | show_mem(); |
1008 | return -ENOMEM; |
1009 | } |
1010 | |
1011 | /* |
1012 | * handle mapping creation for uClinux |
1013 | */ |
1014 | unsigned long do_mmap(struct file *file, |
1015 | unsigned long addr, |
1016 | unsigned long len, |
1017 | unsigned long prot, |
1018 | unsigned long flags, |
1019 | vm_flags_t vm_flags, |
1020 | unsigned long pgoff, |
1021 | unsigned long *populate, |
1022 | struct list_head *uf) |
1023 | { |
1024 | struct vm_area_struct *vma; |
1025 | struct vm_region *region; |
1026 | struct rb_node *rb; |
1027 | unsigned long capabilities, result; |
1028 | int ret; |
1029 | VMA_ITERATOR(vmi, current->mm, 0); |
1030 | |
1031 | *populate = 0; |
1032 | |
1033 | /* decide whether we should attempt the mapping, and if so what sort of |
1034 | * mapping */ |
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
1037 | if (ret < 0) |
1038 | return ret; |
1039 | |
1040 | /* we ignore the address hint */ |
1041 | addr = 0; |
1042 | len = PAGE_ALIGN(len); |
1043 | |
1044 | /* we've determined that we can make the mapping, now translate what we |
1045 | * now know into VMA flags */ |
1046 | vm_flags |= determine_vm_flags(file, prot, flags, capabilities); |
1047 | |
1048 | |
1049 | /* we're going to need to record the mapping */ |
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1051 | if (!region) |
1052 | goto error_getting_region; |
1053 | |
1054 | vma = vm_area_alloc(current->mm); |
1055 | if (!vma) |
1056 | goto error_getting_vma; |
1057 | |
1058 | region->vm_usage = 1; |
1059 | region->vm_flags = vm_flags; |
1060 | region->vm_pgoff = pgoff; |
1061 | |
	vm_flags_init(vma, vm_flags);
1063 | vma->vm_pgoff = pgoff; |
1064 | |
1065 | if (file) { |
		region->vm_file = get_file(file);
		vma->vm_file = get_file(file);
1068 | } |
1069 | |
	down_write(&nommu_region_sem);
1071 | |
1072 | /* if we want to share, we need to check for regions created by other |
1073 | * mmap() calls that overlap with our proposed mapping |
1074 | * - we can only share with a superset match on most regular files |
1075 | * - shared mappings on character devices and memory backed files are |
1076 | * permitted to overlap inexactly as far as we are concerned for in |
1077 | * these cases, sharing is handled in the driver or filesystem rather |
1078 | * than here |
1079 | */ |
1080 | if (is_nommu_shared_mapping(vm_flags)) { |
1081 | struct vm_region *pregion; |
1082 | unsigned long pglen, rpglen, pgend, rpgend, start; |
1083 | |
1084 | pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
1085 | pgend = pgoff + pglen; |
1086 | |
1087 | for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) { |
1088 | pregion = rb_entry(rb, struct vm_region, vm_rb); |
1089 | |
1090 | if (!is_nommu_shared_mapping(pregion->vm_flags)) |
1091 | continue; |
1092 | |
1093 | /* search for overlapping mappings on the same file */ |
			if (file_inode(pregion->vm_file) !=
			    file_inode(file))
1096 | continue; |
1097 | |
1098 | if (pregion->vm_pgoff >= pgend) |
1099 | continue; |
1100 | |
1101 | rpglen = pregion->vm_end - pregion->vm_start; |
1102 | rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT; |
1103 | rpgend = pregion->vm_pgoff + rpglen; |
1104 | if (pgoff >= rpgend) |
1105 | continue; |
1106 | |
1107 | /* handle inexactly overlapping matches between |
1108 | * mappings */ |
1109 | if ((pregion->vm_pgoff != pgoff || rpglen != pglen) && |
1110 | !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) { |
1111 | /* new mapping is not a subset of the region */ |
1112 | if (!(capabilities & NOMMU_MAP_DIRECT)) |
1113 | goto sharing_violation; |
1114 | continue; |
1115 | } |
1116 | |
1117 | /* we've found a region we can share */ |
1118 | pregion->vm_usage++; |
1119 | vma->vm_region = pregion; |
1120 | start = pregion->vm_start; |
1121 | start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT; |
1122 | vma->vm_start = start; |
1123 | vma->vm_end = start + len; |
1124 | |
1125 | if (pregion->vm_flags & VM_MAPPED_COPY) |
				vm_flags_set(vma, VM_MAPPED_COPY);
1127 | else { |
1128 | ret = do_mmap_shared_file(vma); |
1129 | if (ret < 0) { |
1130 | vma->vm_region = NULL; |
1131 | vma->vm_start = 0; |
1132 | vma->vm_end = 0; |
1133 | pregion->vm_usage--; |
1134 | pregion = NULL; |
1135 | goto error_just_free; |
1136 | } |
1137 | } |
1138 | fput(region->vm_file); |
			kmem_cache_free(vm_region_jar, region);
1140 | region = pregion; |
1141 | result = start; |
1142 | goto share; |
1143 | } |
1144 | |
1145 | /* obtain the address at which to make a shared mapping |
1146 | * - this is the hook for quasi-memory character devices to |
1147 | * tell us the location of a shared mapping |
1148 | */ |
1149 | if (capabilities & NOMMU_MAP_DIRECT) { |
1150 | addr = file->f_op->get_unmapped_area(file, addr, len, |
1151 | pgoff, flags); |
1152 | if (IS_ERR_VALUE(addr)) { |
1153 | ret = addr; |
1154 | if (ret != -ENOSYS) |
1155 | goto error_just_free; |
1156 | |
1157 | /* the driver refused to tell us where to site |
1158 | * the mapping so we'll have to attempt to copy |
1159 | * it */ |
1160 | ret = -ENODEV; |
1161 | if (!(capabilities & NOMMU_MAP_COPY)) |
1162 | goto error_just_free; |
1163 | |
1164 | capabilities &= ~NOMMU_MAP_DIRECT; |
1165 | } else { |
1166 | vma->vm_start = region->vm_start = addr; |
1167 | vma->vm_end = region->vm_end = addr + len; |
1168 | } |
1169 | } |
1170 | } |
1171 | |
1172 | vma->vm_region = region; |
1173 | |
1174 | /* set up the mapping |
1175 | * - the region is filled in if NOMMU_MAP_DIRECT is still set |
1176 | */ |
1177 | if (file && vma->vm_flags & VM_SHARED) |
1178 | ret = do_mmap_shared_file(vma); |
1179 | else |
1180 | ret = do_mmap_private(vma, region, len, capabilities); |
1181 | if (ret < 0) |
1182 | goto error_just_free; |
1183 | add_nommu_region(region); |
1184 | |
1185 | /* clear anonymous mappings that don't ask for uninitialized data */ |
1186 | if (!vma->vm_file && |
1187 | (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) || |
1188 | !(flags & MAP_UNINITIALIZED))) |
1189 | memset((void *)region->vm_start, 0, |
1190 | region->vm_end - region->vm_start); |
1191 | |
1192 | /* okay... we have a mapping; now we have to register it */ |
1193 | result = vma->vm_start; |
1194 | |
1195 | current->mm->total_vm += len >> PAGE_SHIFT; |
1196 | |
1197 | share: |
1198 | BUG_ON(!vma->vm_region); |
	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
	if (vma_iter_prealloc(&vmi, vma))
1201 | goto error_just_free; |
1202 | |
1203 | setup_vma_to_mm(vma, current->mm); |
1204 | current->mm->map_count++; |
1205 | /* add the VMA to the tree */ |
	vma_iter_store(&vmi, vma);
1207 | |
1208 | /* we flush the region from the icache only when the first executable |
1209 | * mapping of it is made */ |
1210 | if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { |
		flush_icache_user_range(region->vm_start, region->vm_end);
1212 | region->vm_icache_flushed = true; |
1213 | } |
1214 | |
	up_write(&nommu_region_sem);
1216 | |
1217 | return result; |
1218 | |
1219 | error_just_free: |
	up_write(&nommu_region_sem);
error:
	vma_iter_free(&vmi);
	if (region->vm_file)
		fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
1226 | if (vma->vm_file) |
1227 | fput(vma->vm_file); |
1228 | vm_area_free(vma); |
1229 | return ret; |
1230 | |
1231 | sharing_violation: |
1232 | up_write(sem: &nommu_region_sem); |
1233 | pr_warn("Attempt to share mismatched mappings\n" ); |
1234 | ret = -EINVAL; |
1235 | goto error; |
1236 | |
1237 | error_getting_vma: |
	kmem_cache_free(vm_region_jar, region);
	pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
1240 | len, current->pid); |
1241 | show_mem(); |
1242 | return -ENOMEM; |
1243 | |
1244 | error_getting_region: |
1245 | pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n" , |
1246 | len, current->pid); |
1247 | show_mem(); |
1248 | return -ENOMEM; |
1249 | } |
1250 | |
1251 | unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, |
1252 | unsigned long prot, unsigned long flags, |
1253 | unsigned long fd, unsigned long pgoff) |
1254 | { |
1255 | struct file *file = NULL; |
1256 | unsigned long retval = -EBADF; |
1257 | |
1258 | audit_mmap_fd(fd, flags); |
1259 | if (!(flags & MAP_ANONYMOUS)) { |
1260 | file = fget(fd); |
1261 | if (!file) |
1262 | goto out; |
1263 | } |
1264 | |
1265 | retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); |
1266 | |
1267 | if (file) |
1268 | fput(file); |
1269 | out: |
1270 | return retval; |
1271 | } |
1272 | |
1273 | SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, |
1274 | unsigned long, prot, unsigned long, flags, |
1275 | unsigned long, fd, unsigned long, pgoff) |
1276 | { |
1277 | return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); |
1278 | } |
1279 | |
1280 | #ifdef __ARCH_WANT_SYS_OLD_MMAP |
1281 | struct mmap_arg_struct { |
1282 | unsigned long addr; |
1283 | unsigned long len; |
1284 | unsigned long prot; |
1285 | unsigned long flags; |
1286 | unsigned long fd; |
1287 | unsigned long offset; |
1288 | }; |
1289 | |
1290 | SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) |
1291 | { |
1292 | struct mmap_arg_struct a; |
1293 | |
1294 | if (copy_from_user(&a, arg, sizeof(a))) |
1295 | return -EFAULT; |
1296 | if (offset_in_page(a.offset)) |
1297 | return -EINVAL; |
1298 | |
1299 | return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, |
1300 | a.offset >> PAGE_SHIFT); |
1301 | } |
1302 | #endif /* __ARCH_WANT_SYS_OLD_MMAP */ |
1303 | |
1304 | /* |
1305 | * split a vma into two pieces at address 'addr', a new vma is allocated either |
1306 | * for the first part or the tail. |
1307 | */ |
1308 | static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, |
1309 | unsigned long addr, int new_below) |
1310 | { |
1311 | struct vm_area_struct *new; |
1312 | struct vm_region *region; |
1313 | unsigned long npages; |
1314 | struct mm_struct *mm; |
1315 | |
1316 | /* we're only permitted to split anonymous regions (these should have |
1317 | * only a single usage on the region) */ |
1318 | if (vma->vm_file) |
1319 | return -ENOMEM; |
1320 | |
1321 | mm = vma->vm_mm; |
1322 | if (mm->map_count >= sysctl_max_map_count) |
1323 | return -ENOMEM; |
1324 | |
	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1326 | if (!region) |
1327 | return -ENOMEM; |
1328 | |
1329 | new = vm_area_dup(vma); |
1330 | if (!new) |
1331 | goto err_vma_dup; |
1332 | |
1333 | /* most fields are the same, copy all, and then fixup */ |
1334 | *region = *vma->vm_region; |
1335 | new->vm_region = region; |
1336 | |
1337 | npages = (addr - vma->vm_start) >> PAGE_SHIFT; |
1338 | |
1339 | if (new_below) { |
1340 | region->vm_top = region->vm_end = new->vm_end = addr; |
1341 | } else { |
1342 | region->vm_start = new->vm_start = addr; |
1343 | region->vm_pgoff = new->vm_pgoff += npages; |
1344 | } |
1345 | |
	vma_iter_config(vmi, new->vm_start, new->vm_end);
	if (vma_iter_prealloc(vmi, vma)) {
		pr_warn("Allocation of vma tree for process %d failed\n",
1349 | current->pid); |
1350 | goto err_vmi_preallocate; |
1351 | } |
1352 | |
1353 | if (new->vm_ops && new->vm_ops->open) |
1354 | new->vm_ops->open(new); |
1355 | |
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
1358 | if (new_below) { |
1359 | vma->vm_region->vm_start = vma->vm_start = addr; |
1360 | vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; |
1361 | } else { |
1362 | vma->vm_region->vm_end = vma->vm_end = addr; |
1363 | vma->vm_region->vm_top = addr; |
1364 | } |
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
1368 | |
1369 | setup_vma_to_mm(vma, mm); |
	setup_vma_to_mm(new, mm);
	vma_iter_store(vmi, new);
1372 | mm->map_count++; |
1373 | return 0; |
1374 | |
1375 | err_vmi_preallocate: |
1376 | vm_area_free(new); |
1377 | err_vma_dup: |
	kmem_cache_free(vm_region_jar, region);
1379 | return -ENOMEM; |
1380 | } |
1381 | |
1382 | /* |
1383 | * shrink a VMA by removing the specified chunk from either the beginning or |
1384 | * the end |
1385 | */ |
1386 | static int vmi_shrink_vma(struct vma_iterator *vmi, |
1387 | struct vm_area_struct *vma, |
1388 | unsigned long from, unsigned long to) |
1389 | { |
1390 | struct vm_region *region; |
1391 | |
1392 | /* adjust the VMA's pointers, which may reposition it in the MM's tree |
1393 | * and list */ |
1394 | if (from > vma->vm_start) { |
		if (vma_iter_clear_gfp(vmi, from, vma->vm_end, GFP_KERNEL))
1396 | return -ENOMEM; |
1397 | vma->vm_end = from; |
1398 | } else { |
		if (vma_iter_clear_gfp(vmi, vma->vm_start, to, GFP_KERNEL))
1400 | return -ENOMEM; |
1401 | vma->vm_start = to; |
1402 | } |
1403 | |
1404 | /* cut the backing region down to size */ |
1405 | region = vma->vm_region; |
1406 | BUG_ON(region->vm_usage != 1); |
1407 | |
	down_write(&nommu_region_sem);
1409 | delete_nommu_region(region); |
1410 | if (from > region->vm_start) { |
1411 | to = region->vm_top; |
1412 | region->vm_top = region->vm_end = from; |
1413 | } else { |
1414 | region->vm_start = to; |
1415 | } |
1416 | add_nommu_region(region); |
	up_write(&nommu_region_sem);
1418 | |
1419 | free_page_series(from, to); |
1420 | return 0; |
1421 | } |
1422 | |
1423 | /* |
1424 | * release a mapping |
1425 | * - under NOMMU conditions the chunk to be unmapped must be backed by a single |
1426 | * VMA, though it need not cover the whole VMA |
1427 | */ |
1428 | int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf) |
1429 | { |
1430 | VMA_ITERATOR(vmi, mm, start); |
1431 | struct vm_area_struct *vma; |
1432 | unsigned long end; |
1433 | int ret = 0; |
1434 | |
1435 | len = PAGE_ALIGN(len); |
1436 | if (len == 0) |
1437 | return -EINVAL; |
1438 | |
1439 | end = start + len; |
1440 | |
1441 | /* find the first potentially overlapping VMA */ |
	vma = vma_find(&vmi, end);
1443 | if (!vma) { |
1444 | static int limit; |
1445 | if (limit < 5) { |
1446 | pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n" , |
1447 | current->pid, current->comm, |
1448 | start, start + len - 1); |
1449 | limit++; |
1450 | } |
1451 | return -EINVAL; |
1452 | } |
1453 | |
1454 | /* we're allowed to split an anonymous VMA but not a file-backed one */ |
1455 | if (vma->vm_file) { |
1456 | do { |
1457 | if (start > vma->vm_start) |
1458 | return -EINVAL; |
1459 | if (end == vma->vm_end) |
1460 | goto erase_whole_vma; |
			vma = vma_find(&vmi, end);
1462 | } while (vma); |
1463 | return -EINVAL; |
1464 | } else { |
1465 | /* the chunk must be a subset of the VMA found */ |
1466 | if (start == vma->vm_start && end == vma->vm_end) |
1467 | goto erase_whole_vma; |
1468 | if (start < vma->vm_start || end > vma->vm_end) |
1469 | return -EINVAL; |
1470 | if (offset_in_page(start)) |
1471 | return -EINVAL; |
1472 | if (end != vma->vm_end && offset_in_page(end)) |
1473 | return -EINVAL; |
1474 | if (start != vma->vm_start && end != vma->vm_end) { |
			ret = split_vma(&vmi, vma, start, 1);
1476 | if (ret < 0) |
1477 | return ret; |
1478 | } |
		return vmi_shrink_vma(&vmi, vma, start, end);
1480 | } |
1481 | |
1482 | erase_whole_vma: |
1483 | if (delete_vma_from_mm(vma)) |
1484 | ret = -ENOMEM; |
1485 | else |
1486 | delete_vma(mm, vma); |
1487 | return ret; |
1488 | } |
1489 | |
1490 | int vm_munmap(unsigned long addr, size_t len) |
1491 | { |
1492 | struct mm_struct *mm = current->mm; |
1493 | int ret; |
1494 | |
1495 | mmap_write_lock(mm); |
	ret = do_munmap(mm, addr, len, NULL);
1497 | mmap_write_unlock(mm); |
1498 | return ret; |
1499 | } |
1500 | EXPORT_SYMBOL(vm_munmap); |
1501 | |
1502 | SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) |
1503 | { |
1504 | return vm_munmap(addr, len); |
1505 | } |
1506 | |
1507 | /* |
1508 | * release all the mappings made in a process's VM space |
1509 | */ |
1510 | void exit_mmap(struct mm_struct *mm) |
1511 | { |
1512 | VMA_ITERATOR(vmi, mm, 0); |
1513 | struct vm_area_struct *vma; |
1514 | |
1515 | if (!mm) |
1516 | return; |
1517 | |
1518 | mm->total_vm = 0; |
1519 | |
1520 | /* |
1521 | * Lock the mm to avoid assert complaining even though this is the only |
1522 | * user of the mm |
1523 | */ |
1524 | mmap_write_lock(mm); |
1525 | for_each_vma(vmi, vma) { |
1526 | cleanup_vma_from_mm(vma); |
1527 | delete_vma(mm, vma); |
1528 | cond_resched(); |
1529 | } |
	__mt_destroy(&mm->mm_mt);
1531 | mmap_write_unlock(mm); |
1532 | } |
1533 | |
1534 | /* |
1535 | * expand (or shrink) an existing mapping, potentially moving it at the same |
1536 | * time (controlled by the MREMAP_MAYMOVE flag and available VM space) |
1537 | * |
1538 | * under NOMMU conditions, we only permit changing a mapping's size, and only |
1539 | * as long as it stays within the region allocated by do_mmap_private() and the |
1540 | * block is not shareable |
1541 | * |
1542 | * MREMAP_FIXED is not supported under NOMMU conditions |
1543 | */ |
1544 | static unsigned long do_mremap(unsigned long addr, |
1545 | unsigned long old_len, unsigned long new_len, |
1546 | unsigned long flags, unsigned long new_addr) |
1547 | { |
1548 | struct vm_area_struct *vma; |
1549 | |
1550 | /* insanity checks first */ |
1551 | old_len = PAGE_ALIGN(old_len); |
1552 | new_len = PAGE_ALIGN(new_len); |
1553 | if (old_len == 0 || new_len == 0) |
1554 | return (unsigned long) -EINVAL; |
1555 | |
1556 | if (offset_in_page(addr)) |
1557 | return -EINVAL; |
1558 | |
1559 | if (flags & MREMAP_FIXED && new_addr != addr) |
1560 | return (unsigned long) -EINVAL; |
1561 | |
	vma = find_vma_exact(current->mm, addr, old_len);
1563 | if (!vma) |
1564 | return (unsigned long) -EINVAL; |
1565 | |
1566 | if (vma->vm_end != vma->vm_start + old_len) |
1567 | return (unsigned long) -EFAULT; |
1568 | |
1569 | if (is_nommu_shared_mapping(vma->vm_flags)) |
1570 | return (unsigned long) -EPERM; |
1571 | |
1572 | if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) |
1573 | return (unsigned long) -ENOMEM; |
1574 | |
1575 | /* all checks complete - do it */ |
1576 | vma->vm_end = vma->vm_start + new_len; |
1577 | return vma->vm_start; |
1578 | } |
1579 | |
1580 | SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, |
1581 | unsigned long, new_len, unsigned long, flags, |
1582 | unsigned long, new_addr) |
1583 | { |
1584 | unsigned long ret; |
1585 | |
1586 | mmap_write_lock(current->mm); |
1587 | ret = do_mremap(addr, old_len, new_len, flags, new_addr); |
1588 | mmap_write_unlock(current->mm); |
1589 | return ret; |
1590 | } |
1591 | |
1592 | struct page *follow_page(struct vm_area_struct *vma, unsigned long address, |
1593 | unsigned int foll_flags) |
1594 | { |
1595 | return NULL; |
1596 | } |
1597 | |
1598 | int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, |
1599 | unsigned long pfn, unsigned long size, pgprot_t prot) |
1600 | { |
1601 | if (addr != (pfn << PAGE_SHIFT)) |
1602 | return -EINVAL; |
1603 | |
1604 | vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); |
1605 | return 0; |
1606 | } |
1607 | EXPORT_SYMBOL(remap_pfn_range); |
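
/*
 * Since virtual and physical addresses coincide without an MMU, the
 * "remap" above can only succeed when the VMA already sits at the
 * physical address. Illustrative:
 *
 *	// works iff vma->vm_start == phys, i.e. addr == pfn << PAGE_SHIFT
 *	remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
 *			size, vma->vm_page_prot);
 */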
1608 | |
1609 | int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) |
1610 | { |
1611 | unsigned long pfn = start >> PAGE_SHIFT; |
1612 | unsigned long vm_len = vma->vm_end - vma->vm_start; |
1613 | |
1614 | pfn += vma->vm_pgoff; |
	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1616 | } |
1617 | EXPORT_SYMBOL(vm_iomap_memory); |
1618 | |
1619 | int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, |
1620 | unsigned long pgoff) |
1621 | { |
1622 | unsigned int size = vma->vm_end - vma->vm_start; |
1623 | |
1624 | if (!(vma->vm_flags & VM_USERMAP)) |
1625 | return -EINVAL; |
1626 | |
1627 | vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT)); |
1628 | vma->vm_end = vma->vm_start + size; |
1629 | |
1630 | return 0; |
1631 | } |
1632 | EXPORT_SYMBOL(remap_vmalloc_range); |
1633 | |
1634 | vm_fault_t filemap_fault(struct vm_fault *vmf) |
1635 | { |
1636 | BUG(); |
1637 | return 0; |
1638 | } |
1639 | EXPORT_SYMBOL(filemap_fault); |
1640 | |
1641 | vm_fault_t filemap_map_pages(struct vm_fault *vmf, |
1642 | pgoff_t start_pgoff, pgoff_t end_pgoff) |
1643 | { |
1644 | BUG(); |
1645 | return 0; |
1646 | } |
1647 | EXPORT_SYMBOL(filemap_map_pages); |
1648 | |
1649 | static int __access_remote_vm(struct mm_struct *mm, unsigned long addr, |
1650 | void *buf, int len, unsigned int gup_flags) |
1651 | { |
1652 | struct vm_area_struct *vma; |
1653 | int write = gup_flags & FOLL_WRITE; |
1654 | |
1655 | if (mmap_read_lock_killable(mm)) |
1656 | return 0; |
1657 | |
1658 | /* the access must start within one of the target process's mappings */ |
1659 | vma = find_vma(mm, addr); |
1660 | if (vma) { |
1661 | /* don't overrun this mapping */ |
1662 | if (addr + len >= vma->vm_end) |
1663 | len = vma->vm_end - addr; |
1664 | |
1665 | /* only read or write mappings where it is permitted */ |
1666 | if (write && vma->vm_flags & VM_MAYWRITE) |
1667 | copy_to_user_page(vma, NULL, addr, |
1668 | (void *) addr, buf, len); |
1669 | else if (!write && vma->vm_flags & VM_MAYREAD) |
1670 | copy_from_user_page(vma, NULL, addr, |
1671 | buf, (void *) addr, len); |
1672 | else |
1673 | len = 0; |
1674 | } else { |
1675 | len = 0; |
1676 | } |
1677 | |
1678 | mmap_read_unlock(mm); |
1679 | |
1680 | return len; |
1681 | } |
1682 | |
1683 | /** |
1684 | * access_remote_vm - access another process' address space |
1685 | * @mm: the mm_struct of the target address space |
1686 | * @addr: start address to access |
1687 | * @buf: source or destination buffer |
1688 | * @len: number of bytes to transfer |
1689 | * @gup_flags: flags modifying lookup behaviour |
1690 | * |
1691 | * The caller must hold a reference on @mm. |
1692 | */ |
1693 | int access_remote_vm(struct mm_struct *mm, unsigned long addr, |
1694 | void *buf, int len, unsigned int gup_flags) |
1695 | { |
1696 | return __access_remote_vm(mm, addr, buf, len, gup_flags); |
1697 | } |
1698 | |
1699 | /* |
1700 | * Access another process' address space. |
1701 | * - source/target buffer must be kernel space |
1702 | */ |
1703 | int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, |
1704 | unsigned int gup_flags) |
1705 | { |
1706 | struct mm_struct *mm; |
1707 | |
1708 | if (addr + len < addr) |
1709 | return 0; |
1710 | |
	mm = get_task_mm(tsk);
1712 | if (!mm) |
1713 | return 0; |
1714 | |
1715 | len = __access_remote_vm(mm, addr, buf, len, gup_flags); |
1716 | |
1717 | mmput(mm); |
1718 | return len; |
1719 | } |
1720 | EXPORT_SYMBOL_GPL(access_process_vm); |
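
/*
 * A typical user of this path is ptrace(): PTRACE_PEEKDATA and friends
 * come through here, and under NOMMU the transfer above is a plain copy
 * since the target's addresses are directly usable from kernel context.
 */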
1721 | |
1722 | /** |
1723 | * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode |
1724 | * @inode: The inode to check |
1725 | * @size: The current filesize of the inode |
1726 | * @newsize: The proposed filesize of the inode |
1727 | * |
1728 | * Check the shared mappings on an inode on behalf of a shrinking truncate to |
1729 | * make sure that any outstanding VMAs aren't broken and then shrink the |
 * vm_regions that extend beyond the new size so that do_mmap() doesn't
1731 | * automatically grant mappings that are too large. |
1732 | */ |
1733 | int nommu_shrink_inode_mappings(struct inode *inode, size_t size, |
1734 | size_t newsize) |
1735 | { |
1736 | struct vm_area_struct *vma; |
1737 | struct vm_region *region; |
1738 | pgoff_t low, high; |
1739 | size_t r_size, r_top; |
1740 | |
1741 | low = newsize >> PAGE_SHIFT; |
1742 | high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
1743 | |
	down_write(&nommu_region_sem);
	i_mmap_lock_read(inode->i_mapping);
1746 | |
1747 | /* search for VMAs that fall within the dead zone */ |
1748 | vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) { |
1749 | /* found one - only interested if it's shared out of the page |
1750 | * cache */ |
1751 | if (vma->vm_flags & VM_SHARED) { |
			i_mmap_unlock_read(inode->i_mapping);
			up_write(&nommu_region_sem);
1754 | return -ETXTBSY; /* not quite true, but near enough */ |
1755 | } |
1756 | } |
1757 | |
1758 | /* reduce any regions that overlap the dead zone - if in existence, |
1759 | * these will be pointed to by VMAs that don't overlap the dead zone |
1760 | * |
1761 | * we don't check for any regions that start beyond the EOF as there |
1762 | * shouldn't be any |
1763 | */ |
1764 | vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) { |
1765 | if (!(vma->vm_flags & VM_SHARED)) |
1766 | continue; |
1767 | |
1768 | region = vma->vm_region; |
1769 | r_size = region->vm_top - region->vm_start; |
1770 | r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size; |
1771 | |
1772 | if (r_top > newsize) { |
1773 | region->vm_top -= r_top - newsize; |
1774 | if (region->vm_end > region->vm_top) |
1775 | region->vm_end = region->vm_top; |
1776 | } |
1777 | } |
1778 | |
	i_mmap_unlock_read(inode->i_mapping);
	up_write(&nommu_region_sem);
1781 | return 0; |
1782 | } |
1783 | |
1784 | /* |
1785 | * Initialise sysctl_user_reserve_kbytes. |
1786 | * |
1787 | * This is intended to prevent a user from starting a single memory hogging |
1788 | * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER |
1789 | * mode. |
1790 | * |
1791 | * The default value is min(3% of free memory, 128MB) |
1792 | * 128MB is enough to recover with sshd/login, bash, and top/kill. |
1793 | */ |
1794 | static int __meminit init_user_reserve(void) |
1795 | { |
1796 | unsigned long free_kbytes; |
1797 | |
1798 | free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); |
1799 | |
1800 | sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); |
1801 | return 0; |
1802 | } |
1803 | subsys_initcall(init_user_reserve); |
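
/*
 * Worked example for the formula above: with 1 GiB free, free_kbytes /
 * 32 is 32768 kB (32 MiB), well under the 1UL << 17 kB (128 MiB) cap;
 * only systems with around 4 GiB or more free hit the cap.
 */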
1804 | |
1805 | /* |
1806 | * Initialise sysctl_admin_reserve_kbytes. |
1807 | * |
1808 | * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin |
1809 | * to log in and kill a memory hogging process. |
1810 | * |
1811 | * Systems with more than 256MB will reserve 8MB, enough to recover |
1812 | * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will |
1813 | * only reserve 3% of free pages by default. |
1814 | */ |
1815 | static int __meminit init_admin_reserve(void) |
1816 | { |
1817 | unsigned long free_kbytes; |
1818 | |
1819 | free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); |
1820 | |
1821 | sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); |
1822 | return 0; |
1823 | } |
1824 | subsys_initcall(init_admin_reserve); |
1825 | |