#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <linux/uaccess.h>

#include "internal.h"
/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
        if (!is_kernel_rodata((unsigned long)x))
                kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);
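
/*
 * Illustrative sketch (hypothetical caller, not part of this file): copy
 * a caller-supplied string into kernel memory and release it with
 * kfree() when done:
 *
 *	char *label = kstrdup(src_name, GFP_KERNEL);
 *
 *	if (!label)
 *		return -ENOMEM;
 *	...
 *	kfree(label);
 */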

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const.
 *
 * Return: the source string if it is in the .rodata section, otherwise a
 * copy allocated with kstrdup()
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
        if (is_kernel_rodata((unsigned long)s))
                return s;

        return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
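
/*
 * Illustrative sketch (hypothetical caller): because kstrdup_const() may
 * return the source pointer itself when it lives in .rodata, the result
 * must be released with kfree_const(), never plain kfree():
 *
 *	const char *name = kstrdup_const(tmpl_name, GFP_KERNEL);
 *
 *	if (!name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(name);
 */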

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);
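
/*
 * Illustrative sketch (hypothetical caller): kmemdup() is the idiomatic
 * replacement for an open-coded kmalloc() followed by memcpy() when
 * cloning a fixed-size object:
 *
 *	struct foo_config *copy = kmemdup(orig, sizeof(*orig), GFP_KERNEL);
 *
 *	if (!copy)
 *		return -ENOMEM;
 */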

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
        char *buf;

        if (!s)
                return NULL;

        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
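
/*
 * Illustrative sketch (hypothetical caller): kmemdup_nul() turns an
 * unterminated byte range of known length, e.g. a token delimited by two
 * pointers, into a proper C string:
 *
 *	char *word = kmemdup_nul(start, end - start, GFP_KERNEL);
 *
 *	if (!word)
 *		return -ENOMEM;
 *	...
 *	kfree(word);
 */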

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);
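
/*
 * Illustrative sketch (hypothetical ioctl handler): failure is reported
 * via ERR_PTR(), not %NULL, so callers must test with IS_ERR():
 *
 *	struct foo_args *args = memdup_user(uarg, sizeof(*args));
 *
 *	if (IS_ERR(args))
 *		return PTR_ERR(args);
 *	...
 *	kfree(args);
 */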

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kvmalloc(len, GFP_USER);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kvfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s, or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);
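
/*
 * Illustrative sketch (hypothetical syscall path): the result is always
 * NUL-terminated and, as with memdup_user(), errors are returned as an
 * ERR_PTR(), not %NULL:
 *
 *	char *path = strndup_user(upath, PATH_MAX);
 *
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	...
 *	kfree(path);
 */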

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
        char *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause pagefault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len + 1, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }
        p[len] = '\0';

        return p;
}
EXPORT_SYMBOL(memdup_user_nul);
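
/*
 * Illustrative sketch (hypothetical procfs write handler): the extra NUL
 * byte makes the copied buffer safe to hand to string parsers such as
 * kstrtoul():
 *
 *	char *kbuf = memdup_user_nul(ubuf, count);
 *
 *	if (IS_ERR(kbuf))
 *		return PTR_ERR(kbuf);
 *	err = kstrtoul(kbuf, 0, &val);
 *	kfree(kbuf);
 */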

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        next = NULL;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
        struct task_struct * __maybe_unused t = current;

        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * Note a difference with get_user_pages_fast: this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
                                 int nr_pages, int write, struct page **pages)
{
        return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start: starting user address
 * @nr_pages: number of pages from start to pin
 * @write: whether pages will be written to
 * @pages: array that receives pointers to the pages pinned.
 *         Should be at least nr_pages long.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 *
 * Return: number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int __weak get_user_pages_fast(unsigned long start,
                                int nr_pages, int write, struct page **pages)
{
        return get_user_pages_unlocked(start, nr_pages, pages,
                                       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
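
/*
 * Illustrative sketch (hypothetical driver): pin a user buffer for I/O
 * and drop the page references afterwards. Fewer than nr_pages pages may
 * be pinned, so always iterate over the returned count:
 *
 *	int i, pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
 *
 *	if (pinned < 0)
 *		return pinned;
 *	...
 *	for (i = 0; i < pinned; i++)
 *		put_page(pages[i]);
 */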

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;
        unsigned long populate;
        LIST_HEAD(uf);

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                if (down_write_killable(&mm->mmap_sem))
                        return -EINTR;
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
                                    &populate, &uf);
                up_write(&mm->mmap_sem);
                userfaultfd_unmap_complete(mm, &uf);
                if (populate)
                        mm_populate(ret, populate);
        }
        return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset_in_page(offset)))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Note: if @flags is not fully compatible with %GFP_KERNEL (i.e. not a
 * superset of it), the vmalloc fallback is skipped and the call behaves
 * like plain kmalloc_node().
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
        gfp_t kmalloc_flags = flags;
        void *ret;

        /*
         * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
         * so the given set of flags has to be compatible.
         */
        if ((flags & GFP_KERNEL) != GFP_KERNEL)
                return kmalloc_node(size, flags, node);

        /*
         * We want to attempt a large physically contiguous block first because
         * it is less likely to fragment multiple larger blocks and therefore
         * contribute to a long term fragmentation less than vmalloc fallback.
         * However make sure that larger requests are not too disruptive - no
         * OOM killer and no allocation failure warnings as we have a fallback.
         */
        if (size > PAGE_SIZE) {
                kmalloc_flags |= __GFP_NOWARN;

                if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
                        kmalloc_flags |= __GFP_NORETRY;
        }

        ret = kmalloc_node(size, kmalloc_flags, node);

        /*
         * It doesn't really make sense to fall back to vmalloc for sub page
         * requests
         */
        if (ret || size <= PAGE_SIZE)
                return ret;

        return __vmalloc_node_flags_caller(size, node, flags,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
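
/*
 * Illustrative sketch (hypothetical caller): kvmalloc_array(), a wrapper
 * around kvmalloc_node(), is a common choice for table-sized allocations
 * that may exceed what the page allocator can serve contiguously:
 *
 *	struct entry *table = kvmalloc_array(nr, sizeof(*table), GFP_KERNEL);
 *
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	kvfree(table);
 */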

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
EXPORT_SYMBOL(kvfree);

static inline void *__page_rmapping(struct page *page)
{
        unsigned long mapping;

        mapping = (unsigned long)page->mapping;
        mapping &= ~PAGE_MAPPING_FLAGS;

        return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
        page = compound_head(page);
        return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For a compound page, it returns true if any subpage is mapped.
 */
bool page_mapped(struct page *page)
{
        int i;

        if (likely(!PageCompound(page)))
                return atomic_read(&page->_mapcount) >= 0;
        page = compound_head(page);
        if (atomic_read(compound_mapcount_ptr(page)) >= 0)
                return true;
        if (PageHuge(page))
                return false;
        for (i = 0; i < (1 << compound_order(page)); i++) {
                if (atomic_read(&page[i]._mapcount) >= 0)
                        return true;
        }
        return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
        unsigned long mapping;

        page = compound_head(page);
        mapping = (unsigned long)page->mapping;
        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;
        return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
        struct address_space *mapping;

        page = compound_head(page);

        /* This happens if someone calls flush_dcache_page on a slab page */
        if (unlikely(PageSlab(page)))
                return NULL;

        if (unlikely(PageSwapCache(page))) {
                swp_entry_t entry;

                entry.val = page_private(page);
                return swap_address_space(entry);
        }

        mapping = page->mapping;
        if ((unsigned long)mapping & PAGE_MAPPING_ANON)
                return NULL;

        return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
        if (unlikely(PageSwapCache(page)))
                return NULL;
        return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
        int ret;

        ret = atomic_read(&page->_mapcount) + 1;
        /*
         * For file THP, page->_mapcount contains the total number of
         * mappings of the page: no need to look into compound_mapcount.
         */
        if (!PageAnon(page) && !PageHuge(page))
                return ret;
        page = compound_head(page);
        ret += atomic_read(compound_mapcount_ptr(page)) + 1;
        if (PageDoubleMap(page))
                ret--;
        return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_kbytes = 0;
        return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
                              void __user *buffer, size_t *lenp,
                              loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_ratio = 0;
        return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
        unsigned long allowed;

        if (sysctl_overcommit_kbytes)
                allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
        else
                allowed = ((totalram_pages() - hugetlb_total_pages())
                           * sysctl_overcommit_ratio / 100);
        allowed += total_swap_pages;

        return allowed;
}
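
/*
 * Worked example (illustrative numbers only): with 4 GiB of RAM, no
 * hugetlb pages, the default overcommit_ratio of 50 and 2 GiB of swap,
 * the OVERCOMMIT_NEVER limit is 4 GiB * 50 / 100 + 2 GiB = 4 GiB of
 * committed address space. A non-zero overcommit_kbytes replaces the
 * ratio-based term entirely.
 */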

/*
 * Make sure vm_committed_as is in its own cacheline and not shared with
 * other variables; it can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
        return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
        long free, allowed, reserve;

        VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
                        -(s64)vm_committed_as_batch * num_online_cpus(),
                        "memory commitment underflow");

        vm_acct_memory(pages);

        /*
         * Sometimes we want to use more memory than we have
         */
        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
                return 0;

        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                free = global_zone_page_state(NR_FREE_PAGES);
                free += global_node_page_state(NR_FILE_PAGES);

                /*
                 * shmem pages shouldn't be counted as free in this
                 * case, they can't be purged, only swapped out, and
                 * that won't affect the overall amount of available
                 * memory in the system.
                 */
                free -= global_node_page_state(NR_SHMEM);

                free += get_nr_swap_pages();

                /*
                 * Any slabs which are created with the
                 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
                 * which are reclaimable, under pressure. The dentry
                 * cache and most inode caches should fall into this
                 * category.
                 */
                free += global_node_page_state(NR_SLAB_RECLAIMABLE);

                /*
                 * Part of the kernel memory, which can be released
                 * under memory pressure.
                 */
                free += global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);

                /*
                 * Leave reserved pages. The pages are not for anonymous pages.
                 */
                if (free <= totalreserve_pages)
                        goto error;
                else
                        free -= totalreserve_pages;

                /*
                 * Reserve some for root
                 */
                if (!cap_sys_admin)
                        free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

                if (free > pages)
                        return 0;

                goto error;
        }

        allowed = vm_commit_limit();
        /*
         * Reserve some for root
         */
        if (!cap_sys_admin)
                allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

        /*
         * Don't let a single process grow so big a user can't recover
         */
        if (mm) {
                reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
                allowed -= min_t(long, mm->total_vm / 32, reserve);
        }

        if (percpu_counter_read_positive(&vm_committed_as) < allowed)
                return 0;
error:
        vm_unacct_memory(pages);

        return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
        int res = 0;
        unsigned int len;
        struct mm_struct *mm = get_task_mm(task);
        unsigned long arg_start, arg_end, env_start, env_end;

        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;    /* Shh! No looking before we're done */

        down_read(&mm->mmap_sem);
        arg_start = mm->arg_start;
        arg_end = mm->arg_end;
        env_start = mm->env_start;
        env_end = mm->env_end;
        up_read(&mm->mmap_sem);

        len = arg_end - arg_start;

        if (len > buflen)
                len = buflen;

        res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

        /*
         * If the NUL at the end of args has been overwritten, then
         * assume application is using setproctitle(3).
         */
        if (res > 0 && buffer[res - 1] != '\0' && len < buflen) {
                len = strnlen(buffer, res);
                if (len < res) {
                        res = len;
                } else {
                        len = env_end - env_start;
                        if (len > buflen - res)
                                len = buflen - res;
                        res += access_process_vm(task, env_start,
                                                 buffer + res, len,
                                                 FOLL_FORCE);
                        res = strnlen(buffer, res);
                }
        }
out_mm:
        mmput(mm);
out:
        return res;
}