1 | /* |
2 | * hugetlbpage-backed filesystem. Based on ramfs. |
3 | * |
4 | * Nadia Yvette Chambers, 2002 |
5 | * |
6 | * Copyright (C) 2002 Linus Torvalds. |
7 | * License: GPL |
8 | */ |
9 | |
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
11 | |
12 | #include <linux/thread_info.h> |
13 | #include <asm/current.h> |
14 | #include <linux/falloc.h> |
15 | #include <linux/fs.h> |
16 | #include <linux/mount.h> |
17 | #include <linux/file.h> |
18 | #include <linux/kernel.h> |
19 | #include <linux/writeback.h> |
20 | #include <linux/pagemap.h> |
21 | #include <linux/highmem.h> |
22 | #include <linux/init.h> |
23 | #include <linux/string.h> |
24 | #include <linux/capability.h> |
25 | #include <linux/ctype.h> |
26 | #include <linux/backing-dev.h> |
27 | #include <linux/hugetlb.h> |
28 | #include <linux/pagevec.h> |
29 | #include <linux/fs_parser.h> |
30 | #include <linux/mman.h> |
31 | #include <linux/slab.h> |
32 | #include <linux/dnotify.h> |
33 | #include <linux/statfs.h> |
34 | #include <linux/security.h> |
35 | #include <linux/magic.h> |
36 | #include <linux/migrate.h> |
37 | #include <linux/uio.h> |
38 | |
39 | #include <linux/uaccess.h> |
40 | #include <linux/sched/mm.h> |
41 | |
42 | static const struct address_space_operations hugetlbfs_aops; |
43 | const struct file_operations hugetlbfs_file_operations; |
44 | static const struct inode_operations hugetlbfs_dir_inode_operations; |
45 | static const struct inode_operations hugetlbfs_inode_operations; |
46 | |
47 | enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT }; |
48 | |
49 | struct hugetlbfs_fs_context { |
50 | struct hstate *hstate; |
51 | unsigned long long max_size_opt; |
52 | unsigned long long min_size_opt; |
53 | long max_hpages; |
54 | long nr_inodes; |
55 | long min_hpages; |
56 | enum hugetlbfs_size_type max_val_type; |
57 | enum hugetlbfs_size_type min_val_type; |
58 | kuid_t uid; |
59 | kgid_t gid; |
60 | umode_t mode; |
61 | }; |
62 | |
63 | int sysctl_hugetlb_shm_group; |
64 | |
65 | enum hugetlb_param { |
66 | Opt_gid, |
67 | Opt_min_size, |
68 | Opt_mode, |
69 | Opt_nr_inodes, |
70 | Opt_pagesize, |
71 | Opt_size, |
72 | Opt_uid, |
73 | }; |
74 | |
static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};
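
/*
 * Illustrative example (not part of the kernel source): a mount using these
 * options might look like
 *	mount -t hugetlbfs -o pagesize=2M,size=1G,min_size=512M,nr_inodes=64,\
 *		uid=1000,gid=1000,mode=0770 none /mnt/huge
 */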
85 | |
86 | /* |
87 | * Mask used when checking the page offset value passed in via system |
88 | * calls. This value will be converted to a loff_t which is signed. |
89 | * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the |
90 | * value. The extra bit (- 1 in the shift value) is to take the sign |
91 | * bit into account. |
92 | */ |
93 | #define PGOFF_LOFFT_MAX \ |
94 | (((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1))) |
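/*
 * For example, on 64-bit with 4 KB base pages (PAGE_SHIFT == 12,
 * BITS_PER_LONG == 64), PGOFF_LOFFT_MAX covers bits 51..63 of vm_pgoff:
 * any of those bits being set would put the byte offset
 * (vm_pgoff << PAGE_SHIFT) at or beyond the sign bit of loff_t.
 */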
95 | |
96 | static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) |
97 | { |
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);
103 | |
104 | /* |
105 | * vma address alignment (but not the pgoff alignment) has |
106 | * already been checked by prepare_hugepage_range. If you add |
107 | * any error returns here, do so after setting VM_HUGETLB, so |
108 | * is_vm_hugetlb_page tests below unmap_region go the right |
109 | * way when do_mmap unwinds (may be important on powerpc |
110 | * and ia64). |
111 | */ |
112 | vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND); |
113 | vma->vm_ops = &hugetlb_vm_ops; |
114 | |
	ret = seal_check_write(info->seals, vma);
116 | if (ret) |
117 | return ret; |
118 | |
119 | /* |
120 | * page based offset in vm_pgoff could be sufficiently large to |
121 | * overflow a loff_t when converted to byte offset. This can |
122 | * only happen on architectures where sizeof(loff_t) == |
123 | * sizeof(unsigned long). So, only check in those instances. |
124 | */ |
125 | if (sizeof(unsigned long) == sizeof(loff_t)) { |
126 | if (vma->vm_pgoff & PGOFF_LOFFT_MAX) |
127 | return -EINVAL; |
128 | } |
129 | |
130 | /* must be huge page aligned */ |
131 | if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) |
132 | return -EINVAL; |
133 | |
134 | vma_len = (loff_t)(vma->vm_end - vma->vm_start); |
135 | len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); |
136 | /* check for overflow */ |
137 | if (len < vma_len) |
138 | return -EINVAL; |
139 | |
140 | inode_lock(inode); |
141 | file_accessed(file); |
142 | |
143 | ret = -ENOMEM; |
	if (!hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
148 | goto out; |
149 | |
150 | ret = 0; |
151 | if (vma->vm_flags & VM_WRITE && inode->i_size < len) |
		i_size_write(inode, len);
153 | out: |
154 | inode_unlock(inode); |
155 | |
156 | return ret; |
157 | } |
158 | |
159 | /* |
160 | * Called under mmap_write_lock(mm). |
161 | */ |
162 | |
163 | static unsigned long |
164 | hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr, |
165 | unsigned long len, unsigned long pgoff, unsigned long flags) |
166 | { |
	struct hstate *h = hstate_file(file);
168 | struct vm_unmapped_area_info info; |
169 | |
170 | info.flags = 0; |
171 | info.length = len; |
172 | info.low_limit = current->mm->mmap_base; |
173 | info.high_limit = arch_get_mmap_end(addr, len, flags); |
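	/*
	 * Example: with 2 MB huge pages on x86-64, PAGE_MASK is ~0xfffUL and
	 * huge_page_mask(h) is ~0x1fffffUL, so align_mask is 0x1ff000 and the
	 * address returned below is aligned to a 2 MB boundary.
	 */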
174 | info.align_mask = PAGE_MASK & ~huge_page_mask(h); |
175 | info.align_offset = 0; |
	return vm_unmapped_area(&info);
177 | } |
178 | |
179 | static unsigned long |
180 | hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr, |
181 | unsigned long len, unsigned long pgoff, unsigned long flags) |
182 | { |
	struct hstate *h = hstate_file(file);
184 | struct vm_unmapped_area_info info; |
185 | |
186 | info.flags = VM_UNMAPPED_AREA_TOPDOWN; |
187 | info.length = len; |
188 | info.low_limit = PAGE_SIZE; |
189 | info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base); |
190 | info.align_mask = PAGE_MASK & ~huge_page_mask(h); |
191 | info.align_offset = 0; |
	addr = vm_unmapped_area(&info);
193 | |
194 | /* |
195 | * A failed mmap() very likely causes application failure, |
196 | * so fall back to the bottom-up function here. This scenario |
197 | * can happen with large stack limits and large mmap() |
198 | * allocations. |
199 | */ |
200 | if (unlikely(offset_in_page(addr))) { |
201 | VM_BUG_ON(addr != -ENOMEM); |
202 | info.flags = 0; |
203 | info.low_limit = current->mm->mmap_base; |
204 | info.high_limit = arch_get_mmap_end(addr, len, flags); |
		addr = vm_unmapped_area(&info);
206 | } |
207 | |
208 | return addr; |
209 | } |
210 | |
211 | unsigned long |
212 | generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr, |
213 | unsigned long len, unsigned long pgoff, |
214 | unsigned long flags) |
215 | { |
216 | struct mm_struct *mm = current->mm; |
217 | struct vm_area_struct *vma; |
	struct hstate *h = hstate_file(file);
219 | const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); |
220 | |
221 | if (len & ~huge_page_mask(h)) |
222 | return -EINVAL; |
223 | if (len > TASK_SIZE) |
224 | return -ENOMEM; |
225 | |
226 | if (flags & MAP_FIXED) { |
227 | if (prepare_hugepage_range(file, addr, len)) |
228 | return -EINVAL; |
229 | return addr; |
230 | } |
231 | |
232 | if (addr) { |
233 | addr = ALIGN(addr, huge_page_size(h)); |
234 | vma = find_vma(mm, addr); |
235 | if (mmap_end - len >= addr && |
236 | (!vma || addr + len <= vm_start_gap(vma))) |
237 | return addr; |
238 | } |
239 | |
240 | /* |
241 | * Use mm->get_unmapped_area value as a hint to use topdown routine. |
242 | * If architectures have special needs, they should define their own |
243 | * version of hugetlb_get_unmapped_area. |
244 | */ |
245 | if (mm->get_unmapped_area == arch_get_unmapped_area_topdown) |
246 | return hugetlb_get_unmapped_area_topdown(file, addr, len, |
247 | pgoff, flags); |
248 | return hugetlb_get_unmapped_area_bottomup(file, addr, len, |
249 | pgoff, flags); |
250 | } |
251 | |
252 | #ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA |
253 | static unsigned long |
254 | hugetlb_get_unmapped_area(struct file *file, unsigned long addr, |
255 | unsigned long len, unsigned long pgoff, |
256 | unsigned long flags) |
257 | { |
258 | return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags); |
259 | } |
260 | #endif |
261 | |
262 | /* |
263 | * Someone wants to read @bytes from a HWPOISON hugetlb @page from @offset. |
264 | * Returns the maximum number of bytes one can read without touching the 1st raw |
265 | * HWPOISON subpage. |
266 | * |
267 | * The implementation borrows the iteration logic from copy_page_to_iter*. |
268 | */ |
269 | static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t bytes) |
270 | { |
271 | size_t n = 0; |
272 | size_t res = 0; |
273 | |
274 | /* First subpage to start the loop. */ |
275 | page = nth_page(page, offset / PAGE_SIZE); |
276 | offset %= PAGE_SIZE; |
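	/*
	 * Example: if @offset falls in the first subpage and the third
	 * subpage is the first raw HWPOISON subpage, at most
	 * 2 * PAGE_SIZE - @offset bytes (capped at @bytes) are returned.
	 */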
277 | while (1) { |
278 | if (is_raw_hwpoison_page_in_hugepage(page)) |
279 | break; |
280 | |
281 | /* Safe to read n bytes without touching HWPOISON subpage. */ |
282 | n = min(bytes, (size_t)PAGE_SIZE - offset); |
283 | res += n; |
284 | bytes -= n; |
285 | if (!bytes || !n) |
286 | break; |
287 | offset += n; |
288 | if (offset == PAGE_SIZE) { |
289 | page = nth_page(page, 1); |
290 | offset = 0; |
291 | } |
292 | } |
293 | |
294 | return res; |
295 | } |
296 | |
297 | /* |
298 | * Support for read() - Find the page attached to f_mapping and copy out the |
299 | * data. This provides functionality similar to filemap_read(). |
300 | */ |
301 | static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) |
302 | { |
303 | struct file *file = iocb->ki_filp; |
	struct hstate *h = hstate_file(file);
305 | struct address_space *mapping = file->f_mapping; |
306 | struct inode *inode = mapping->host; |
307 | unsigned long index = iocb->ki_pos >> huge_page_shift(h); |
308 | unsigned long offset = iocb->ki_pos & ~huge_page_mask(h); |
309 | unsigned long end_index; |
310 | loff_t isize; |
311 | ssize_t retval = 0; |
312 | |
	while (iov_iter_count(to)) {
314 | struct folio *folio; |
315 | size_t nr, copied, want; |
316 | |
317 | /* nr is the maximum number of bytes to copy from this page */ |
318 | nr = huge_page_size(h); |
319 | isize = i_size_read(inode); |
320 | if (!isize) |
321 | break; |
322 | end_index = (isize - 1) >> huge_page_shift(h); |
323 | if (index > end_index) |
324 | break; |
325 | if (index == end_index) { |
326 | nr = ((isize - 1) & ~huge_page_mask(h)) + 1; |
327 | if (nr <= offset) |
328 | break; |
329 | } |
330 | nr = nr - offset; |
331 | |
332 | /* Find the folio */ |
		folio = filemap_lock_hugetlb_folio(h, mapping, index);
		if (IS_ERR(folio)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
340 | } else { |
341 | folio_unlock(folio); |
342 | |
343 | if (!folio_test_has_hwpoisoned(folio)) |
344 | want = nr; |
345 | else { |
346 | /* |
347 | * Adjust how many bytes safe to read without |
348 | * touching the 1st raw HWPOISON subpage after |
349 | * offset. |
350 | */ |
				want = adjust_range_hwpoison(&folio->page, offset, nr);
352 | if (want == 0) { |
353 | folio_put(folio); |
354 | retval = -EIO; |
355 | break; |
356 | } |
357 | } |
358 | |
359 | /* |
360 | * We have the folio, copy it to user space buffer. |
361 | */ |
			copied = copy_folio_to_iter(folio, offset, want, to);
363 | folio_put(folio); |
364 | } |
365 | offset += copied; |
366 | retval += copied; |
367 | if (copied != nr && iov_iter_count(i: to)) { |
368 | if (!retval) |
369 | retval = -EFAULT; |
370 | break; |
371 | } |
372 | index += offset >> huge_page_shift(h); |
373 | offset &= ~huge_page_mask(h); |
374 | } |
375 | iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset; |
376 | return retval; |
377 | } |
378 | |
379 | static int hugetlbfs_write_begin(struct file *file, |
380 | struct address_space *mapping, |
381 | loff_t pos, unsigned len, |
382 | struct page **pagep, void **fsdata) |
383 | { |
384 | return -EINVAL; |
385 | } |
386 | |
387 | static int hugetlbfs_write_end(struct file *file, struct address_space *mapping, |
388 | loff_t pos, unsigned len, unsigned copied, |
389 | struct page *page, void *fsdata) |
390 | { |
391 | BUG(); |
392 | return -EINVAL; |
393 | } |
394 | |
395 | static void hugetlb_delete_from_page_cache(struct folio *folio) |
396 | { |
397 | folio_clear_dirty(folio); |
398 | folio_clear_uptodate(folio); |
399 | filemap_remove_folio(folio); |
400 | } |
401 | |
402 | /* |
403 | * Called with i_mmap_rwsem held for inode based vma maps. This makes |
404 | * sure vma (and vm_mm) will not go away. We also hold the hugetlb fault |
405 | * mutex for the page in the mapping. So, we can not race with page being |
406 | * faulted into the vma. |
407 | */ |
408 | static bool hugetlb_vma_maps_page(struct vm_area_struct *vma, |
409 | unsigned long addr, struct page *page) |
410 | { |
411 | pte_t *ptep, pte; |
412 | |
	ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma)));
414 | if (!ptep) |
415 | return false; |
416 | |
417 | pte = huge_ptep_get(ptep); |
	if (huge_pte_none(pte) || !pte_present(pte))
419 | return false; |
420 | |
421 | if (pte_page(pte) == page) |
422 | return true; |
423 | |
424 | return false; |
425 | } |
426 | |
427 | /* |
428 | * Can vma_offset_start/vma_offset_end overflow on 32-bit arches? |
429 | * No, because the interval tree returns us only those vmas |
430 | * which overlap the truncated area starting at pgoff, |
431 | * and no vma on a 32-bit arch can span beyond the 4GB. |
432 | */ |
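/*
 * Example: a vma with vm_pgoff == 512 that is truncated starting at
 * pgoff 1024 (both in PAGE_SIZE units) gets an unmap range starting at
 * vma->vm_start + (512 << PAGE_SHIFT).
 */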
433 | static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start) |
434 | { |
435 | unsigned long offset = 0; |
436 | |
437 | if (vma->vm_pgoff < start) |
438 | offset = (start - vma->vm_pgoff) << PAGE_SHIFT; |
439 | |
440 | return vma->vm_start + offset; |
441 | } |
442 | |
443 | static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end) |
444 | { |
445 | unsigned long t_end; |
446 | |
447 | if (!end) |
448 | return vma->vm_end; |
449 | |
450 | t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start; |
451 | if (t_end > vma->vm_end) |
452 | t_end = vma->vm_end; |
453 | return t_end; |
454 | } |
455 | |
456 | /* |
457 | * Called with hugetlb fault mutex held. Therefore, no more mappings to |
458 | * this folio can be created while executing the routine. |
459 | */ |
460 | static void hugetlb_unmap_file_folio(struct hstate *h, |
461 | struct address_space *mapping, |
462 | struct folio *folio, pgoff_t index) |
463 | { |
464 | struct rb_root_cached *root = &mapping->i_mmap; |
465 | struct hugetlb_vma_lock *vma_lock; |
466 | struct page *page = &folio->page; |
467 | struct vm_area_struct *vma; |
468 | unsigned long v_start; |
469 | unsigned long v_end; |
470 | pgoff_t start, end; |
471 | |
472 | start = index * pages_per_huge_page(h); |
473 | end = (index + 1) * pages_per_huge_page(h); |
474 | |
475 | i_mmap_lock_write(mapping); |
476 | retry: |
477 | vma_lock = NULL; |
478 | vma_interval_tree_foreach(vma, root, start, end - 1) { |
479 | v_start = vma_offset_start(vma, start); |
480 | v_end = vma_offset_end(vma, end); |
481 | |
		if (!hugetlb_vma_maps_page(vma, v_start, page))
483 | continue; |
484 | |
485 | if (!hugetlb_vma_trylock_write(vma)) { |
486 | vma_lock = vma->vm_private_data; |
			/*
			 * If we can not get the vma lock, we need to drop
			 * i_mmap_rwsem and take locks in order. First,
			 * take a ref on the vma_lock structure so that
			 * we can be guaranteed it will not go away when
			 * dropping i_mmap_rwsem.
			 */
			kref_get(&vma_lock->refs);
495 | break; |
496 | } |
497 | |
498 | unmap_hugepage_range(vma, v_start, v_end, NULL, |
499 | ZAP_FLAG_DROP_MARKER); |
500 | hugetlb_vma_unlock_write(vma); |
501 | } |
502 | |
503 | i_mmap_unlock_write(mapping); |
504 | |
505 | if (vma_lock) { |
506 | /* |
507 | * Wait on vma_lock. We know it is still valid as we have |
508 | * a reference. We must 'open code' vma locking as we do |
509 | * not know if vma_lock is still attached to vma. |
510 | */ |
		down_write(&vma_lock->rw_sema);
512 | i_mmap_lock_write(mapping); |
513 | |
514 | vma = vma_lock->vma; |
515 | if (!vma) { |
516 | /* |
517 | * If lock is no longer attached to vma, then just |
518 | * unlock, drop our reference and retry looking for |
519 | * other vmas. |
520 | */ |
			up_write(&vma_lock->rw_sema);
			kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
523 | goto retry; |
524 | } |
525 | |
526 | /* |
527 | * vma_lock is still attached to vma. Check to see if vma |
528 | * still maps page and if so, unmap. |
529 | */ |
530 | v_start = vma_offset_start(vma, start); |
531 | v_end = vma_offset_end(vma, end); |
		if (hugetlb_vma_maps_page(vma, v_start, page))
			unmap_hugepage_range(vma, v_start, v_end, NULL,
					     ZAP_FLAG_DROP_MARKER);

		kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
537 | hugetlb_vma_unlock_write(vma); |
538 | |
539 | goto retry; |
540 | } |
541 | } |
542 | |
543 | static void |
544 | hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end, |
545 | zap_flags_t zap_flags) |
546 | { |
547 | struct vm_area_struct *vma; |
548 | |
549 | /* |
550 | * end == 0 indicates that the entire range after start should be |
551 | * unmapped. Note, end is exclusive, whereas the interval tree takes |
552 | * an inclusive "last". |
553 | */ |
554 | vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) { |
555 | unsigned long v_start; |
556 | unsigned long v_end; |
557 | |
558 | if (!hugetlb_vma_trylock_write(vma)) |
559 | continue; |
560 | |
561 | v_start = vma_offset_start(vma, start); |
562 | v_end = vma_offset_end(vma, end); |
563 | |
564 | unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags); |
565 | |
566 | /* |
567 | * Note that vma lock only exists for shared/non-private |
568 | * vmas. Therefore, lock is not held when calling |
569 | * unmap_hugepage_range for private vmas. |
570 | */ |
571 | hugetlb_vma_unlock_write(vma); |
572 | } |
573 | } |
574 | |
575 | /* |
576 | * Called with hugetlb fault mutex held. |
577 | * Returns true if page was actually removed, false otherwise. |
578 | */ |
579 | static bool remove_inode_single_folio(struct hstate *h, struct inode *inode, |
580 | struct address_space *mapping, |
581 | struct folio *folio, pgoff_t index, |
582 | bool truncate_op) |
583 | { |
584 | bool ret = false; |
585 | |
586 | /* |
587 | * If folio is mapped, it was faulted in after being |
588 | * unmapped in caller. Unmap (again) while holding |
589 | * the fault mutex. The mutex will prevent faults |
590 | * until we finish removing the folio. |
591 | */ |
592 | if (unlikely(folio_mapped(folio))) |
593 | hugetlb_unmap_file_folio(h, mapping, folio, index); |
594 | |
595 | folio_lock(folio); |
596 | /* |
597 | * We must remove the folio from page cache before removing |
	 * the region/reserve map (hugetlb_unreserve_pages). In
	 * rare out-of-memory conditions, removal of the region/reserve
	 * map could fail. Correspondingly, the subpool and global
	 * reserve usage counts may need to be adjusted.
602 | */ |
603 | VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio); |
604 | hugetlb_delete_from_page_cache(folio); |
605 | ret = true; |
606 | if (!truncate_op) { |
607 | if (unlikely(hugetlb_unreserve_pages(inode, index, |
608 | index + 1, 1))) |
609 | hugetlb_fix_reserve_counts(inode); |
610 | } |
611 | |
612 | folio_unlock(folio); |
613 | return ret; |
614 | } |
615 | |
616 | /* |
617 | * remove_inode_hugepages handles two distinct cases: truncation and hole |
618 | * punch. There are subtle differences in operation for each case. |
619 | * |
620 | * truncation is indicated by end of range being LLONG_MAX |
621 | * In this case, we first scan the range and release found pages. |
622 | * After releasing pages, hugetlb_unreserve_pages cleans up region/reserve |
623 | * maps and global counts. Page faults can race with truncation. |
624 | * During faults, hugetlb_no_page() checks i_size before page allocation, |
625 | * and again after obtaining page table lock. It will 'back out' |
626 | * allocations in the truncated range. |
627 | * hole punch is indicated if end is not LLONG_MAX |
628 | * In the hole punch case we scan the range and release found pages. |
629 | * Only when releasing a page is the associated region/reserve map |
630 | * deleted. The region/reserve map for ranges without associated |
631 | * pages are not modified. Page faults can race with hole punch. |
632 | * This is indicated if we find a mapped page. |
633 | * Note: If the passed end of range value is beyond the end of file, but |
634 | * not LLONG_MAX this routine still performs a hole punch operation. |
635 | */ |
636 | static void remove_inode_hugepages(struct inode *inode, loff_t lstart, |
637 | loff_t lend) |
638 | { |
	struct hstate *h = hstate_inode(inode);
640 | struct address_space *mapping = &inode->i_data; |
641 | const pgoff_t end = lend >> PAGE_SHIFT; |
642 | struct folio_batch fbatch; |
643 | pgoff_t next, index; |
644 | int i, freed = 0; |
645 | bool truncate_op = (lend == LLONG_MAX); |
646 | |
	folio_batch_init(&fbatch);
	next = lstart >> PAGE_SHIFT;
	while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); ++i) {
			struct folio *folio = fbatch.folios[i];
			u32 hash = 0;

			index = folio->index >> huge_page_order(h);
			hash = hugetlb_fault_mutex_hash(mapping, index);
656 | mutex_lock(&hugetlb_fault_mutex_table[hash]); |
657 | |
658 | /* |
659 | * Remove folio that was part of folio_batch. |
660 | */ |
661 | if (remove_inode_single_folio(h, inode, mapping, folio, |
662 | index, truncate_op)) |
663 | freed++; |
664 | |
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		folio_batch_release(&fbatch);
668 | cond_resched(); |
669 | } |
670 | |
671 | if (truncate_op) |
		(void)hugetlb_unreserve_pages(inode,
				lstart >> huge_page_shift(h),
				LONG_MAX, freed);
675 | } |
676 | |
677 | static void hugetlbfs_evict_inode(struct inode *inode) |
678 | { |
679 | struct resv_map *resv_map; |
680 | |
	remove_inode_hugepages(inode, 0, LLONG_MAX);
682 | |
683 | /* |
684 | * Get the resv_map from the address space embedded in the inode. |
685 | * This is the address space which points to any resv_map allocated |
686 | * at inode creation time. If this is a device special inode, |
687 | * i_mapping may not point to the original address space. |
688 | */ |
689 | resv_map = (struct resv_map *)(&inode->i_data)->private_data; |
690 | /* Only regular and link inodes have associated reserve maps */ |
691 | if (resv_map) |
		resv_map_release(&resv_map->refs);
693 | clear_inode(inode); |
694 | } |
695 | |
696 | static void hugetlb_vmtruncate(struct inode *inode, loff_t offset) |
697 | { |
698 | pgoff_t pgoff; |
699 | struct address_space *mapping = inode->i_mapping; |
	struct hstate *h = hstate_inode(inode);
701 | |
702 | BUG_ON(offset & ~huge_page_mask(h)); |
703 | pgoff = offset >> PAGE_SHIFT; |
704 | |
	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
				      ZAP_FLAG_DROP_MARKER);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
712 | } |
713 | |
714 | static void hugetlbfs_zero_partial_page(struct hstate *h, |
715 | struct address_space *mapping, |
716 | loff_t start, |
717 | loff_t end) |
718 | { |
719 | pgoff_t idx = start >> huge_page_shift(h); |
720 | struct folio *folio; |
721 | |
722 | folio = filemap_lock_hugetlb_folio(h, mapping, idx); |
	if (IS_ERR(folio))
724 | return; |
725 | |
726 | start = start & ~huge_page_mask(h); |
727 | end = end & ~huge_page_mask(h); |
728 | if (!end) |
729 | end = huge_page_size(h); |
730 | |
	folio_zero_segment(folio, (size_t)start, (size_t)end);
732 | |
733 | folio_unlock(folio); |
734 | folio_put(folio); |
735 | } |
736 | |
737 | static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) |
738 | { |
739 | struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); |
740 | struct address_space *mapping = inode->i_mapping; |
	struct hstate *h = hstate_inode(inode);
742 | loff_t hpage_size = huge_page_size(h); |
743 | loff_t hole_start, hole_end; |
744 | |
745 | /* |
746 | * hole_start and hole_end indicate the full pages within the hole. |
747 | */ |
748 | hole_start = round_up(offset, hpage_size); |
749 | hole_end = round_down(offset + len, hpage_size); |
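	/*
	 * Example: with 2 MB huge pages, offset == 1 MB and len == 6 MB give
	 * hole_start == 2 MB and hole_end == 6 MB; the partial ranges
	 * [1 MB, 2 MB) and [6 MB, 7 MB) are zeroed below rather than removed.
	 */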
750 | |
751 | inode_lock(inode); |
752 | |
753 | /* protected by i_rwsem */ |
754 | if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { |
755 | inode_unlock(inode); |
756 | return -EPERM; |
757 | } |
758 | |
759 | i_mmap_lock_write(mapping); |
760 | |
761 | /* If range starts before first full page, zero partial page. */ |
762 | if (offset < hole_start) |
		hugetlbfs_zero_partial_page(h, mapping,
				offset, min(offset + len, hole_start));
765 | |
766 | /* Unmap users of full pages in the hole. */ |
767 | if (hole_end > hole_start) { |
768 | if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) |
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT, 0);
772 | } |
773 | |
774 | /* If range extends beyond last full page, zero partial page. */ |
775 | if ((offset + len) > hole_end && (offset + len) > hole_start) |
		hugetlbfs_zero_partial_page(h, mapping,
				hole_end, offset + len);
778 | |
779 | i_mmap_unlock_write(mapping); |
780 | |
781 | /* Remove full pages from the file. */ |
782 | if (hole_end > hole_start) |
		remove_inode_hugepages(inode, hole_start, hole_end);
784 | |
785 | inode_unlock(inode); |
786 | |
787 | return 0; |
788 | } |
789 | |
790 | static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, |
791 | loff_t len) |
792 | { |
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
797 | struct vm_area_struct pseudo_vma; |
798 | struct mm_struct *mm = current->mm; |
799 | loff_t hpage_size = huge_page_size(h); |
800 | unsigned long hpage_shift = huge_page_shift(h); |
801 | pgoff_t start, index, end; |
802 | int error; |
803 | u32 hash; |
804 | |
805 | if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) |
806 | return -EOPNOTSUPP; |
807 | |
808 | if (mode & FALLOC_FL_PUNCH_HOLE) |
809 | return hugetlbfs_punch_hole(inode, offset, len); |
810 | |
811 | /* |
812 | * Default preallocate case. |
813 | * For this range, start is rounded down and end is rounded up |
814 | * as well as being converted to page offsets. |
815 | */ |
816 | start = offset >> hpage_shift; |
817 | end = (offset + len + hpage_size - 1) >> hpage_shift; |
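	/*
	 * Example: with 2 MB huge pages, offset == 3 MB and len == 3 MB give
	 * start == 1 and end == 3, so huge pages covering [2 MB, 6 MB) are
	 * preallocated.
	 */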
818 | |
819 | inode_lock(inode); |
820 | |
821 | /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ |
	error = inode_newsize_ok(inode, offset + len);
823 | if (error) |
824 | goto out; |
825 | |
826 | if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { |
827 | error = -EPERM; |
828 | goto out; |
829 | } |
830 | |
831 | /* |
832 | * Initialize a pseudo vma as this is required by the huge page |
833 | * allocation routines. |
834 | */ |
	vma_init(&pseudo_vma, mm);
	vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
837 | pseudo_vma.vm_file = file; |
838 | |
839 | for (index = start; index < end; index++) { |
840 | /* |
841 | * This is supposed to be the vaddr where the page is being |
842 | * faulted in, but we have no vaddr here. |
843 | */ |
844 | struct folio *folio; |
845 | unsigned long addr; |
846 | |
847 | cond_resched(); |
848 | |
849 | /* |
850 | * fallocate(2) manpage permits EINTR; we may have been |
851 | * interrupted because we are using up too much memory. |
852 | */ |
853 | if (signal_pending(current)) { |
854 | error = -EINTR; |
855 | break; |
856 | } |
857 | |
858 | /* addr is the offset within the file (zero based) */ |
859 | addr = index * hpage_size; |
860 | |
861 | /* mutex taken here, fault path and hole punch */ |
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		folio = filemap_get_folio(mapping, index << huge_page_order(h));
		if (!IS_ERR(folio)) {
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
870 | continue; |
871 | } |
872 | |
873 | /* |
874 | * Allocate folio without setting the avoid_reserve argument. |
875 | * There certainly are no reserves associated with the |
876 | * pseudo_vma. However, there could be shared mappings with |
877 | * reserves for the file at the inode level. If we fallocate |
878 | * folios in these areas, we need to consume the reserves |
879 | * to keep reservation accounting consistent. |
880 | */ |
		folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0);
		if (IS_ERR(folio)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(folio);
			goto out;
		}
		clear_huge_page(&folio->page, addr, pages_per_huge_page(h));
		__folio_mark_uptodate(folio);
		error = hugetlb_add_to_page_cache(folio, mapping, index);
		if (unlikely(error)) {
			restore_reserve_on_error(h, &pseudo_vma, addr, folio);
			folio_put(folio);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
898 | |
899 | folio_set_hugetlb_migratable(folio); |
900 | /* |
901 | * folio_unlock because locked by hugetlb_add_to_page_cache() |
902 | * folio_put() due to reference from alloc_hugetlb_folio() |
903 | */ |
904 | folio_unlock(folio); |
905 | folio_put(folio); |
906 | } |
907 | |
908 | if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) |
		i_size_write(inode, offset + len);
910 | inode_set_ctime_current(inode); |
911 | out: |
912 | inode_unlock(inode); |
913 | return error; |
914 | } |
915 | |
916 | static int hugetlbfs_setattr(struct mnt_idmap *idmap, |
917 | struct dentry *dentry, struct iattr *attr) |
918 | { |
919 | struct inode *inode = d_inode(dentry); |
	struct hstate *h = hstate_inode(inode);
921 | int error; |
922 | unsigned int ia_valid = attr->ia_valid; |
923 | struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); |
924 | |
925 | error = setattr_prepare(&nop_mnt_idmap, dentry, attr); |
926 | if (error) |
927 | return error; |
928 | |
929 | if (ia_valid & ATTR_SIZE) { |
930 | loff_t oldsize = inode->i_size; |
931 | loff_t newsize = attr->ia_size; |
932 | |
933 | if (newsize & ~huge_page_mask(h)) |
934 | return -EINVAL; |
935 | /* protected by i_rwsem */ |
936 | if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || |
937 | (newsize > oldsize && (info->seals & F_SEAL_GROW))) |
938 | return -EPERM; |
		hugetlb_vmtruncate(inode, newsize);
940 | } |
941 | |
942 | setattr_copy(&nop_mnt_idmap, inode, attr); |
943 | mark_inode_dirty(inode); |
944 | return 0; |
945 | } |
946 | |
947 | static struct inode *hugetlbfs_get_root(struct super_block *sb, |
948 | struct hugetlbfs_fs_context *ctx) |
949 | { |
950 | struct inode *inode; |
951 | |
952 | inode = new_inode(sb); |
953 | if (inode) { |
954 | inode->i_ino = get_next_ino(); |
955 | inode->i_mode = S_IFDIR | ctx->mode; |
956 | inode->i_uid = ctx->uid; |
957 | inode->i_gid = ctx->gid; |
958 | simple_inode_init_ts(inode); |
959 | inode->i_op = &hugetlbfs_dir_inode_operations; |
960 | inode->i_fop = &simple_dir_operations; |
961 | /* directory inodes start off with i_nlink == 2 (for "." entry) */ |
962 | inc_nlink(inode); |
963 | lockdep_annotate_inode_mutex_key(inode); |
964 | } |
965 | return inode; |
966 | } |
967 | |
968 | /* |
969 | * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never |
970 | * be taken from reclaim -- unlike regular filesystems. This needs an |
971 | * annotation because huge_pmd_share() does an allocation under hugetlb's |
972 | * i_mmap_rwsem. |
973 | */ |
974 | static struct lock_class_key hugetlbfs_i_mmap_rwsem_key; |
975 | |
976 | static struct inode *hugetlbfs_get_inode(struct super_block *sb, |
977 | struct inode *dir, |
978 | umode_t mode, dev_t dev) |
979 | { |
980 | struct inode *inode; |
981 | struct resv_map *resv_map = NULL; |
982 | |
983 | /* |
984 | * Reserve maps are only needed for inodes that can have associated |
985 | * page allocations. |
986 | */ |
987 | if (S_ISREG(mode) || S_ISLNK(mode)) { |
988 | resv_map = resv_map_alloc(); |
989 | if (!resv_map) |
990 | return NULL; |
991 | } |
992 | |
993 | inode = new_inode(sb); |
994 | if (inode) { |
995 | struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); |
996 | |
997 | inode->i_ino = get_next_ino(); |
		inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
999 | lockdep_set_class(&inode->i_mapping->i_mmap_rwsem, |
1000 | &hugetlbfs_i_mmap_rwsem_key); |
1001 | inode->i_mapping->a_ops = &hugetlbfs_aops; |
1002 | simple_inode_init_ts(inode); |
1003 | inode->i_mapping->private_data = resv_map; |
1004 | info->seals = F_SEAL_SEAL; |
1005 | switch (mode & S_IFMT) { |
1006 | default: |
1007 | init_special_inode(inode, mode, dev); |
1008 | break; |
1009 | case S_IFREG: |
1010 | inode->i_op = &hugetlbfs_inode_operations; |
1011 | inode->i_fop = &hugetlbfs_file_operations; |
1012 | break; |
1013 | case S_IFDIR: |
1014 | inode->i_op = &hugetlbfs_dir_inode_operations; |
1015 | inode->i_fop = &simple_dir_operations; |
1016 | |
1017 | /* directory inodes start off with i_nlink == 2 (for "." entry) */ |
1018 | inc_nlink(inode); |
1019 | break; |
1020 | case S_IFLNK: |
1021 | inode->i_op = &page_symlink_inode_operations; |
1022 | inode_nohighmem(inode); |
1023 | break; |
1024 | } |
1025 | lockdep_annotate_inode_mutex_key(inode); |
1026 | } else { |
1027 | if (resv_map) |
			kref_put(&resv_map->refs, resv_map_release);
1029 | } |
1030 | |
1031 | return inode; |
1032 | } |
1033 | |
1034 | /* |
1035 | * File creation. Allocate an inode, and we're done.. |
1036 | */ |
1037 | static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir, |
1038 | struct dentry *dentry, umode_t mode, dev_t dev) |
1039 | { |
1040 | struct inode *inode; |
1041 | |
	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (!inode)
		return -ENOSPC;
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
1046 | d_instantiate(dentry, inode); |
1047 | dget(dentry);/* Extra count - pin the dentry in core */ |
1048 | return 0; |
1049 | } |
1050 | |
1051 | static int hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, |
1052 | struct dentry *dentry, umode_t mode) |
1053 | { |
	int retval = hugetlbfs_mknod(&nop_mnt_idmap, dir, dentry,
				     mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
1058 | return retval; |
1059 | } |
1060 | |
1061 | static int hugetlbfs_create(struct mnt_idmap *idmap, |
1062 | struct inode *dir, struct dentry *dentry, |
1063 | umode_t mode, bool excl) |
1064 | { |
	return hugetlbfs_mknod(&nop_mnt_idmap, dir, dentry, mode | S_IFREG, 0);
1066 | } |
1067 | |
1068 | static int hugetlbfs_tmpfile(struct mnt_idmap *idmap, |
1069 | struct inode *dir, struct file *file, |
1070 | umode_t mode) |
1071 | { |
1072 | struct inode *inode; |
1073 | |
	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode | S_IFREG, 0);
	if (!inode)
		return -ENOSPC;
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	d_tmpfile(file, inode);
	return finish_open_simple(file, 0);
1080 | } |
1081 | |
1082 | static int hugetlbfs_symlink(struct mnt_idmap *idmap, |
1083 | struct inode *dir, struct dentry *dentry, |
1084 | const char *symname) |
1085 | { |
1086 | struct inode *inode; |
1087 | int error = -ENOSPC; |
1088 | |
	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
1093 | if (!error) { |
1094 | d_instantiate(dentry, inode); |
1095 | dget(dentry); |
1096 | } else |
1097 | iput(inode); |
1098 | } |
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
1100 | |
1101 | return error; |
1102 | } |
1103 | |
1104 | #ifdef CONFIG_MIGRATION |
1105 | static int hugetlbfs_migrate_folio(struct address_space *mapping, |
1106 | struct folio *dst, struct folio *src, |
1107 | enum migrate_mode mode) |
1108 | { |
1109 | int rc; |
1110 | |
1111 | rc = migrate_huge_page_move_mapping(mapping, dst, src); |
1112 | if (rc != MIGRATEPAGE_SUCCESS) |
1113 | return rc; |
1114 | |
	if (hugetlb_folio_subpool(src)) {
		hugetlb_set_folio_subpool(dst,
					hugetlb_folio_subpool(src));
		hugetlb_set_folio_subpool(src, NULL);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
1125 | |
1126 | return MIGRATEPAGE_SUCCESS; |
1127 | } |
1128 | #else |
1129 | #define hugetlbfs_migrate_folio NULL |
1130 | #endif |
1131 | |
1132 | static int hugetlbfs_error_remove_page(struct address_space *mapping, |
1133 | struct page *page) |
1134 | { |
1135 | return 0; |
1136 | } |
1137 | |
1138 | /* |
1139 | * Display the mount options in /proc/mounts. |
1140 | */ |
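/*
 * Most options are emitted only when they differ from the defaults;
 * pagesize is always printed. For example, a mount with uid=1000, 2 MB huge
 * pages and size=1G would add ",uid=1000,pagesize=2M,size=1073741824".
 */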
1141 | static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root) |
1142 | { |
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);
1159 | |
1160 | hpage_size /= 1024; |
1161 | mod = 'K'; |
1162 | if (hpage_size >= 1024) { |
1163 | hpage_size /= 1024; |
1164 | mod = 'M'; |
1165 | } |
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
1174 | } |
1175 | return 0; |
1176 | } |
1177 | |
1178 | static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf) |
1179 | { |
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));
	u64 id = huge_encode_dev(dentry->d_sb->s_dev);

	buf->f_fsid = u64_to_fsid(id);
1185 | buf->f_type = HUGETLBFS_MAGIC; |
1186 | buf->f_bsize = huge_page_size(h); |
1187 | if (sbinfo) { |
		spin_lock(&sbinfo->stat_lock);
1189 | /* If no limits set, just report 0 or -1 for max/free/used |
1190 | * blocks, like simple_statfs() */ |
1191 | if (sbinfo->spool) { |
1192 | long free_pages; |
1193 | |
			spin_lock_irq(&sbinfo->spool->lock);
1195 | buf->f_blocks = sbinfo->spool->max_hpages; |
1196 | free_pages = sbinfo->spool->max_hpages |
1197 | - sbinfo->spool->used_hpages; |
1198 | buf->f_bavail = buf->f_bfree = free_pages; |
			spin_unlock_irq(&sbinfo->spool->lock);
1200 | buf->f_files = sbinfo->max_inodes; |
1201 | buf->f_ffree = sbinfo->free_inodes; |
1202 | } |
		spin_unlock(&sbinfo->stat_lock);
1204 | } |
1205 | buf->f_namelen = NAME_MAX; |
1206 | return 0; |
1207 | } |
1208 | |
1209 | static void hugetlbfs_put_super(struct super_block *sb) |
1210 | { |
1211 | struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb); |
1212 | |
1213 | if (sbi) { |
1214 | sb->s_fs_info = NULL; |
1215 | |
1216 | if (sbi->spool) |
			hugepage_put_subpool(sbi->spool);
1218 | |
		kfree(sbi);
1220 | } |
1221 | } |
1222 | |
1223 | static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo) |
1224 | { |
1225 | if (sbinfo->free_inodes >= 0) { |
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
1233 | } |
1234 | |
1235 | return 1; |
1236 | } |
1237 | |
1238 | static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo) |
1239 | { |
1240 | if (sbinfo->free_inodes >= 0) { |
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
1244 | } |
1245 | } |
1246 | |
1247 | |
1248 | static struct kmem_cache *hugetlbfs_inode_cachep; |
1249 | |
1250 | static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) |
1251 | { |
1252 | struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb); |
1253 | struct hugetlbfs_inode_info *p; |
1254 | |
1255 | if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo))) |
1256 | return NULL; |
	p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
1258 | if (unlikely(!p)) { |
1259 | hugetlbfs_inc_free_inodes(sbinfo); |
1260 | return NULL; |
1261 | } |
1262 | return &p->vfs_inode; |
1263 | } |
1264 | |
1265 | static void hugetlbfs_free_inode(struct inode *inode) |
1266 | { |
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
1268 | } |
1269 | |
1270 | static void hugetlbfs_destroy_inode(struct inode *inode) |
1271 | { |
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
1273 | } |
1274 | |
1275 | static const struct address_space_operations hugetlbfs_aops = { |
1276 | .write_begin = hugetlbfs_write_begin, |
1277 | .write_end = hugetlbfs_write_end, |
1278 | .dirty_folio = noop_dirty_folio, |
1279 | .migrate_folio = hugetlbfs_migrate_folio, |
1280 | .error_remove_page = hugetlbfs_error_remove_page, |
1281 | }; |
1282 | |
1283 | |
1284 | static void init_once(void *foo) |
1285 | { |
1286 | struct hugetlbfs_inode_info *ei = foo; |
1287 | |
1288 | inode_init_once(&ei->vfs_inode); |
1289 | } |
1290 | |
1291 | const struct file_operations hugetlbfs_file_operations = { |
1292 | .read_iter = hugetlbfs_read_iter, |
1293 | .mmap = hugetlbfs_file_mmap, |
1294 | .fsync = noop_fsync, |
1295 | .get_unmapped_area = hugetlb_get_unmapped_area, |
1296 | .llseek = default_llseek, |
1297 | .fallocate = hugetlbfs_fallocate, |
1298 | }; |
1299 | |
1300 | static const struct inode_operations hugetlbfs_dir_inode_operations = { |
1301 | .create = hugetlbfs_create, |
1302 | .lookup = simple_lookup, |
1303 | .link = simple_link, |
1304 | .unlink = simple_unlink, |
1305 | .symlink = hugetlbfs_symlink, |
1306 | .mkdir = hugetlbfs_mkdir, |
1307 | .rmdir = simple_rmdir, |
1308 | .mknod = hugetlbfs_mknod, |
1309 | .rename = simple_rename, |
1310 | .setattr = hugetlbfs_setattr, |
1311 | .tmpfile = hugetlbfs_tmpfile, |
1312 | }; |
1313 | |
1314 | static const struct inode_operations hugetlbfs_inode_operations = { |
1315 | .setattr = hugetlbfs_setattr, |
1316 | }; |
1317 | |
1318 | static const struct super_operations hugetlbfs_ops = { |
1319 | .alloc_inode = hugetlbfs_alloc_inode, |
1320 | .free_inode = hugetlbfs_free_inode, |
1321 | .destroy_inode = hugetlbfs_destroy_inode, |
1322 | .evict_inode = hugetlbfs_evict_inode, |
1323 | .statfs = hugetlbfs_statfs, |
1324 | .put_super = hugetlbfs_put_super, |
1325 | .show_options = hugetlbfs_show_options, |
1326 | }; |
1327 | |
1328 | /* |
1329 | * Convert size option passed from command line to number of huge pages |
1330 | * in the pool specified by hstate. Size option could be in bytes |
1331 | * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT). |
1332 | */ |
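/*
 * Example: with 2 MB huge pages, size=1G yields 512 huge pages, and
 * size=50% of a pool with max_huge_pages == 512 yields 256 huge pages.
 */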
1333 | static long |
1334 | hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt, |
1335 | enum hugetlbfs_size_type val_type) |
1336 | { |
1337 | if (val_type == NO_SIZE) |
1338 | return -1; |
1339 | |
1340 | if (val_type == SIZE_PERCENT) { |
1341 | size_opt <<= huge_page_shift(h); |
1342 | size_opt *= h->max_huge_pages; |
1343 | do_div(size_opt, 100); |
1344 | } |
1345 | |
1346 | size_opt >>= huge_page_shift(h); |
1347 | return size_opt; |
1348 | } |
1349 | |
1350 | /* |
1351 | * Parse one mount parameter. |
1352 | */ |
1353 | static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param) |
1354 | { |
1355 | struct hugetlbfs_fs_context *ctx = fc->fs_private; |
1356 | struct fs_parse_result result; |
1357 | char *rest; |
1358 | unsigned long ps; |
1359 | int opt; |
1360 | |
	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
1362 | if (opt < 0) |
1363 | return opt; |
1364 | |
1365 | switch (opt) { |
	case Opt_uid:
		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(ctx->uid))
			goto bad_val;
		return 0;

	case Opt_gid:
		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(ctx->gid))
			goto bad_val;
		return 0;
1377 | |
1378 | case Opt_mode: |
1379 | ctx->mode = result.uint_32 & 01777U; |
1380 | return 0; |
1381 | |
1382 | case Opt_size: |
1383 | /* memparse() will accept a K/M/G without a digit */ |
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->max_size_opt = memparse(param->string, &rest);
1387 | ctx->max_val_type = SIZE_STD; |
1388 | if (*rest == '%') |
1389 | ctx->max_val_type = SIZE_PERCENT; |
1390 | return 0; |
1391 | |
1392 | case Opt_nr_inodes: |
1393 | /* memparse() will accept a K/M/G without a digit */ |
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->nr_inodes = memparse(param->string, &rest);
1397 | return 0; |
1398 | |
1399 | case Opt_pagesize: |
		ps = memparse(param->string, &rest);
		ctx->hstate = size_to_hstate(ps);
		if (!ctx->hstate) {
			pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
1404 | return -EINVAL; |
1405 | } |
1406 | return 0; |
1407 | |
1408 | case Opt_min_size: |
1409 | /* memparse() will accept a K/M/G without a digit */ |
		if (!param->string || !isdigit(param->string[0]))
			goto bad_val;
		ctx->min_size_opt = memparse(param->string, &rest);
1413 | ctx->min_val_type = SIZE_STD; |
1414 | if (*rest == '%') |
1415 | ctx->min_val_type = SIZE_PERCENT; |
1416 | return 0; |
1417 | |
1418 | default: |
1419 | return -EINVAL; |
1420 | } |
1421 | |
1422 | bad_val: |
	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
		       param->string, param->key);
1425 | } |
1426 | |
1427 | /* |
1428 | * Validate the parsed options. |
1429 | */ |
1430 | static int hugetlbfs_validate(struct fs_context *fc) |
1431 | { |
1432 | struct hugetlbfs_fs_context *ctx = fc->fs_private; |
1433 | |
1434 | /* |
1435 | * Use huge page pool size (in hstate) to convert the size |
1436 | * options to number of huge pages. If NO_SIZE, -1 is returned. |
1437 | */ |
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						ctx->max_size_opt,
						ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						ctx->min_size_opt,
						ctx->min_val_type);
1444 | |
1445 | /* |
1446 | * If max_size was specified, then min_size must be smaller |
1447 | */ |
1448 | if (ctx->max_val_type > NO_SIZE && |
1449 | ctx->min_hpages > ctx->max_hpages) { |
		pr_err("Minimum size can not be greater than maximum size\n");
1451 | return -EINVAL; |
1452 | } |
1453 | |
1454 | return 0; |
1455 | } |
1456 | |
1457 | static int |
1458 | hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc) |
1459 | { |
1460 | struct hugetlbfs_fs_context *ctx = fc->fs_private; |
1461 | struct hugetlbfs_sb_info *sbinfo; |
1462 | |
	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
1464 | if (!sbinfo) |
1465 | return -ENOMEM; |
1466 | sb->s_fs_info = sbinfo; |
1467 | spin_lock_init(&sbinfo->stat_lock); |
1468 | sbinfo->hstate = ctx->hstate; |
1469 | sbinfo->max_inodes = ctx->nr_inodes; |
1470 | sbinfo->free_inodes = ctx->nr_inodes; |
1471 | sbinfo->spool = NULL; |
1472 | sbinfo->uid = ctx->uid; |
1473 | sbinfo->gid = ctx->gid; |
1474 | sbinfo->mode = ctx->mode; |
1475 | |
1476 | /* |
1477 | * Allocate and initialize subpool if maximum or minimum size is |
1478 | * specified. Any needed reservations (for minimum size) are taken |
1479 | * when the subpool is created. |
1480 | */ |
1481 | if (ctx->max_hpages != -1 || ctx->min_hpages != -1) { |
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
1485 | if (!sbinfo->spool) |
1486 | goto out_free; |
1487 | } |
1488 | sb->s_maxbytes = MAX_LFS_FILESIZE; |
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
1491 | sb->s_magic = HUGETLBFS_MAGIC; |
1492 | sb->s_op = &hugetlbfs_ops; |
1493 | sb->s_time_gran = 1; |
1494 | |
1495 | /* |
1496 | * Due to the special and limited functionality of hugetlbfs, it does |
1497 | * not work well as a stacking filesystem. |
1498 | */ |
1499 | sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH; |
1500 | sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx)); |
1501 | if (!sb->s_root) |
1502 | goto out_free; |
1503 | return 0; |
1504 | out_free: |
	kfree(sbinfo->spool);
	kfree(sbinfo);
1507 | return -ENOMEM; |
1508 | } |
1509 | |
1510 | static int hugetlbfs_get_tree(struct fs_context *fc) |
1511 | { |
1512 | int err = hugetlbfs_validate(fc); |
1513 | if (err) |
1514 | return err; |
	return get_tree_nodev(fc, hugetlbfs_fill_super);
1516 | } |
1517 | |
1518 | static void hugetlbfs_fs_context_free(struct fs_context *fc) |
1519 | { |
	kfree(fc->fs_private);
1521 | } |
1522 | |
1523 | static const struct fs_context_operations hugetlbfs_fs_context_ops = { |
1524 | .free = hugetlbfs_fs_context_free, |
1525 | .parse_param = hugetlbfs_parse_param, |
1526 | .get_tree = hugetlbfs_get_tree, |
1527 | }; |
1528 | |
1529 | static int hugetlbfs_init_fs_context(struct fs_context *fc) |
1530 | { |
1531 | struct hugetlbfs_fs_context *ctx; |
1532 | |
	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
1534 | if (!ctx) |
1535 | return -ENOMEM; |
1536 | |
1537 | ctx->max_hpages = -1; /* No limit on size by default */ |
1538 | ctx->nr_inodes = -1; /* No limit on number of inodes by default */ |
1539 | ctx->uid = current_fsuid(); |
1540 | ctx->gid = current_fsgid(); |
1541 | ctx->mode = 0755; |
1542 | ctx->hstate = &default_hstate; |
1543 | ctx->min_hpages = -1; /* No default minimum size */ |
1544 | ctx->max_val_type = NO_SIZE; |
1545 | ctx->min_val_type = NO_SIZE; |
1546 | fc->fs_private = ctx; |
1547 | fc->ops = &hugetlbfs_fs_context_ops; |
1548 | return 0; |
1549 | } |
1550 | |
1551 | static struct file_system_type hugetlbfs_fs_type = { |
	.name = "hugetlbfs",
1553 | .init_fs_context = hugetlbfs_init_fs_context, |
1554 | .parameters = hugetlb_fs_parameters, |
1555 | .kill_sb = kill_litter_super, |
1556 | }; |
1557 | |
1558 | static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE]; |
1559 | |
1560 | static int can_do_hugetlb_shm(void) |
1561 | { |
1562 | kgid_t shm_group; |
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
1564 | return capable(CAP_IPC_LOCK) || in_group_p(shm_group); |
1565 | } |
1566 | |
1567 | static int get_hstate_idx(int page_size_log) |
1568 | { |
1569 | struct hstate *h = hstate_sizelog(page_size_log); |
1570 | |
1571 | if (!h) |
1572 | return -1; |
1573 | return hstate_index(h); |
1574 | } |
1575 | |
1576 | /* |
1577 | * Note that size should be aligned to proper hugepage size in caller side, |
1578 | * otherwise hugetlb_reserve_pages reserves one less hugepages than intended. |
1579 | */ |
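/*
 * For example, a 3 MB request with 2 MB huge pages reserves only one
 * huge page instead of the two needed to cover the range.
 */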
1580 | struct file *hugetlb_file_setup(const char *name, size_t size, |
1581 | vm_flags_t acctflag, int creat_flags, |
1582 | int page_size_log) |
1583 | { |
1584 | struct inode *inode; |
1585 | struct vfsmount *mnt; |
1586 | int hstate_idx; |
1587 | struct file *file; |
1588 | |
1589 | hstate_idx = get_hstate_idx(page_size_log); |
1590 | if (hstate_idx < 0) |
		return ERR_PTR(-ENODEV);
1592 | |
1593 | mnt = hugetlbfs_vfsmount[hstate_idx]; |
1594 | if (!mnt) |
		return ERR_PTR(-ENOENT);
1596 | |
1597 | if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { |
1598 | struct ucounts *ucounts = current_ucounts(); |
1599 | |
1600 | if (user_shm_lock(size, ucounts)) { |
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
				current->comm, current->pid);
1603 | user_shm_unlock(size, ucounts); |
1604 | } |
		return ERR_PTR(-EPERM);
1606 | } |
1607 | |
	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
1610 | if (!inode) |
1611 | goto out; |
1612 | if (creat_flags == HUGETLB_SHMFS_INODE) |
1613 | inode->i_flags |= S_PRIVATE; |
1614 | |
1615 | inode->i_size = size; |
1616 | clear_nlink(inode); |
1617 | |
	if (!hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&hugetlbfs_file_operations);
	if (!IS_ERR(file))
1626 | return file; |
1627 | |
1628 | iput(inode); |
1629 | out: |
1630 | return file; |
1631 | } |
1632 | |
1633 | static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h) |
1634 | { |
1635 | struct fs_context *fc; |
1636 | struct vfsmount *mnt; |
1637 | |
	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
1641 | } else { |
1642 | struct hugetlbfs_fs_context *ctx = fc->fs_private; |
1643 | ctx->hstate = h; |
1644 | mnt = fc_mount(fc); |
1645 | put_fs_context(fc); |
1646 | } |
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %luK",
1649 | huge_page_size(h) / SZ_1K); |
1650 | return mnt; |
1651 | } |
1652 | |
1653 | static int __init init_hugetlbfs_fs(void) |
1654 | { |
1655 | struct vfsmount *mnt; |
1656 | struct hstate *h; |
1657 | int error; |
1658 | int i; |
1659 | |
1660 | if (!hugepages_supported()) { |
		pr_info("disabling because there are no supported hugepage sizes\n");
1662 | return -ENOTSUPP; |
1663 | } |
1664 | |
1665 | error = -ENOMEM; |
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
1669 | if (hugetlbfs_inode_cachep == NULL) |
1670 | goto out; |
1671 | |
1672 | error = register_filesystem(&hugetlbfs_fs_type); |
1673 | if (error) |
1674 | goto out_free; |
1675 | |
1676 | /* default hstate mount is required */ |
	mnt = mount_one_hugetlbfs(&default_hstate);
	if (IS_ERR(mnt)) {
		error = PTR_ERR(mnt);
1680 | goto out_unreg; |
1681 | } |
1682 | hugetlbfs_vfsmount[default_hstate_idx] = mnt; |
1683 | |
1684 | /* other hstates are optional */ |
1685 | i = 0; |
1686 | for_each_hstate(h) { |
1687 | if (i == default_hstate_idx) { |
1688 | i++; |
1689 | continue; |
1690 | } |
1691 | |
1692 | mnt = mount_one_hugetlbfs(h); |
		if (IS_ERR(mnt))
1694 | hugetlbfs_vfsmount[i] = NULL; |
1695 | else |
1696 | hugetlbfs_vfsmount[i] = mnt; |
1697 | i++; |
1698 | } |
1699 | |
1700 | return 0; |
1701 | |
1702 | out_unreg: |
1703 | (void)unregister_filesystem(&hugetlbfs_fs_type); |
1704 | out_free: |
	kmem_cache_destroy(hugetlbfs_inode_cachep);
1706 | out: |
1707 | return error; |
1708 | } |
1709 | fs_initcall(init_hugetlbfs_fs) |
1710 | |