// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "internal.h"
#include "cma.h"
struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
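
/*
 * Worked example (illustrative only, not part of the original source): with
 * order_per_bit == 2, each bitmap bit covers 4 pages.  For a request with
 * align_order == 4 (an alignment of 16 pages):
 *
 *   cma_bitmap_aligned_mask()   -> (1UL << (4 - 2)) - 1 = 3, so the first
 *                                  bit of the run must sit on a 4-bit
 *                                  (16-page) boundary within the bitmap;
 *   cma_bitmap_aligned_offset() -> (base_pfn & 15) >> 2, the bit offset of
 *                                  a possibly unaligned base_pfn;
 *   cma_bitmap_pages_to_bits()  -> ALIGN(count, 4) >> 2, the number of
 *                                  bits needed to cover count pages.
 */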

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned long count)
{
	unsigned long bitmap_no, bitmap_count;
	unsigned long flags;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	spin_lock_irqsave(&cma->lock, flags);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	spin_unlock_irqrestore(&cma->lock, flags);
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

	/*
	 * alloc_contig_range() requires the pfn range specified to be in the
	 * same zone. Simplify by forcing the entire CMA resv range to be in the
	 * same zone.
	 */
	WARN_ON_ONCE(!pfn_valid(base_pfn));
	zone = page_zone(pfn_to_page(base_pfn));
	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
		WARN_ON_ONCE(!pfn_valid(pfn));
		if (page_zone(pfn_to_page(pfn)) != zone)
			goto not_in_zone;
	}

	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

	spin_lock_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	/* Expose all pages to the buddy, they are useless for CMA. */
	if (!cma->reserve_pages_on_error) {
		for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
			free_reserved_page(pfn_to_page(pfn));
	}
	totalcma_pages -= cma->count;
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

void __init cma_reserve_pages_on_error(struct cma *cma)
{
	cma->reserve_pages_on_error = true;
}

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

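/*
 * Illustrative sketch (an assumption, not part of this file): a platform
 * that has already carved out a region with memblock could register it
 * with CMA roughly like this.  my_cma, my_base, my_size and
 * my_platform_cma_init are hypothetical names.
 *
 *	static struct cma *my_cma;
 *
 *	static int __init my_platform_cma_init(void)
 *	{
 *		phys_addr_t my_base = 0x80000000;	// memblock_reserve()d earlier
 *		phys_addr_t my_size = SZ_64M;
 *
 *		return cma_init_reserved_mem(my_base, my_size, 0, "my-region",
 *					     &my_cma);
 *	}
 */
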
/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem().
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_NUMA))
		nid = NUMA_NO_NODE;

	/* Sanitise input arguments. */
	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * If there is enough memory, try a bottom-up allocation first.
		 * It will place the new cma area close to the start of the node
		 * and guarantee that the compaction is moving pages out of the
		 * cma area and not into it.
		 * Avoid using first 4GB to not interfere with constrained zones
		 * like DMA/DMA32.
		 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
			memblock_set_bottom_up(true);
			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
							limit, nid, true);
			memblock_set_bottom_up(false);
		}
#endif

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (!addr && base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa on node %d\n", (unsigned long)size / SZ_1M,
		&base, nid);
	return 0;

free_mem:
	memblock_phys_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB on node %d\n", (unsigned long)size / SZ_1M,
	       nid);
	return ret;
}
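
/*
 * Illustrative sketch (an assumption, not taken from this file): early
 * arch code could ask for a 128 MiB CMA area anywhere in memory like this.
 * fw_cma and fw_cma_setup are hypothetical names.
 *
 *	static struct cma *fw_cma;
 *
 *	void __init fw_cma_setup(void)
 *	{
 *		if (cma_declare_contiguous_nid(0, SZ_128M, 0, 0, 0, false,
 *					       "fw", &fw_cma, NUMA_NO_NODE))
 *			pr_warn("fw: CMA reservation failed\n");
 *	}
 */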

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	spin_lock_irq(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	spin_unlock_irq(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates part of the contiguous memory from the specified
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, unsigned long count,
		       unsigned int align, bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	unsigned long i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count || !cma->bitmap)
		goto out;

	pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__,
		(void *)cma, cma->name, count, align);

	if (!count)
		goto out;

	trace_cma_alloc_start(cma->name, count, align);

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		goto out;

	for (;;) {
		spin_lock_irq(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			spin_unlock_irq(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		spin_unlock_irq(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
				GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at pfn 0x%lx %p is busy, retrying\n",
			 __func__, pfn, pfn_to_page(pfn));

		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc_finish(cma->name, pfn, page, count, align, ret);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(nth_page(page, i));
	}

	if (ret && !no_warn) {
		pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n",
				   __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
out:
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		if (cma)
			cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}
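
/*
 * Illustrative usage sketch (an assumption, not part of this file): a
 * driver holding a struct cma pointer would normally pair cma_alloc()
 * with cma_release().  my_cma and nr_pages are hypothetical.
 *
 *	struct page *page;
 *	unsigned long nr_pages = 16;
 *
 *	page = cma_alloc(my_cma, nr_pages, get_order(SZ_64K), false);
 *	if (!page)
 *		return -ENOMEM;
 *	// ... use the pages ...
 *	cma_release(my_cma, page, nr_pages);
 */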

bool cma_pages_valid(struct cma *cma, const struct page *pages,
		     unsigned long count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
		pr_debug("%s(page %p, count %lu)\n", __func__,
			 (void *)pages, count);
		return false;
	}

	return true;
}

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages,
		 unsigned long count)
{
	unsigned long pfn;

	if (!cma_pages_valid(cma, pages, count))
		return false;

	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(cma->name, pfn, pages, count);

	return true;
}

int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}