/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <trace/events/cma.h>

#include "cma.h"

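/*
 * All CMA areas registered at boot time; only the first cma_area_count
 * entries of cma_areas[] are valid. cma_mutex serializes the
 * alloc_contig_range() calls issued on behalf of any area.
 */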
struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name ? cma->name : "(undefined)";
}

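/*
 * Alignment mask, in bitmap-bit units, applied when searching the bitmap so
 * that allocations honour the requested page alignment (align_order).
 * Alignments no stricter than order_per_bit are already implied by the
 * bitmap granularity and need no extra mask.
 */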
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The returned value is expressed in units of order_per_bit.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

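/*
 * Convert a page count to the number of bitmap bits needed to cover it,
 * rounding up to a whole multiple of (1 << order_per_bit) pages.
 */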
static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

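/*
 * Release the bitmap bits backing the range [pfn, pfn + count) so the pages
 * can be handed out again by a later cma_alloc().
 */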
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

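/*
 * Activate one reserved area: allocate its allocation bitmap, check that
 * every pageblock of the reservation sits in a single zone (required by
 * alloc_contig_range()), and hand the pageblocks to the buddy allocator as
 * MIGRATE_CMA via init_cma_reserved_pageblock().
 */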
static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto not_in_zone;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

not_in_zone:
	pr_err("CMA area %s could not be activated\n", cma->name);
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

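/*
 * Walk every area registered by cma_init_reserved_mem() /
 * cma_declare_contiguous() and activate it. Runs as a core_initcall so the
 * slab allocator is available for the bitmap allocation.
 */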
static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like the slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	if (name) {
		cma->name = name;
	} else {
		cma->name = kasprintf(GFP_KERNEL, "cma%d", cma_area_count);
		if (!cma->name)
			return -ENOMEM;
	}
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}

/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem().
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
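 *
 * For illustration only (the area name and the my_cma pointer below are
 * hypothetical), an arch setup path could reserve a 64 MiB area placed
 * anywhere in memory with:
 *
 *	struct cma *my_cma;
 *	int ret = cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
 *					 "example", &my_cma);
 *
 * A zero return means the area was registered and @res_cma points at it;
 * a negative errno is returned on failure.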
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pageblocks by the buddy allocator; in that
	 * case a later contiguous allocation could fail, which is not what
	 * we want. Enforce pageblock/MAX_ORDER alignment to avoid this.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_phys_alloc_range(size, alignment,
							 highmem_start, limit);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_phys_alloc_range(size, alignment, base,
							 limit);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

#ifdef CONFIG_CMA_DEBUG
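/*
 * Dump, as "<pages>@<bit offset>" runs, the chunks of the area that are
 * still free according to the allocation bitmap. Only built when
 * CONFIG_CMA_DEBUG is enabled; used to aid debugging of failed cma_alloc()
 * calls.
 */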
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		/* each bitmap bit covers 1 << order_per_bit pages */
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation
 *
 * This function allocates pages from a specific contiguous memory area. It
 * returns a pointer to the first allocated page on success, or NULL on
 * failure.
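 *
 * For illustration only (my_cma is a hypothetical area obtained earlier from
 * cma_declare_contiguous()), a caller could request sixteen contiguous pages
 * aligned to a sixteen-page boundary and later free them again with:
 *
 *	struct page *pages = cma_alloc(my_cma, 16, 4, false);
 *
 *	if (pages)
 *		cma_release(my_cma, pages, 16);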
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       bool no_warn)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	size_t i;
	struct page *page = NULL;
	int ret = -ENOMEM;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
					 GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !no_warn) {
		pr_err("%s: alloc failed, req-size: %zu pages, ret: %d\n",
		       __func__, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

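/*
 * Invoke @it for each registered CMA area, in registration order, passing
 * @data through. Iteration stops early and the callback's non-zero return
 * value is propagated if @it reports an error.
 */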
int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}