1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
2 | #ifndef _LINUX_MEMBLOCK_H |
3 | #define _LINUX_MEMBLOCK_H |
4 | |
5 | /* |
6 | * Logical memory blocks. |
7 | * |
8 | * Copyright (C) 2001 Peter Bergner, IBM Corp. |
9 | */ |
10 | |
11 | #include <linux/init.h> |
12 | #include <linux/mm.h> |
13 | #include <asm/dma.h> |
14 | |
15 | extern unsigned long max_low_pfn; |
16 | extern unsigned long min_low_pfn; |
17 | |
18 | /* |
19 | * highest page |
20 | */ |
21 | extern unsigned long max_pfn; |
22 | /* |
23 | * highest possible page |
24 | */ |
25 | extern unsigned long long max_possible_pfn; |
26 | |
27 | /** |
28 | * enum memblock_flags - definition of memory region attributes |
29 | * @MEMBLOCK_NONE: no special request |
30 | * @MEMBLOCK_HOTPLUG: memory region indicated in the firmware-provided memory |
31 | * map during early boot as hot(un)pluggable system RAM (e.g., memory range |
32 | * that might get hotunplugged later). With "movable_node" set on the kernel |
33 | * commandline, try keeping this memory region hotunpluggable. Does not apply |
34 | * to memblocks added ("hotplugged") after early boot. |
35 | * @MEMBLOCK_MIRROR: mirrored region |
36 | * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as |
37 | * reserved in the memory map; refer to memblock_mark_nomap() description |
38 | * for further details |
39 | * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added |
40 | * via a driver, and never indicated in the firmware-provided memory map as |
41 | * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the |
42 | * kernel resource tree. |
43 | * @MEMBLOCK_RSRV_NOINIT: memory region for which struct pages are |
44 | * not initialized (only for reserved regions). |
45 | */ |
46 | enum memblock_flags { |
47 | MEMBLOCK_NONE = 0x0, /* No special request */ |
48 | MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */ |
49 | MEMBLOCK_MIRROR = 0x2, /* mirrored region */ |
50 | MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */ |
51 | MEMBLOCK_DRIVER_MANAGED = 0x8, /* always detected via a driver */ |
52 | MEMBLOCK_RSRV_NOINIT = 0x10, /* don't initialize struct pages */ |
53 | }; |
54 | |
55 | /** |
56 | * struct memblock_region - represents a memory region |
57 | * @base: base address of the region |
58 | * @size: size of the region |
59 | * @flags: memory region attributes |
60 | * @nid: NUMA node id |
61 | */ |
62 | struct memblock_region { |
63 | phys_addr_t base; |
64 | phys_addr_t size; |
65 | enum memblock_flags flags; |
66 | #ifdef CONFIG_NUMA |
67 | int nid; |
68 | #endif |
69 | }; |
70 | |
71 | /** |
72 | * struct memblock_type - collection of memory regions of certain type |
73 | * @cnt: number of regions |
74 | * @max: size of the allocated array |
75 | * @total_size: size of all regions |
76 | * @regions: array of regions |
77 | * @name: the memory type symbolic name |
78 | */ |
79 | struct memblock_type { |
80 | unsigned long cnt; |
81 | unsigned long max; |
82 | phys_addr_t total_size; |
83 | struct memblock_region *regions; |
84 | char *name; |
85 | }; |
86 | |
87 | /** |
88 | * struct memblock - memblock allocator metadata |
 * @bottom_up: true if memory is allocated in the bottom-up direction
90 | * @current_limit: physical address of the current allocation limit |
91 | * @memory: usable memory regions |
92 | * @reserved: reserved memory regions |
93 | */ |
94 | struct memblock { |
	bool bottom_up;	/* allocate in bottom-up direction if true */
96 | phys_addr_t current_limit; |
97 | struct memblock_type memory; |
98 | struct memblock_type reserved; |
99 | }; |
100 | |
101 | extern struct memblock memblock; |
102 | |
103 | #ifndef CONFIG_ARCH_KEEP_MEMBLOCK |
104 | #define __init_memblock __meminit |
105 | #define __initdata_memblock __meminitdata |
106 | void memblock_discard(void); |
107 | #else |
108 | #define __init_memblock |
109 | #define __initdata_memblock |
110 | static inline void memblock_discard(void) {} |
111 | #endif |
112 | |
113 | void memblock_allow_resize(void); |
114 | int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid, |
115 | enum memblock_flags flags); |
116 | int memblock_add(phys_addr_t base, phys_addr_t size); |
117 | int memblock_remove(phys_addr_t base, phys_addr_t size); |
118 | int memblock_phys_free(phys_addr_t base, phys_addr_t size); |
119 | int memblock_reserve(phys_addr_t base, phys_addr_t size); |
120 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
121 | int memblock_physmem_add(phys_addr_t base, phys_addr_t size); |
122 | #endif |
123 | void memblock_trim_memory(phys_addr_t align); |
124 | bool memblock_overlaps_region(struct memblock_type *type, |
125 | phys_addr_t base, phys_addr_t size); |
126 | int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); |
127 | int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); |
128 | int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); |
129 | int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); |
130 | int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); |
131 | int memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size); |
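
/*
 * A minimal early-boot sketch; the caller and the addresses below are
 * hypothetical (real platforms take them from the firmware memory map).
 * This registers 512M of RAM at 1M with memblock_add() and then carves
 * 16M at 16M back out with memblock_reserve():
 *
 *	void __init example_register_ram(void)
 *	{
 *		memblock_add(0x100000, 0x20000000);
 *		memblock_reserve(0x1000000, 0x1000000);
 *	}
 */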
132 | |
133 | void memblock_free_all(void); |
134 | void memblock_free(void *ptr, size_t size); |
135 | void reset_all_zones_managed_pages(void); |
136 | |
137 | /* Low level functions */ |
138 | void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags, |
139 | struct memblock_type *type_a, |
140 | struct memblock_type *type_b, phys_addr_t *out_start, |
141 | phys_addr_t *out_end, int *out_nid); |
142 | |
143 | void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags, |
144 | struct memblock_type *type_a, |
145 | struct memblock_type *type_b, phys_addr_t *out_start, |
146 | phys_addr_t *out_end, int *out_nid); |
147 | |
148 | void memblock_free_late(phys_addr_t base, phys_addr_t size); |
149 | |
150 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
151 | static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, |
152 | phys_addr_t *out_start, |
153 | phys_addr_t *out_end) |
154 | { |
155 | extern struct memblock_type physmem; |
156 | |
157 | __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type, |
158 | out_start, out_end, NULL); |
159 | } |
160 | |
161 | /** |
162 | * for_each_physmem_range - iterate through physmem areas not included in type. |
163 | * @i: u64 used as loop variable |
164 | * @type: ptr to memblock_type which excludes from the iteration, can be %NULL |
165 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL |
166 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
167 | */ |
168 | #define for_each_physmem_range(i, type, p_start, p_end) \ |
169 | for (i = 0, __next_physmem_range(&i, type, p_start, p_end); \ |
170 | i != (u64)ULLONG_MAX; \ |
171 | __next_physmem_range(&i, type, p_start, p_end)) |
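
/*
 * Usage sketch: walk the ranges that firmware reported but that are not
 * part of a given type; passing &memblock.memory, for example, yields
 * physical memory that was removed from the usable map:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_physmem_range(i, &memblock.memory, &start, &end)
 *		pr_info("physmem: %pa..%pa\n", &start, &end);
 */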
172 | #endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */ |
173 | |
174 | /** |
175 | * __for_each_mem_range - iterate through memblock areas from type_a and not |
176 | * included in type_b. Or just type_a if type_b is NULL. |
177 | * @i: u64 used as loop variable |
178 | * @type_a: ptr to memblock_type to iterate |
179 | * @type_b: ptr to memblock_type which excludes from the iteration |
180 | * @nid: node selector, %NUMA_NO_NODE for all nodes |
181 | * @flags: pick from blocks based on memory attributes |
182 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL |
183 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
184 | * @p_nid: ptr to int for nid of the range, can be %NULL |
185 | */ |
186 | #define __for_each_mem_range(i, type_a, type_b, nid, flags, \ |
187 | p_start, p_end, p_nid) \ |
188 | for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \ |
189 | p_start, p_end, p_nid); \ |
190 | i != (u64)ULLONG_MAX; \ |
191 | __next_mem_range(&i, nid, flags, type_a, type_b, \ |
192 | p_start, p_end, p_nid)) |
193 | |
194 | /** |
195 | * __for_each_mem_range_rev - reverse iterate through memblock areas from |
196 | * type_a and not included in type_b. Or just type_a if type_b is NULL. |
197 | * @i: u64 used as loop variable |
198 | * @type_a: ptr to memblock_type to iterate |
199 | * @type_b: ptr to memblock_type which excludes from the iteration |
200 | * @nid: node selector, %NUMA_NO_NODE for all nodes |
201 | * @flags: pick from blocks based on memory attributes |
202 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL |
203 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
204 | * @p_nid: ptr to int for nid of the range, can be %NULL |
205 | */ |
206 | #define __for_each_mem_range_rev(i, type_a, type_b, nid, flags, \ |
207 | p_start, p_end, p_nid) \ |
208 | for (i = (u64)ULLONG_MAX, \ |
209 | __next_mem_range_rev(&i, nid, flags, type_a, type_b, \ |
210 | p_start, p_end, p_nid); \ |
211 | i != (u64)ULLONG_MAX; \ |
212 | __next_mem_range_rev(&i, nid, flags, type_a, type_b, \ |
213 | p_start, p_end, p_nid)) |
214 | |
215 | /** |
216 | * for_each_mem_range - iterate through memory areas. |
217 | * @i: u64 used as loop variable |
218 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL |
219 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
220 | */ |
221 | #define for_each_mem_range(i, p_start, p_end) \ |
222 | __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \ |
223 | MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED, \ |
224 | p_start, p_end, NULL) |
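
/*
 * Usage sketch (the loop body is a hypothetical example):
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_mem_range(i, &start, &end)
 *		pr_info("RAM: %pa..%pa\n", &start, &end);
 */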
225 | |
226 | /** |
 * for_each_mem_range_rev - reverse iterate through memory areas.
229 | * @i: u64 used as loop variable |
230 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL |
231 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
232 | */ |
233 | #define for_each_mem_range_rev(i, p_start, p_end) \ |
234 | __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \ |
235 | MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED,\ |
236 | p_start, p_end, NULL) |
237 | |
238 | /** |
239 | * for_each_reserved_mem_range - iterate over all reserved memblock areas |
240 | * @i: u64 used as loop variable |
241 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL |
242 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
243 | * |
244 | * Walks over reserved areas of memblock. Available as soon as memblock |
245 | * is initialized. |
246 | */ |
247 | #define for_each_reserved_mem_range(i, p_start, p_end) \ |
248 | __for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE, \ |
249 | MEMBLOCK_NONE, p_start, p_end, NULL) |
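
/*
 * Usage sketch: total up all reserved memory (names are hypothetical):
 *
 *	phys_addr_t start, end, total = 0;
 *	u64 i;
 *
 *	for_each_reserved_mem_range(i, &start, &end)
 *		total += end - start;
 */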
250 | |
251 | static inline bool memblock_is_hotpluggable(struct memblock_region *m) |
252 | { |
253 | return m->flags & MEMBLOCK_HOTPLUG; |
254 | } |
255 | |
256 | static inline bool memblock_is_mirror(struct memblock_region *m) |
257 | { |
258 | return m->flags & MEMBLOCK_MIRROR; |
259 | } |
260 | |
261 | static inline bool memblock_is_nomap(struct memblock_region *m) |
262 | { |
263 | return m->flags & MEMBLOCK_NOMAP; |
264 | } |
265 | |
266 | static inline bool memblock_is_reserved_noinit(struct memblock_region *m) |
267 | { |
268 | return m->flags & MEMBLOCK_RSRV_NOINIT; |
269 | } |
270 | |
271 | static inline bool memblock_is_driver_managed(struct memblock_region *m) |
272 | { |
273 | return m->flags & MEMBLOCK_DRIVER_MANAGED; |
274 | } |
275 | |
276 | int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, |
277 | unsigned long *end_pfn); |
278 | void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, |
279 | unsigned long *out_end_pfn, int *out_nid); |
280 | |
281 | /** |
282 | * for_each_mem_pfn_range - early memory pfn range iterator |
283 | * @i: an integer used as loop variable |
284 | * @nid: node selector, %MAX_NUMNODES for all nodes |
285 | * @p_start: ptr to ulong for start pfn of the range, can be %NULL |
286 | * @p_end: ptr to ulong for end pfn of the range, can be %NULL |
287 | * @p_nid: ptr to int for nid of the range, can be %NULL |
288 | * |
289 | * Walks over configured memory ranges. |
290 | */ |
291 | #define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \ |
292 | for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \ |
293 | i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid)) |
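
/*
 * Usage sketch: count the pages present on one node; note that @i is a
 * plain int for this iterator, not a u64 (nid is assumed to be set):
 *
 *	unsigned long start, end, pages = 0;
 *	int i;
 *
 *	for_each_mem_pfn_range(i, nid, &start, &end, NULL)
 *		pages += end - start;
 */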
294 | |
295 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
296 | void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, |
297 | unsigned long *out_spfn, |
298 | unsigned long *out_epfn); |
299 | /** |
300 | * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free |
301 | * memblock areas |
302 | * @i: u64 used as loop variable |
303 | * @zone: zone in which all of the memory blocks reside |
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
309 | * assumption is that the zone start, end, and pgdat have been associated. |
310 | * This way we can use the zone to determine NUMA node, and if a given part |
311 | * of the memblock is valid for the zone. |
312 | */ |
313 | #define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end) \ |
314 | for (i = 0, \ |
315 | __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end); \ |
316 | i != U64_MAX; \ |
317 | __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end)) |
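
/*
 * Usage sketch for deferred struct page init; @zone is assumed to be an
 * already associated zone and example_init_pages() is a hypothetical
 * helper:
 *
 *	unsigned long spfn, epfn;
 *	u64 i;
 *
 *	for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn)
 *		example_init_pages(spfn, epfn);
 */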
318 | |
319 | /** |
320 | * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific |
321 | * free memblock areas from a given point |
322 | * @i: u64 used as loop variable |
323 | * @zone: zone in which all of the memory blocks reside |
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
326 | * |
327 | * Walks over free (memory && !reserved) areas of memblock in a specific |
328 | * zone, continuing from current position. Available as soon as memblock is |
329 | * initialized. |
330 | */ |
331 | #define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \ |
332 | for (; i != U64_MAX; \ |
333 | __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end)) |
334 | |
335 | int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask); |
336 | |
337 | #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ |
338 | |
339 | /** |
340 | * for_each_free_mem_range - iterate through free memblock areas |
341 | * @i: u64 used as loop variable |
342 | * @nid: node selector, %NUMA_NO_NODE for all nodes |
343 | * @flags: pick from blocks based on memory attributes |
344 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL |
345 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
346 | * @p_nid: ptr to int for nid of the range, can be %NULL |
347 | * |
348 | * Walks over free (memory && !reserved) areas of memblock. Available as |
349 | * soon as memblock is initialized. |
350 | */ |
351 | #define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \ |
352 | __for_each_mem_range(i, &memblock.memory, &memblock.reserved, \ |
353 | nid, flags, p_start, p_end, p_nid) |
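
/*
 * Usage sketch: total up the free memory visible to the allocator
 * (nid is assumed to be set; MEMBLOCK_NONE applies no flag filter):
 *
 *	phys_addr_t start, end, free = 0;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL)
 *		free += end - start;
 */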
354 | |
355 | /** |
356 | * for_each_free_mem_range_reverse - rev-iterate through free memblock areas |
357 | * @i: u64 used as loop variable |
358 | * @nid: node selector, %NUMA_NO_NODE for all nodes |
359 | * @flags: pick from blocks based on memory attributes |
360 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL |
361 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
362 | * @p_nid: ptr to int for nid of the range, can be %NULL |
363 | * |
364 | * Walks over free (memory && !reserved) areas of memblock in reverse |
365 | * order. Available as soon as memblock is initialized. |
366 | */ |
367 | #define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \ |
368 | p_nid) \ |
369 | __for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ |
370 | nid, flags, p_start, p_end, p_nid) |
371 | |
372 | int memblock_set_node(phys_addr_t base, phys_addr_t size, |
373 | struct memblock_type *type, int nid); |
374 | |
375 | #ifdef CONFIG_NUMA |
376 | static inline void memblock_set_region_node(struct memblock_region *r, int nid) |
377 | { |
378 | r->nid = nid; |
379 | } |
380 | |
381 | static inline int memblock_get_region_node(const struct memblock_region *r) |
382 | { |
383 | return r->nid; |
384 | } |
385 | #else |
386 | static inline void memblock_set_region_node(struct memblock_region *r, int nid) |
387 | { |
388 | } |
389 | |
390 | static inline int memblock_get_region_node(const struct memblock_region *r) |
391 | { |
392 | return 0; |
393 | } |
394 | #endif /* CONFIG_NUMA */ |
395 | |
396 | /* Flags for memblock allocation APIs */ |
397 | #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) |
398 | #define MEMBLOCK_ALLOC_ACCESSIBLE 0 |
399 | #define MEMBLOCK_ALLOC_NOLEAKTRACE 1 |
400 | |
401 | /* We are using top down, so it is safe to use 0 here */ |
402 | #define MEMBLOCK_LOW_LIMIT 0 |
403 | |
404 | #ifndef ARCH_LOW_ADDRESS_LIMIT |
405 | #define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL |
406 | #endif |
407 | |
408 | phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align, |
409 | phys_addr_t start, phys_addr_t end); |
410 | phys_addr_t memblock_alloc_range_nid(phys_addr_t size, |
411 | phys_addr_t align, phys_addr_t start, |
412 | phys_addr_t end, int nid, bool exact_nid); |
413 | phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); |
414 | |
415 | static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size, |
416 | phys_addr_t align) |
417 | { |
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
420 | } |
421 | |
422 | void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align, |
423 | phys_addr_t min_addr, phys_addr_t max_addr, |
424 | int nid); |
425 | void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align, |
426 | phys_addr_t min_addr, phys_addr_t max_addr, |
427 | int nid); |
428 | void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, |
429 | phys_addr_t min_addr, phys_addr_t max_addr, |
430 | int nid); |
431 | |
432 | static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align) |
433 | { |
434 | return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, |
435 | MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); |
436 | } |
437 | |
438 | static inline void *memblock_alloc_raw(phys_addr_t size, |
439 | phys_addr_t align) |
440 | { |
441 | return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT, |
442 | MEMBLOCK_ALLOC_ACCESSIBLE, |
443 | NUMA_NO_NODE); |
444 | } |
445 | |
446 | static inline void *memblock_alloc_from(phys_addr_t size, |
447 | phys_addr_t align, |
448 | phys_addr_t min_addr) |
449 | { |
450 | return memblock_alloc_try_nid(size, align, min_addr, |
451 | MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); |
452 | } |
453 | |
454 | static inline void *memblock_alloc_low(phys_addr_t size, |
455 | phys_addr_t align) |
456 | { |
457 | return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, |
458 | ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE); |
459 | } |
460 | |
461 | static inline void *memblock_alloc_node(phys_addr_t size, |
462 | phys_addr_t align, int nid) |
463 | { |
464 | return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, |
465 | MEMBLOCK_ALLOC_ACCESSIBLE, nid); |
466 | } |
467 | |
468 | /* |
469 | * Set the allocation direction to bottom-up or top-down. |
470 | */ |
471 | static inline __init_memblock void memblock_set_bottom_up(bool enable) |
472 | { |
473 | memblock.bottom_up = enable; |
474 | } |
475 | |
476 | /* |
477 | * Check if the allocation direction is bottom-up or not. |
478 | * if this is true, that said, memblock will allocate memory |
479 | * in bottom-up direction. |
480 | */ |
481 | static inline __init_memblock bool memblock_bottom_up(void) |
482 | { |
483 | return memblock.bottom_up; |
484 | } |
485 | |
486 | phys_addr_t memblock_phys_mem_size(void); |
487 | phys_addr_t memblock_reserved_size(void); |
488 | phys_addr_t memblock_start_of_DRAM(void); |
489 | phys_addr_t memblock_end_of_DRAM(void); |
490 | void memblock_enforce_memory_limit(phys_addr_t memory_limit); |
491 | void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size); |
492 | void memblock_mem_limit_remove_map(phys_addr_t limit); |
493 | bool memblock_is_memory(phys_addr_t addr); |
494 | bool memblock_is_map_memory(phys_addr_t addr); |
495 | bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size); |
496 | bool memblock_is_reserved(phys_addr_t addr); |
497 | bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); |
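
/*
 * Usage sketch: validate a candidate range before reserving it
 * (base and size are hypothetical inputs):
 *
 *	if (memblock_is_region_memory(base, size) &&
 *	    !memblock_is_region_reserved(base, size))
 *		memblock_reserve(base, size);
 */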
498 | |
499 | void memblock_dump_all(void); |
500 | |
501 | /** |
502 | * memblock_set_current_limit - Set the current allocation limit to allow |
503 | * limiting allocations to what is currently |
504 | * accessible during boot |
505 | * @limit: New limit value (physical address) |
506 | */ |
507 | void memblock_set_current_limit(phys_addr_t limit); |
508 | |
509 | |
510 | phys_addr_t memblock_get_current_limit(void); |
511 | |
512 | /* |
513 | * pfn conversion functions |
514 | * |
515 | * While the memory MEMBLOCKs should always be page aligned, the reserved |
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
518 | */ |
519 | |
520 | /** |
521 | * memblock_region_memory_base_pfn - get the lowest pfn of the memory region |
522 | * @reg: memblock_region structure |
523 | * |
524 | * Return: the lowest pfn intersecting with the memory region |
525 | */ |
526 | static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg) |
527 | { |
528 | return PFN_UP(reg->base); |
529 | } |
530 | |
531 | /** |
532 | * memblock_region_memory_end_pfn - get the end pfn of the memory region |
533 | * @reg: memblock_region structure |
534 | * |
 * Return: the end_pfn of the memory region
536 | */ |
537 | static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg) |
538 | { |
539 | return PFN_DOWN(reg->base + reg->size); |
540 | } |
541 | |
542 | /** |
543 | * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region |
544 | * @reg: memblock_region structure |
545 | * |
546 | * Return: the lowest pfn intersecting with the reserved region |
547 | */ |
548 | static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg) |
549 | { |
550 | return PFN_DOWN(reg->base); |
551 | } |
552 | |
553 | /** |
554 | * memblock_region_reserved_end_pfn - get the end pfn of the reserved region |
555 | * @reg: memblock_region structure |
556 | * |
557 | * Return: the end_pfn of the reserved region |
558 | */ |
559 | static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg) |
560 | { |
561 | return PFN_UP(reg->base + reg->size); |
562 | } |
563 | |
564 | /** |
 * for_each_mem_region - iterate over memory regions
566 | * @region: loop variable |
567 | */ |
568 | #define for_each_mem_region(region) \ |
569 | for (region = memblock.memory.regions; \ |
570 | region < (memblock.memory.regions + memblock.memory.cnt); \ |
571 | region++) |
572 | |
573 | /** |
 * for_each_reserved_mem_region - iterate over reserved memory regions
575 | * @region: loop variable |
576 | */ |
577 | #define for_each_reserved_mem_region(region) \ |
578 | for (region = memblock.reserved.regions; \ |
579 | region < (memblock.reserved.regions + memblock.reserved.cnt); \ |
580 | region++) |
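
/*
 * Usage sketch: dump each region together with one of its attributes
 * (the loop body is a hypothetical example):
 *
 *	struct memblock_region *r;
 *
 *	for_each_mem_region(r)
 *		pr_info("%pa+%pa%s\n", &r->base, &r->size,
 *			memblock_is_hotpluggable(r) ? " (hotplug)" : "");
 */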
581 | |
582 | extern void *alloc_large_system_hash(const char *tablename, |
583 | unsigned long bucketsize, |
584 | unsigned long numentries, |
585 | int scale, |
586 | int flags, |
587 | unsigned int *_hash_shift, |
588 | unsigned int *_hash_mask, |
589 | unsigned long low_limit, |
590 | unsigned long high_limit); |
591 | |
592 | #define HASH_EARLY 0x00000001 /* Allocating during early boot? */ |
593 | #define HASH_ZERO 0x00000002 /* Zero allocated hash table */ |
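
/*
 * Usage sketch, modelled on early hash table setup such as
 * inode_init_early(); all "example_*" names are hypothetical:
 *
 *	example_table =
 *		alloc_large_system_hash("Example-cache",
 *					sizeof(struct hlist_head),
 *					example_entries, 14,
 *					HASH_EARLY | HASH_ZERO,
 *					&example_shift, &example_mask,
 *					0, 0);
 */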
594 | |
/*
 * Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
598 | #ifdef CONFIG_NUMA |
599 | #define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT) |
600 | extern int hashdist; /* Distribute hashes across NUMA nodes? */ |
601 | #else |
602 | #define hashdist (0) |
603 | #endif |
604 | |
605 | #ifdef CONFIG_MEMTEST |
606 | void early_memtest(phys_addr_t start, phys_addr_t end); |
607 | void memtest_report_meminfo(struct seq_file *m); |
608 | #else |
609 | static inline void early_memtest(phys_addr_t start, phys_addr_t end) { } |
610 | static inline void memtest_report_meminfo(struct seq_file *m) { } |
611 | #endif |
612 | |
613 | |
614 | #endif /* _LINUX_MEMBLOCK_H */ |
615 | |