// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/mmzone.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>
#include "internal.h"

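/* Index device PFN ranges to their owning dev_pagemap for get_dev_pagemap(). */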
static DEFINE_XARRAY(pgmap_array);

/*
 * The memremap() and memremap_pages() interfaces are alternately used
 * to map persistent memory namespaces. These interfaces place different
 * constraints on the alignment and size of the mapping (namespace).
 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
 * only map subsections (2MB), and on at least one architecture (PowerPC)
 * the minimum mapping granularity of memremap_pages() is 16MB.
 *
 * The role of memremap_compat_align() is to communicate the minimum
 * arch supported alignment of a namespace such that it can freely
 * switch modes without violating the arch constraint. Namely, do not
 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 */
#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
unsigned long memremap_compat_align(void)
{
	return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif

#ifdef CONFIG_FS_DAX
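/*
 * devmap_managed_key gates the extra put-path work for fs/dax pages (see
 * __put_devmap_managed_page_refs() below); it is enabled while at least one
 * MEMORY_DEVICE_FS_DAX pagemap is mapped.
 */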
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);

static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_dec(&devmap_managed_key);
}

static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_inc(&devmap_managed_key);
}
#else
static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
}
static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
}
#endif /* CONFIG_FS_DAX */

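/*
 * Drop a range from pgmap_array and wait out any concurrent RCU-protected
 * lookups (see get_dev_pagemap()) before the caller tears the range down.
 */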
static void pgmap_array_delete(struct range *range)
{
	xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
		       NULL, GFP_KERNEL);
	synchronize_rcu();
}

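/*
 * First usable device PFN in a range. For the first range, skip the pages
 * donated to the vmem_altmap (reserve + free), for which pfn_to_page() is
 * not yet valid.
 */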
static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	unsigned long pfn = PHYS_PFN(range->start);

	if (range_id)
		return pfn;
	return pfn + vmem_altmap_offset(pgmap_altmap(pgmap));
}

bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
	int i;

	for (i = 0; i < pgmap->nr_range; i++) {
		struct range *range = &pgmap->ranges[i];

		if (pfn >= PHYS_PFN(range->start) &&
		    pfn <= PHYS_PFN(range->end))
			return pfn >= pfn_first(pgmap, i);
	}

	return false;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
{
	const struct range *range = &pgmap->ranges[range_id];

	return (range->start + range_len(range)) >> PAGE_SHIFT;
}

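/*
 * Number of pages in a range as counted by the percpu ref, i.e. one unit per
 * (1 << vmemmap_shift) base pages when the pagemap uses compound pages.
 */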
static unsigned long pfn_len(struct dev_pagemap *pgmap, unsigned long range_id)
{
	return (pfn_end(pgmap, range_id) -
		pfn_first(pgmap, range_id)) >> pgmap->vmemmap_shift;
}

static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
{
	struct range *range = &pgmap->ranges[range_id];
	struct page *first_page;

	/* make sure to access a memmap that was actually initialized */
	first_page = pfn_to_page(pfn_first(pgmap, range_id));

	/* pages are dead and unused, undo the arch mapping */
	mem_hotplug_begin();
	remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
				   PHYS_PFN(range_len(range)));
	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
		__remove_pages(PHYS_PFN(range->start),
			       PHYS_PFN(range_len(range)), NULL);
	} else {
		arch_remove_memory(range->start, range_len(range),
				   pgmap_altmap(pgmap));
		kasan_remove_zero_shadow(__va(range->start), range_len(range));
	}
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true);
	pgmap_array_delete(range);
}

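/*
 * Tear down a pagemap set up by memremap_pages(): drop the per-page
 * references taken at map time, wait for all remaining references to go
 * away, then unplug each range.
 */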
void memunmap_pages(struct dev_pagemap *pgmap)
{
	int i;

	percpu_ref_kill(&pgmap->ref);
	if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
	    pgmap->type != MEMORY_DEVICE_COHERENT)
		for (i = 0; i < pgmap->nr_range; i++)
			percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i));

	wait_for_completion(&pgmap->done);

	for (i = 0; i < pgmap->nr_range; i++)
		pageunmap_range(pgmap, i);
	percpu_ref_exit(&pgmap->ref);

	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
	devmap_managed_enable_put(pgmap);
}
EXPORT_SYMBOL_GPL(memunmap_pages);

static void devm_memremap_pages_release(void *data)
{
	memunmap_pages(data);
}

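/* Called once the last reference to pgmap->ref is gone; unblocks memunmap_pages(). */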
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);

	complete(&pgmap->done);
}

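/*
 * Hot-plug one pgmap range as ZONE_DEVICE memory: check that it does not
 * overlap System RAM or another pagemap, register it in pgmap_array, create
 * the memmap (and, except for device-private memory, the linear mapping),
 * and move the PFNs into ZONE_DEVICE.
 */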
static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
		int range_id, int nid)
{
	const bool is_private = pgmap->type == MEMORY_DEVICE_PRIVATE;
	struct range *range = &pgmap->ranges[range_id];
	struct dev_pagemap *conflict_pgmap;
	int error, is_ram;

	if (WARN_ONCE(pgmap_altmap(pgmap) && range_id > 0,
				"altmap not supported for multiple ranges\n"))
		return -EINVAL;

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return -ENOMEM;
	}

	conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
	if (conflict_pgmap) {
		WARN(1, "Conflicting mapping in same section\n");
		put_dev_pagemap(conflict_pgmap);
		return -ENOMEM;
	}

	is_ram = region_intersects(range->start, range_len(range),
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
				is_ram == REGION_MIXED ? "mixed" : "ram",
				range->start, range->end);
		return -ENXIO;
	}

	error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
				PHYS_PFN(range->end), pgmap, GFP_KERNEL));
	if (error)
		return error;

	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &params->pgprot, PHYS_PFN(range->start), 0,
			range_len(range));
	if (error)
		goto err_pfn_remap;

	if (!mhp_range_allowed(range->start, range_len(range), !is_private)) {
		error = -EINVAL;
		goto err_kasan;
	}

	mem_hotplug_begin();

	/*
	 * For device private memory we call add_pages() as we only need to
	 * allocate and initialize struct page for the device memory. Moreover,
	 * the device memory is inaccessible, thus we do not want to create a
	 * linear mapping for the memory like arch_add_memory() would do.
	 *
	 * For all other device memory types, which are accessible by
	 * the CPU, we do want the linear mapping and thus use
	 * arch_add_memory().
	 */
	if (is_private) {
		error = add_pages(nid, PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), params);
	} else {
		error = kasan_add_zero_shadow(__va(range->start), range_len(range));
		if (error) {
			mem_hotplug_done();
			goto err_kasan;
		}

		error = arch_add_memory(nid, range->start, range_len(range),
					params);
	}

	if (!error) {
		struct zone *zone;

		zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
		move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), params->altmap,
				MIGRATE_MOVABLE);
	}

	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	/*
	 * Initialization of the pages has been deferred until now in order
	 * to allow us to do the work while not holding the hotplug lock.
	 */
	memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
				PHYS_PFN(range->start),
				PHYS_PFN(range_len(range)), pgmap);
	if (pgmap->type != MEMORY_DEVICE_PRIVATE &&
	    pgmap->type != MEMORY_DEVICE_COHERENT)
		percpu_ref_get_many(&pgmap->ref, pfn_len(pgmap, range_id));
	return 0;

err_add_memory:
	if (!is_private)
		kasan_remove_zero_shadow(__va(range->start), range_len(range));
err_kasan:
	untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range), true);
err_pfn_remap:
	pgmap_array_delete(range);
	return error;
}

/*
 * Not the device-managed version of devm_memremap_pages(); undone by
 * memunmap_pages(). Please use devm_memremap_pages() if you have a struct
 * device available.
 */
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
	struct mhp_params params = {
		.altmap = pgmap_altmap(pgmap),
		.pgmap = pgmap,
		.pgprot = PAGE_KERNEL,
	};
	const int nr_range = pgmap->nr_range;
	int error, i;

	if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
		return ERR_PTR(-EINVAL);

	switch (pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
		if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
			WARN(1, "Device private memory not supported\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
			WARN(1, "Missing migrate_to_ram method\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->ops->page_free) {
			WARN(1, "Missing page_free method\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->owner) {
			WARN(1, "Missing owner\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_COHERENT:
		if (!pgmap->ops->page_free) {
			WARN(1, "Missing page_free method\n");
			return ERR_PTR(-EINVAL);
		}
		if (!pgmap->owner) {
			WARN(1, "Missing owner\n");
			return ERR_PTR(-EINVAL);
		}
		break;
	case MEMORY_DEVICE_FS_DAX:
		if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
			WARN(1, "File system DAX not supported\n");
			return ERR_PTR(-EINVAL);
		}
		params.pgprot = pgprot_decrypted(params.pgprot);
		break;
	case MEMORY_DEVICE_GENERIC:
		break;
	case MEMORY_DEVICE_PCI_P2PDMA:
		params.pgprot = pgprot_noncached(params.pgprot);
		break;
	default:
		WARN(1, "Invalid pgmap type %d\n", pgmap->type);
		break;
	}

	init_completion(&pgmap->done);
	error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
				GFP_KERNEL);
	if (error)
		return ERR_PTR(error);

	devmap_managed_enable_get(pgmap);

	/*
	 * Clear the pgmap nr_range as it will be incremented for each
	 * successfully processed range. This communicates how many
	 * regions to unwind in the abort case.
	 */
	pgmap->nr_range = 0;
	error = 0;
	for (i = 0; i < nr_range; i++) {
		error = pagemap_range(pgmap, &params, i, nid);
		if (error)
			break;
		pgmap->nr_range++;
	}

	if (i < nr_range) {
		memunmap_pages(pgmap);
		pgmap->nr_range = nr_range;
		return ERR_PTR(error);
	}

	return __va(pgmap->ranges[0].start);
}
EXPORT_SYMBOL_GPL(memremap_pages);

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the range and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref must
 *    be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ range is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	void *ret;

	ret = memremap_pages(pgmap, dev_to_node(dev));
	if (IS_ERR(ret))
		return ret;

	error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
			pgmap);
	if (error)
		return ERR_PTR(error);
	return ret;
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
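
/*
 * Example use of devm_memremap_pages() (a sketch; the resource and error
 * handling are illustrative, this interface itself only requires the
 * range, nr_range and type fields to be set):
 *
 *	pgmap->type = MEMORY_DEVICE_GENERIC;
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	pgmap->nr_range = 1;
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */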

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	if (altmap)
		return altmap->reserve + altmap->free;
	return 0;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up a dev_pagemap for
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->range.start && phys <= pgmap->range.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live_rcu(&pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
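
/*
 * Typical lookup pattern (a sketch; the error handling is illustrative): a
 * successful lookup must be balanced with put_dev_pagemap(), and a cached
 * pgmap may be passed back in to skip the xarray walk:
 *
 *	pgmap = get_dev_pagemap(pfn, pgmap);
 *	if (!pgmap)
 *		return -EFAULT;
 *	...
 *	put_dev_pagemap(pgmap);
 */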

void free_zone_device_page(struct page *page)
{
	if (WARN_ON_ONCE(!page->pgmap->ops || !page->pgmap->ops->page_free))
		return;

	mem_cgroup_uncharge(page_folio(page));

	/*
	 * Note: we don't expect anonymous compound pages yet. Once supported
	 * and we could PTE-map them similar to THP, we'd have to clear
	 * PG_anon_exclusive on all tail pages.
	 */
	VM_BUG_ON_PAGE(PageAnon(page) && PageCompound(page), page);
	if (PageAnon(page))
		__ClearPageAnonExclusive(page);

	/*
	 * When a device managed page is freed, the page->mapping field
	 * may still contain a (stale) mapping value. For example, the
	 * lower bits of page->mapping may still identify the page as an
	 * anonymous page. Ultimately, this entire field is just stale
	 * and wrong, and it will cause errors if not cleared. One
	 * example is:
	 *
	 *  migrate_vma_pages()
	 *    migrate_vma_insert_page()
	 *      page_add_new_anon_rmap()
	 *        __page_set_anon_rmap()
	 *          ...checks page->mapping, via PageAnon(page) call,
	 *            and incorrectly concludes that the page is an
	 *            anonymous page. Therefore, it incorrectly,
	 *            silently fails to set up the new anon rmap.
	 *
	 * For other types of ZONE_DEVICE pages, migration is either
	 * handled differently or not done at all, so there is no need
	 * to clear page->mapping.
	 */
	page->mapping = NULL;
	page->pgmap->ops->page_free(page);

	if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
	    page->pgmap->type != MEMORY_DEVICE_COHERENT)
		/*
		 * Reset the page count to 1 to prepare for handing out the
		 * page again.
		 */
		set_page_count(page, 1);
	else
		put_dev_pagemap(page->pgmap);
}

void zone_device_page_init(struct page *page)
{
	/*
	 * Drivers shouldn't be allocating pages after calling
	 * memunmap_pages().
	 */
	WARN_ON_ONCE(!percpu_ref_tryget_live(&page->pgmap->ref));
	set_page_count(page, 1);
	lock_page(page);
}
EXPORT_SYMBOL_GPL(zone_device_page_init);

#ifdef CONFIG_FS_DAX
bool __put_devmap_managed_page_refs(struct page *page, int refs)
{
	if (page->pgmap->type != MEMORY_DEVICE_FS_DAX)
		return false;

	/*
	 * fsdax page refcounts are 1-based, rather than 0-based: if
	 * refcount is 1, then the page is free and the refcount is
	 * stable because nobody holds a reference on the page.
	 */
	if (page_ref_sub_return(page, refs) == 1)
		wake_up_var(&page->_refcount);
	return true;
}
EXPORT_SYMBOL(__put_devmap_managed_page_refs);
#endif /* CONFIG_FS_DAX */
551 | |