// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu-km.c - kernel memory based chunk allocation
 *
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
 *
 * Chunks are allocated as contiguous kernel memory using gfp
 * allocation.  This is to be used on nommu architectures.
 *
 * To use percpu-km,
 *
 * - define CONFIG_NEED_PER_CPU_KM from the arch Kconfig.
 *
 * - CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK must not be defined.  It's
 *   not compatible with PER_CPU_KM.  EMBED_FIRST_CHUNK should work
 *   fine.
 *
 * - NUMA is not supported.  When setting up the first chunk,
 *   @cpu_distance_fn should be NULL or report all CPUs to be nearer
 *   than or at LOCAL_DISTANCE.
 *
 * - It's best if the chunk size is a power-of-two multiple of
 *   PAGE_SIZE.  Because each chunk is allocated as a contiguous
 *   kernel memory block using alloc_pages(), memory will be wasted if
 *   the chunk size is not aligned.  percpu-km code will whine about it.
 */
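
/*
 * Arch-side hookup, sketched as Kconfig (illustrative only; the exact
 * definition of the NEED_PER_CPU_KM symbol in any given tree may
 * differ):
 *
 *	config NEED_PER_CPU_KM
 *		def_bool y
 */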

#if defined(CONFIG_SMP) && defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#error "contiguous percpu allocation is incompatible with paged first chunk"
#endif

#include <linux/log2.h>

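/*
 * percpu-km backs every chunk with a single contiguous allocation and
 * never maps or unmaps pages at run time, so there is no TLB state to
 * flush after an unmap.
 */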
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
                                      int page_start, int page_end)
{
        /* nothing */
}

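/*
 * Chunks are fully populated at creation time, so on-demand population
 * requests trivially succeed.
 */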
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
                               int page_start, int page_end, gfp_t gfp)
{
        return 0;
}

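/*
 * Likewise, a chunk stays fully populated until it is destroyed, so
 * depopulation has nothing to do.
 */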
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
                                  int page_start, int page_end)
{
        /* nada */
}

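/*
 * Allocate a chunk as one physically contiguous block of nr_pages
 * pages, point each page back at its owning chunk and mark the whole
 * range populated under pcpu_lock.
 */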
static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
{
        const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
        struct pcpu_chunk *chunk;
        struct page *pages;
        unsigned long flags;
        int i;

        chunk = pcpu_alloc_chunk(gfp);
        if (!chunk)
                return NULL;

        pages = alloc_pages(gfp, order_base_2(nr_pages));
        if (!pages) {
                pcpu_free_chunk(chunk);
                return NULL;
        }

        for (i = 0; i < nr_pages; i++)
                pcpu_set_page_chunk(nth_page(pages, i), chunk);

        chunk->data = pages;
        chunk->base_addr = page_address(pages);

        spin_lock_irqsave(&pcpu_lock, flags);
        pcpu_chunk_populated(chunk, 0, nr_pages);
        spin_unlock_irqrestore(&pcpu_lock, flags);

        pcpu_stats_chunk_alloc();
        trace_percpu_create_chunk(chunk->base_addr);

        return chunk;
}

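/* Free the backing pages, if any, and then the chunk itself. */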
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
        const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;

        if (!chunk)
                return;

        pcpu_stats_chunk_dealloc();
        trace_percpu_destroy_chunk(chunk->base_addr);

        if (chunk->data)
                __free_pages(chunk->data, order_base_2(nr_pages));
        pcpu_free_chunk(chunk);
}

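/*
 * Chunk memory comes straight from the linear mapping, so a plain
 * virt_to_page() is all the translation needed.
 */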
static struct page *pcpu_addr_to_page(void *addr)
{
        return virt_to_page(addr);
}

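/*
 * Sanity-check the first-chunk layout: all units must fall into a
 * single group, and warn when rounding the chunk size up to a
 * power-of-two number of pages would waste memory.
 */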
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
        size_t nr_pages, alloc_pages;

        /* all units must be in a single group */
        if (ai->nr_groups != 1) {
                pr_crit("can't handle more than one group\n");
                return -EINVAL;
        }

        nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;
        alloc_pages = roundup_pow_of_two(nr_pages);

        if (alloc_pages > nr_pages)
                pr_warn("wasting %zu pages per chunk\n",
                        alloc_pages - nr_pages);

        return 0;
}

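/* Without depopulation support there is never anything to reclaim. */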
static bool pcpu_should_reclaim_chunk(struct pcpu_chunk *chunk)
{
        return false;
}