/* Repeating a memory blob, with alias mapping optimization.
   Copyright (C) 2018-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <support/blob_repeat.h>
#include <support/check.h>
#include <support/test-driver.h>
#include <support/support.h>
#include <support/xunistd.h>
#include <sys/mman.h>
#include <unistd.h>
#include <wchar.h>

/* Small allocations should use malloc directly instead of the mmap
   optimization because mappings carry a lot of overhead.  */
static const size_t maximum_small_size = 4 * 1024 * 1024;

/* Internal helper for fill.  */
static void
fill0 (char *target, const char *element, size_t element_size,
       size_t count)
{
  while (count > 0)
    {
      memcpy (target, element, element_size);
      target += element_size;
      --count;
    }
}
49
50/* Fill the buffer at TARGET with COUNT copies of the ELEMENT_SIZE
51 bytes starting at ELEMENT. */
52static void
53fill (char *target, const char *element, size_t element_size,
54 size_t count)
55{
56 if (element_size == 0 || count == 0)
57 return;
58 else if (element_size == 1)
59 memset (s: target, c: element[0], n: count);
60 else if (element_size == sizeof (wchar_t))
61 {
62 wchar_t wc;
63 memcpy (dest: &wc, src: element, n: sizeof (wc));
64 wmemset (s: (wchar_t *) target, c: wc, n: count);
65 }
66 else if (element_size < 1024 && count > 4096)
67 {
68 /* Use larger copies for really small element sizes. */
69 char buffer[8192];
70 size_t buffer_count = sizeof (buffer) / element_size;
71 fill0 (target: buffer, element, element_size, count: buffer_count);
72 while (count > 0)
73 {
74 size_t copy_count = buffer_count;
75 if (copy_count > count)
76 copy_count = count;
77 size_t copy_bytes = copy_count * element_size;
78 memcpy (dest: target, src: buffer, n: copy_bytes);
79 target += copy_bytes;
80 count -= copy_count;
81 }
82 }
83 else
84 fill0 (target, element, element_size, count);
85}
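
/* Example: fill (buf, "ab", 2, 3) writes the six bytes "ababab" to
   BUF.  Single-byte elements reduce to memset, elements of
   sizeof (wchar_t) bytes reduce to wmemset, and other elements below
   1024 bytes are first replicated into a stack buffer so that the
   main loop can issue memcpy calls of up to 8 KiB at a time.  */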

/* Use malloc instead of mmap for small allocations and unusual size
   combinations.  */
static struct support_blob_repeat
allocate_malloc (size_t total_size, const void *element, size_t element_size,
                 size_t count)
{
  void *buffer = malloc (total_size);
  if (buffer == NULL)
    return (struct support_blob_repeat) { 0 };
  fill (buffer, element, element_size, count);
  return (struct support_blob_repeat)
    {
      .start = buffer,
      .size = total_size,
      .use_malloc = true
    };
}

/* Return the least common multiple of PAGE_SIZE and ELEMENT_SIZE,
   avoiding overflow.  This assumes that PAGE_SIZE is a power of
   two.  */
static size_t
minimum_stride_size (size_t page_size, size_t element_size)
{
  TEST_VERIFY_EXIT (page_size > 0);
  TEST_VERIFY_EXIT (element_size > 0);

  /* Compute the number of trailing zeros common to both sizes.  */
  unsigned int common_zeros = __builtin_ctzll (page_size | element_size);

  /* In the product, this power of two appears twice, but in the least
     common multiple, it appears only once.  Therefore, shift one
     factor.  */
  size_t multiple;
  if (__builtin_mul_overflow (page_size >> common_zeros, element_size,
                              &multiple))
    return 0;
  return multiple;
}
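
/* Worked example: for page_size == 4096 and element_size == 6, the
   bitwise OR is 0x1006, which has one trailing zero bit, so the
   result is (4096 >> 1) * 6 == 12288 == lcm (4096, 6).  Because
   page_size is a power of two, gcd (page_size, element_size) is
   exactly this common power of two, so shifting it out of one factor
   turns the product into the least common multiple.  */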

/* Allocations larger than maximum_small_size potentially use mmap
   with alias mappings.  If SHARED, the alias mappings are created
   using MAP_SHARED instead of MAP_PRIVATE.  */
static struct support_blob_repeat
allocate_big (size_t total_size, const void *element, size_t element_size,
              size_t count, bool shared)
{
  unsigned long page_size = xsysconf (_SC_PAGESIZE);
  size_t stride_size = minimum_stride_size (page_size, element_size);
  if (stride_size == 0)
    {
      errno = EOVERFLOW;
      return (struct support_blob_repeat) { 0 };
    }

  /* Ensure that the stride size is at least maximum_small_size.  This
     is necessary to reduce the number of distinct mappings.  */
  if (stride_size < maximum_small_size)
    stride_size
      = ((maximum_small_size + stride_size - 1) / stride_size) * stride_size;
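
  /* For example, the 12288-byte stride computed above for 6-byte
     elements rounds up to 342 * 12288 == 4202496 bytes, the smallest
     multiple of 12288 that is at least 4 MiB.  Rounding to a multiple
     keeps the stride divisible by both the page size and the element
     size.  */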

  if (stride_size > total_size)
    /* The mmap optimization would not save anything.  */
    return allocate_malloc (total_size, element, element_size, count);

  /* Reserve the memory region.  If we cannot create the mapping,
     there is no reason to set up the backing file.  */
  void *target = mmap (NULL, total_size, PROT_NONE,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (target == MAP_FAILED)
    return (struct support_blob_repeat) { 0 };

  /* Create the backing file for the repeated mapping.  Call mkstemp
     directly so that the temporary file can be unlinked immediately
     and its resources disappear once support_blob_repeat_free drops
     the mappings; using create_temp_file would result in a warning
     during post-test cleanup.  */
  int fd;
  {
    char *temppath = xasprintf ("%s/support_blob_repeat-XXXXXX", test_dir);
    fd = mkstemp (temppath);
    if (fd < 0)
      FAIL_EXIT1 ("mkstemp (\"%s\"): %m", temppath);
    xunlink (temppath);
    free (temppath);
  }
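
  /* Unlinking the name while FD is still open is safe: POSIX keeps an
     unlinked file alive as long as a descriptor or mapping refers to
     it, so the backing storage is reclaimed without any explicit
     cleanup step.  */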

  /* Make sure that there is backing storage, so that the fill
     operation will not fault.  */
  if (posix_fallocate (fd, 0, stride_size) != 0)
    FAIL_EXIT1 ("posix_fallocate (%zu): %m", stride_size);

  /* The stride size must still be a multiple of the page size and
     element size.  */
  TEST_VERIFY_EXIT ((stride_size % page_size) == 0);
  TEST_VERIFY_EXIT ((stride_size % element_size) == 0);

  /* Fill the backing store.  */
  {
    void *ptr = mmap (target, stride_size, PROT_READ | PROT_WRITE,
                      MAP_FIXED | MAP_FILE | MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED)
      {
        int saved_errno = errno;
        xmunmap (target, total_size);
        xclose (fd);
        errno = saved_errno;
        return (struct support_blob_repeat) { 0 };
      }
    if (ptr != target)
      FAIL_EXIT1 ("mapping of %zu bytes moved from %p to %p",
                  stride_size, target, ptr);

    /* Write the repeating data.  */
    fill (target, element, element_size, stride_size / element_size);

    /* Return to a PROT_NONE mapping, just to be on the safe side.  */
    ptr = mmap (target, stride_size, PROT_NONE,
                MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (ptr == MAP_FAILED)
      FAIL_EXIT1 ("Failed to reinstate PROT_NONE mapping: %m");
    if (ptr != target)
      FAIL_EXIT1 ("PROT_NONE mapping of %zu bytes moved from %p to %p",
                  stride_size, target, ptr);
  }

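  /* Every mapping below starts at offset 0 of the same backing file,
     so all strides become aliases of the pages written above: the
     kernel presents the stride-sized pattern repeated across the
     whole region without storing additional copies.  */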
  /* Create the alias mappings.  */
  {
    size_t remaining_size = total_size;
    char *current = target;
    int flags = MAP_FIXED | MAP_FILE;
    if (shared)
      flags |= MAP_SHARED;
    else
      flags |= MAP_PRIVATE;
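    /* Where available, do not reserve swap space for these mappings;
       otherwise, overcommit accounting for the private copy-on-write
       pages could make very large allocations fail unnecessarily.  */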
#ifdef MAP_NORESERVE
    flags |= MAP_NORESERVE;
#endif
    while (remaining_size > 0)
      {
        size_t to_map = stride_size;
        if (to_map > remaining_size)
          to_map = remaining_size;
        void *ptr = mmap (current, to_map, PROT_READ | PROT_WRITE,
                          flags, fd, 0);
        if (ptr == MAP_FAILED)
          {
            int saved_errno = errno;
            xmunmap (target, total_size);
            xclose (fd);
            errno = saved_errno;
            return (struct support_blob_repeat) { 0 };
          }
        if (ptr != current)
          FAIL_EXIT1 ("alias mapping of %zu bytes moved from %p to %p",
                      to_map, current, ptr);
        remaining_size -= to_map;
        current += to_map;
      }
  }
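
  /* With SHARED, each stride maps the file MAP_SHARED, so a store
     through one alias is visible through all the others.  With
     MAP_PRIVATE, the first store copies the touched page, and the
     change stays local to that stride.  */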

  xclose (fd);

  return (struct support_blob_repeat)
    {
      .start = target,
      .size = total_size,
      .use_malloc = false
    };
}

static struct support_blob_repeat
repeat_allocate (const void *element, size_t element_size,
                 size_t count, bool shared)
{
  size_t total_size;
  if (__builtin_mul_overflow (element_size, count, &total_size))
    {
      errno = EOVERFLOW;
      return (struct support_blob_repeat) { 0 };
    }
  if (total_size <= maximum_small_size)
    return allocate_malloc (total_size, element, element_size, count);
  else
    return allocate_big (total_size, element, element_size, count, shared);
}

struct support_blob_repeat
support_blob_repeat_allocate (const void *element, size_t element_size,
                              size_t count)
{
  return repeat_allocate (element, element_size, count, false);
}

struct support_blob_repeat
support_blob_repeat_allocate_shared (const void *element, size_t element_size,
                                     size_t count)
{
  return repeat_allocate (element, element_size, count, true);
}
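
/* Illustrative usage sketch (a minimal example, not taken verbatim
   from any particular test; see support/blob_repeat.h for the
   authoritative interface contract):

     struct support_blob_repeat repeat
       = support_blob_repeat_allocate ("abc", 3, 10 * 1000 * 1000);
     if (repeat.start == NULL)
       FAIL_EXIT1 ("repeated blob allocation failed: %m");
     ... operate on the 30 MB at repeat.start ...
     support_blob_repeat_free (&repeat);
*/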

void
support_blob_repeat_free (struct support_blob_repeat *blob)
{
  if (blob->size > 0)
    {
      int saved_errno = errno;
      if (blob->use_malloc)
        free (blob->start);
      else
        xmunmap (blob->start, blob->size);
      errno = saved_errno;
    }
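  /* Reset the blob so that a second call becomes a harmless no-op.  */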
  *blob = (struct support_blob_repeat) { 0 };
}