1 | /* -*- Mode: C; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */ |
2 | /* gtksecurememory.c - API for allocating memory that is non-pageable |
3 | |
4 | Copyright 2007 Stefan Walter |
5 | Copyright 2020 GNOME Foundation |
6 | |
7 | SPDX-License-Identifier: LGPL-2.0-or-later |
8 | |
9 | The Gnome Keyring Library is free software; you can redistribute it and/or |
10 | modify it under the terms of the GNU Library General Public License as |
11 | published by the Free Software Foundation; either version 2 of the |
12 | License, or (at your option) any later version. |
13 | |
14 | The Gnome Keyring Library is distributed in the hope that it will be useful, |
15 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
17 | Library General Public License for more details. |
18 | |
19 | You should have received a copy of the GNU Library General Public |
20 | License along with the Gnome Library; see the file COPYING.LIB. If not, |
21 | see <http://www.gnu.org/licenses/>. |
22 | |
23 | Author: Stef Walter <stef@memberwebs.com> |
24 | */ |
25 | |
26 | /* |
27 | * IMPORTANT: This is pure vanila standard C, no glib. We need this |
28 | * because certain consumers of this protocol need to be built |
29 | * without linking in any special libraries. ie: the PKCS#11 module. |
30 | */ |
31 | |
32 | #include "config.h" |
33 | |
34 | #include "gtksecurememoryprivate.h" |
35 | |
36 | #include <sys/types.h> |
37 | |
38 | #if defined(HAVE_MLOCK) |
39 | #include <sys/mman.h> |
40 | #endif |
41 | |
42 | #include <stddef.h> |
43 | #include <string.h> |
44 | #include <stdio.h> |
45 | #include <stdlib.h> |
46 | #include <errno.h> |
47 | |
48 | #ifdef HAVE_UNISTD_H |
49 | #include <unistd.h> |
50 | #endif |
51 | |
52 | #include <assert.h> |
53 | |
54 | #ifdef WITH_VALGRIND |
55 | #include <valgrind/valgrind.h> |
56 | #include <valgrind/memcheck.h> |
57 | #endif |
58 | |
59 | #define DEBUG_SECURE_MEMORY 0 |
60 | |
61 | #if DEBUG_SECURE_MEMORY |
62 | #define DEBUG_ALLOC(msg, n) fprintf(stderr, "%s %lu bytes\n", msg, n); |
63 | #else |
64 | #define DEBUG_ALLOC(msg, n) |
65 | #endif |
66 | |
67 | #define DEFAULT_BLOCK_SIZE 16384 |
68 | |
69 | #define DO_LOCK() \ |
70 | GTK_SECURE_GLOBALS.lock (); |
71 | |
72 | #define DO_UNLOCK() \ |
73 | GTK_SECURE_GLOBALS.unlock (); |
74 | |
75 | typedef struct { |
76 | void (* lock) (void); |
77 | void (* unlock) (void); |
78 | void * (* fallback_alloc) (void *pointer, |
79 | size_t length); |
80 | void (* fallback_free) (void *pointer); |
81 | void * pool_data; |
82 | const char * pool_version; |
83 | } GtkSecureGlob; |
84 | |
85 | #include <glib.h> |
86 | |
87 | #ifdef G_OS_WIN32 |
88 | # define WIN32_LEAN_AND_MEAN |
89 | # include <windows.h> |
90 | # include <dpapi.h> /* for CryptProtectMemory() */ |
91 | #endif |
92 | |
93 | #define GTK_SECURE_POOL_VER_STR "1.0" |
94 | |
95 | static int show_warning = 1; |
96 | static int gtk_secure_warnings = 1; |
97 | static GMutex memory_mutex; |
98 | |
99 | static void |
100 | gtk_memory_lock (void) |
101 | { |
102 | g_mutex_lock (mutex: &memory_mutex); |
103 | } |
104 | |
105 | static void |
106 | gtk_memory_unlock (void) |
107 | { |
108 | g_mutex_unlock (mutex: &memory_mutex); |
109 | } |
110 | |
/*
 * Process-wide state shared by all users of this allocator: the lock
 * callbacks, the fallback (non-secure) allocator used when secure
 * pages can't be obtained, the meta-data pool list, and the pool
 * format version string checked by check_pool_version ().
 */
static GtkSecureGlob GTK_SECURE_GLOBALS = {
	.lock = gtk_memory_lock,
	.unlock = gtk_memory_unlock,
	.fallback_alloc = g_realloc,
	.fallback_free = g_free,
	.pool_data = NULL,
	.pool_version = GTK_SECURE_POOL_VER_STR,
};
119 | |
120 | /* |
121 | * We allocate all memory in units of sizeof(void*). This |
122 | * is our definition of 'word'. |
123 | */ |
124 | typedef void* word_t; |
125 | |
126 | /* The amount of extra words we can allocate */ |
127 | #define WASTE 4 |
128 | |
129 | /* |
130 | * Track allocated memory or a free block. This structure is not stored |
131 | * in the secure memory area. It is allocated from a pool of other |
132 | * memory. See meta_pool_xxx (). |
133 | */ |
134 | typedef struct _Cell { |
135 | word_t *words; /* Pointer to secure memory */ |
136 | size_t n_words; /* Amount of secure memory in words */ |
137 | size_t requested; /* Amount actually requested by app, in bytes, 0 if unused */ |
138 | const char *tag; /* Tag which describes the allocation */ |
139 | struct _Cell *next; /* Next in memory ring */ |
140 | struct _Cell *prev; /* Previous in memory ring */ |
141 | } Cell; |
142 | |
143 | /* |
144 | * A block of secure memory. This structure is the header in that block. |
145 | */ |
146 | typedef struct _Block { |
147 | word_t *words; /* Actual memory hangs off here */ |
148 | size_t n_words; /* Number of words in block */ |
149 | size_t n_used; /* Number of used allocations */ |
150 | struct _Cell* used_cells; /* Ring of used allocations */ |
151 | struct _Cell* unused_cells; /* Ring of unused allocations */ |
152 | struct _Block *next; /* Next block in list */ |
153 | } Block; |
154 | |
155 | /* ----------------------------------------------------------------------------- |
156 | * UNUSED STACK |
157 | */ |
158 | |
/*
 * Push @ptr onto the intrusive free stack at @stack.  The first
 * word of the pushed item itself stores the link to the previous top.
 */
static inline void
unused_push (void **stack, void *ptr)
{
	g_assert (stack);
	g_assert (ptr);

	*((void**)ptr) = *stack;
	*stack = ptr;
}
167 | |
/*
 * Pop and return the top item of the intrusive free stack at @stack.
 * The new top is read from the first word of the popped item.
 */
static inline void*
unused_pop (void **stack)
{
	void *top;

	g_assert (stack);
	top = *stack;
	*stack = *(void**)top;
	return top;
}
178 | |
/* Return the top of the free stack without removing it (NULL if empty). */
static inline void*
unused_peek (void **stack)
{
	g_assert (stack);
	return *stack;
}
185 | |
186 | /* ----------------------------------------------------------------------------- |
187 | * POOL META DATA ALLOCATION |
188 | * |
189 | * A pool for memory meta data. We allocate fixed size blocks. There are actually |
190 | * two different structures stored in this pool: Cell and Block. Cell is allocated |
191 | * way more often, and is bigger so we just allocate that size for both. |
192 | */ |
193 | |
194 | /* Pool allocates this data type */ |
195 | typedef union _Item { |
196 | Cell cell; |
197 | Block block; |
198 | } Item; |
199 | |
200 | typedef struct _Pool { |
201 | struct _Pool *next; /* Next pool in list */ |
202 | size_t length; /* Length in bytes of the pool */ |
203 | size_t used; /* Number of cells used in pool */ |
204 | void *unused; /* Unused stack of unused stuff */ |
205 | size_t n_items; /* Total number of items in pool */ |
206 | Item items[1]; /* Actual items hang off here */ |
207 | } Pool; |
208 | |
209 | static int |
210 | check_pool_version (void) |
211 | { |
212 | if (GTK_SECURE_GLOBALS.pool_version == NULL || |
213 | strcmp (s1: GTK_SECURE_GLOBALS.pool_version, GTK_SECURE_POOL_VER_STR) != 0) { |
214 | return 0; |
215 | } |
216 | |
217 | return 1; |
218 | } |
219 | |
220 | static void * |
221 | pool_alloc (void) |
222 | { |
223 | if (!check_pool_version ()) { |
224 | if (show_warning && gtk_secure_warnings) |
225 | fprintf (stderr, format: "the secure memory pool version does not match the code '%s' != '%s'\n" , |
226 | GTK_SECURE_GLOBALS.pool_version ? GTK_SECURE_GLOBALS.pool_version : "(null)" , |
227 | GTK_SECURE_POOL_VER_STR); |
228 | show_warning = 0; |
229 | return NULL; |
230 | } |
231 | |
232 | #ifdef HAVE_MMAP |
233 | /* A pool with an available item */ |
234 | Pool *pool = NULL; |
235 | |
236 | for (pool = GTK_SECURE_GLOBALS.pool_data; pool != NULL; pool = pool->next) { |
237 | if (unused_peek (stack: &pool->unused)) |
238 | break; |
239 | } |
240 | |
241 | void *item = NULL; |
242 | |
243 | /* Create a new pool */ |
244 | if (pool == NULL) { |
245 | size_t len = getpagesize () * 2; |
246 | void *pages = mmap (addr: 0, len: len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, fd: -1, offset: 0); |
247 | if (pages == MAP_FAILED) |
248 | return NULL; |
249 | |
250 | /* Fill in the block header, and inlude in block list */ |
251 | pool = pages; |
252 | pool->next = GTK_SECURE_GLOBALS.pool_data; |
253 | GTK_SECURE_GLOBALS.pool_data = pool; |
254 | pool->length = len; |
255 | pool->used = 0; |
256 | pool->unused = NULL; |
257 | |
258 | /* Fill block with unused items */ |
259 | pool->n_items = (len - sizeof (Pool)) / sizeof (Item); |
260 | for (size_t i = 0; i < pool->n_items; ++i) |
261 | unused_push (stack: &pool->unused, ptr: pool->items + i); |
262 | |
263 | #ifdef WITH_VALGRIND |
264 | VALGRIND_CREATE_MEMPOOL(pool, 0, 0); |
265 | #endif |
266 | } |
267 | |
268 | ++pool->used; |
269 | g_assert (unused_peek (&pool->unused)); |
270 | item = unused_pop (stack: &pool->unused); |
271 | |
272 | #ifdef WITH_VALGRIND |
273 | VALGRIND_MEMPOOL_ALLOC (pool, item, sizeof (Item)); |
274 | #endif |
275 | |
276 | return memset (s: item, c: 0, n: sizeof (Item)); |
277 | #else /* HAVE_MMAP */ |
278 | return NULL; |
279 | #endif |
280 | } |
281 | |
/*
 * Return @item to the meta-data pool it came from.  When this was the
 * pool's last used item, the whole pool is unmapped; otherwise the
 * item is poisoned and pushed on the pool's free stack.  Caller must
 * hold the lock.
 */
static void
pool_free (void* item)
{
#ifdef HAVE_MMAP
	Pool *pool, **at;
	char *ptr, *beg, *end;

	ptr = item;

	/* Find which pool this one belongs to.
	 * Fixed: "&GTK_SECURE_GLOBALS" was garbled to ">K_SECURE_GLOBALS" */
	for (at = (Pool **)&GTK_SECURE_GLOBALS.pool_data, pool = *at;
	     pool != NULL; at = &pool->next, pool = *at) {
		beg = (char*)pool->items;
		end = (char*)pool + pool->length - sizeof (Item);
		if (ptr >= beg && ptr <= end) {
			g_assert ((ptr - beg) % sizeof (Item) == 0);
			break;
		}
	}

	/* Otherwise invalid meta */
	g_assert (at);
	g_assert (pool);
	g_assert (pool->used > 0);

	/* No more meta cells used in this pool, remove from list, destroy */
	if (pool->used == 1) {
		*at = pool->next;

#ifdef WITH_VALGRIND
		VALGRIND_DESTROY_MEMPOOL (pool);
#endif

		munmap (pool, pool->length);
		return;
	}

#ifdef WITH_VALGRIND
	VALGRIND_MEMPOOL_FREE (pool, item);
	VALGRIND_MAKE_MEM_UNDEFINED (item, sizeof (Item));
#endif

	--pool->used;
	/* Poison released meta data to catch use-after-free */
	memset (item, 0xCD, sizeof (Item));
	unused_push (&pool->unused, item);
#endif /* HAVE_MMAP */
}
329 | |
330 | #ifndef G_DISABLE_ASSERT |
331 | |
332 | static int |
333 | pool_valid (void* item) |
334 | { |
335 | Pool *pool; |
336 | char *ptr, *beg, *end; |
337 | |
338 | ptr = item; |
339 | |
340 | /* Find which block this one belongs to */ |
341 | for (pool = GTK_SECURE_GLOBALS.pool_data; pool; pool = pool->next) { |
342 | beg = (char*)pool->items; |
343 | end = (char*)pool + pool->length - sizeof (Item); |
344 | if (ptr >= beg && ptr <= end) |
345 | return (pool->used && (ptr - beg) % sizeof (Item) == 0); |
346 | } |
347 | |
348 | return 0; |
349 | } |
350 | |
#endif /* G_DISABLE_ASSERT */
352 | |
353 | /* ----------------------------------------------------------------------------- |
354 | * SEC ALLOCATION |
355 | * |
356 | * Each memory cell begins and ends with a pointer to its metadata. These are also |
357 | * used as guards or red zones. Since they're treated as redzones by valgrind we |
358 | * have to jump through a few hoops before reading and/or writing them. |
359 | */ |
360 | |
/*
 * Convert a byte length to a count of pointer-sized words, rounding
 * any partial word up.
 */
static inline size_t
sec_size_to_words (size_t length)
{
	size_t words = length / sizeof (void*);
	if (length % sizeof (void*) != 0)
		++words;
	return words;
}
366 | |
/*
 * Write the guard words at both ends of the cell's word range.  Each
 * guard is a pointer back to the Cell meta data, so it serves both as
 * a red zone and as a way to find the cell from the memory.  Under
 * valgrind the guards are kept no-access, so they are briefly opened
 * for the write and sealed again afterwards.
 */
static inline void
sec_write_guards (Cell *cell)
{
#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_UNDEFINED (cell->words, sizeof (word_t));
	VALGRIND_MAKE_MEM_UNDEFINED (cell->words + cell->n_words - 1, sizeof (word_t));
#endif

	((void**)cell->words)[0] = (void*)cell;
	((void**)cell->words)[cell->n_words - 1] = (void*)cell;

#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_NOACCESS (cell->words, sizeof (word_t));
	VALGRIND_MAKE_MEM_NOACCESS (cell->words + cell->n_words - 1, sizeof (word_t));
#endif
}
383 | |
/*
 * Assert that both guard words of @cell still point back at the cell,
 * i.e. no buffer overrun or underrun has clobbered them.  Temporarily
 * marks the guards readable for valgrind while checking.
 */
static inline void
sec_check_guards (Cell *cell)
{
#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_DEFINED (cell->words, sizeof (word_t));
	VALGRIND_MAKE_MEM_DEFINED (cell->words + cell->n_words - 1, sizeof (word_t));
#endif

	g_assert(((void**)cell->words)[0] == (void*)cell);
	g_assert(((void**)cell->words)[cell->n_words - 1] == (void*)cell);

#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_NOACCESS (cell->words, sizeof (word_t));
	VALGRIND_MAKE_MEM_NOACCESS (cell->words + cell->n_words - 1, sizeof (word_t));
#endif
}
400 | |
401 | static void |
402 | sec_insert_cell_ring (Cell **ring, Cell *cell) |
403 | { |
404 | g_assert (ring); |
405 | g_assert (cell); |
406 | g_assert (cell != *ring); |
407 | g_assert (cell->next == NULL); |
408 | g_assert (cell->prev == NULL); |
409 | |
410 | /* Insert back into the mix of available memory */ |
411 | if (*ring) { |
412 | cell->next = (*ring)->next; |
413 | cell->prev = *ring; |
414 | cell->next->prev = cell; |
415 | cell->prev->next = cell; |
416 | } else { |
417 | cell->next = cell; |
418 | cell->prev = cell; |
419 | } |
420 | |
421 | *ring = cell; |
422 | g_assert (cell->next->prev == cell); |
423 | g_assert (cell->prev->next == cell); |
424 | } |
425 | |
/*
 * Unlink @cell from the doubly-linked ring anchored at @ring.  If the
 * cell was the anchor, the anchor moves to the next cell, or becomes
 * NULL when the cell was the only member.  The cell's links are
 * cleared so a NULL next marks "not on any ring".
 */
static void
sec_remove_cell_ring (Cell **ring, Cell *cell)
{
	g_assert (ring);
	g_assert (*ring);
	g_assert (cell->next);
	g_assert (cell->prev);

	g_assert (cell->next->prev == cell);
	g_assert (cell->prev->next == cell);

	if (cell == *ring) {
		/* The last meta? */
		if (cell->next == cell) {
			g_assert (cell->prev == cell);
			*ring = NULL;

		/* Just pointing to this meta */
		} else {
			g_assert (cell->prev != cell);
			*ring = cell->next;
		}
	}

	cell->next->prev = cell->prev;
	cell->prev->next = cell->next;
	cell->next = cell->prev = NULL;

	g_assert (*ring != cell);
}
456 | |
457 | static inline void* |
458 | sec_cell_to_memory (Cell *cell) |
459 | { |
460 | return cell->words + 1; |
461 | } |
462 | |
463 | static inline int |
464 | sec_is_valid_word (Block *block, word_t *word) |
465 | { |
466 | return (word >= block->words && word < block->words + block->n_words); |
467 | } |
468 | |
/*
 * Zero the bytes [from, to) of @memory and leave them marked
 * "undefined" for valgrind — used when the range is about to be
 * handed (back) to the application.
 */
static inline void
sec_clear_undefined (void *memory,
                     size_t from,
                     size_t to)
{
	char *ptr = memory;
	g_assert (from <= to);
#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_UNDEFINED (ptr + from, to - from);
#endif
	/* Fixed: "s:/c:/n:" argument-label artifacts are not valid C */
	memset (ptr + from, 0, to - from);
#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_UNDEFINED (ptr + from, to - from);
#endif
}
/*
 * Zero the bytes [from, to) of @memory and leave them marked
 * "no access" for valgrind — used when the range is being returned
 * to the unused pool and must not be touched again.
 */
static inline void
sec_clear_noaccess (void *memory, size_t from, size_t to)
{
	char *ptr = memory;
	g_assert (from <= to);
#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_UNDEFINED (ptr + from, to - from);
#endif
	/* Fixed: "s:/c:/n:" argument-label artifacts are not valid C */
	memset (ptr + from, 0, to - from);
#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_NOACCESS (ptr + from, to - from);
#endif
}
497 | |
/*
 * Return the Cell whose memory immediately precedes @cell within
 * @block, or NULL when @cell sits at the start of the block.  Reads
 * the preceding cell's trailing guard word (which stores a pointer to
 * its meta data) and verifies that cell's guards.
 */
static Cell*
sec_neighbor_before (Block *block, Cell *cell)
{
	word_t *word;

	g_assert (cell);
	g_assert (block);

	word = cell->words - 1;
	if (!sec_is_valid_word (block, word))
		return NULL;

#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_DEFINED (word, sizeof (word_t));
#endif

	cell = *word;
	sec_check_guards (cell);

#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_NOACCESS (word, sizeof (word_t));
#endif

	return cell;
}
523 | |
/*
 * Return the Cell whose memory immediately follows @cell within
 * @block, or NULL when @cell sits at the end of the block.  Reads the
 * following cell's leading guard word (which stores a pointer to its
 * meta data) and verifies that cell's guards.
 */
static Cell*
sec_neighbor_after (Block *block, Cell *cell)
{
	word_t *word;

	g_assert (cell);
	g_assert (block);

	word = cell->words + cell->n_words;
	if (!sec_is_valid_word (block, word))
		return NULL;

#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_DEFINED (word, sizeof (word_t));
#endif

	cell = *word;
	sec_check_guards (cell);

#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_NOACCESS (word, sizeof (word_t));
#endif

	return cell;
}
549 | |
550 | static void* |
551 | sec_alloc (Block *block, |
552 | const char *tag, |
553 | size_t length) |
554 | { |
555 | Cell *cell, *other; |
556 | size_t n_words; |
557 | void *memory; |
558 | |
559 | g_assert (block); |
560 | g_assert (length); |
561 | g_assert (tag); |
562 | |
563 | if (!block->unused_cells) |
564 | return NULL; |
565 | |
566 | /* |
567 | * Each memory allocation is aligned to a pointer size, and |
568 | * then, sandwidched between two pointers to its meta data. |
569 | * These pointers also act as guards. |
570 | * |
571 | * We allocate memory in units of sizeof (void*) |
572 | */ |
573 | |
574 | n_words = sec_size_to_words (length) + 2; |
575 | |
576 | /* Look for a cell of at least our required size */ |
577 | cell = block->unused_cells; |
578 | while (cell->n_words < n_words) { |
579 | cell = cell->next; |
580 | if (cell == block->unused_cells) { |
581 | cell = NULL; |
582 | break; |
583 | } |
584 | } |
585 | |
586 | if (!cell) |
587 | return NULL; |
588 | |
589 | g_assert (cell->tag == NULL); |
590 | g_assert (cell->requested == 0); |
591 | g_assert (cell->prev); |
592 | g_assert (cell->words); |
593 | sec_check_guards (cell); |
594 | |
595 | /* Steal from the cell if it's too long */ |
596 | if (cell->n_words > n_words + WASTE) { |
597 | other = pool_alloc (); |
598 | if (!other) |
599 | return NULL; |
600 | other->n_words = n_words; |
601 | other->words = cell->words; |
602 | cell->n_words -= n_words; |
603 | cell->words += n_words; |
604 | |
605 | sec_write_guards (cell: other); |
606 | sec_write_guards (cell); |
607 | |
608 | cell = other; |
609 | } |
610 | |
611 | if (cell->next) |
612 | sec_remove_cell_ring (ring: &block->unused_cells, cell); |
613 | |
614 | ++block->n_used; |
615 | cell->tag = tag; |
616 | cell->requested = length; |
617 | sec_insert_cell_ring (ring: &block->used_cells, cell); |
618 | memory = sec_cell_to_memory (cell); |
619 | |
620 | #ifdef WITH_VALGRIND |
621 | VALGRIND_MAKE_MEM_UNDEFINED (memory, length); |
622 | #endif |
623 | |
624 | return memset (s: memory, c: 0, n: length); |
625 | } |
626 | |
627 | static void* |
628 | sec_free (Block *block, void *memory) |
629 | { |
630 | Cell *cell, *other; |
631 | word_t *word; |
632 | |
633 | g_assert (block); |
634 | g_assert (memory); |
635 | |
636 | word = memory; |
637 | --word; |
638 | |
639 | #ifdef WITH_VALGRIND |
640 | VALGRIND_MAKE_MEM_DEFINED (word, sizeof (word_t)); |
641 | #endif |
642 | |
643 | /* Lookup the meta for this memory block (using guard pointer) */ |
644 | g_assert (sec_is_valid_word (block, word)); |
645 | g_assert (pool_valid (*word)); |
646 | cell = *word; |
647 | |
648 | #ifdef WITH_VALGRIND |
649 | VALGRIND_MAKE_MEM_DEFINED (cell->words, cell->n_words * sizeof (word_t)); |
650 | #endif |
651 | |
652 | sec_check_guards (cell); |
653 | sec_clear_noaccess (memory, from: 0, to: cell->requested); |
654 | |
655 | sec_check_guards (cell); |
656 | g_assert (cell->requested > 0); |
657 | g_assert (cell->tag != NULL); |
658 | |
659 | /* Remove from the used cell ring */ |
660 | sec_remove_cell_ring (ring: &block->used_cells, cell); |
661 | |
662 | /* Find previous unallocated neighbor, and merge if possible */ |
663 | other = sec_neighbor_before (block, cell); |
664 | if (other && other->requested == 0) { |
665 | g_assert (other->tag == NULL); |
666 | g_assert (other->next && other->prev); |
667 | other->n_words += cell->n_words; |
668 | sec_write_guards (cell: other); |
669 | pool_free (item: cell); |
670 | cell = other; |
671 | } |
672 | |
673 | /* Find next unallocated neighbor, and merge if possible */ |
674 | other = sec_neighbor_after (block, cell); |
675 | if (other && other->requested == 0) { |
676 | g_assert (other->tag == NULL); |
677 | g_assert (other->next && other->prev); |
678 | other->n_words += cell->n_words; |
679 | other->words = cell->words; |
680 | if (cell->next) |
681 | sec_remove_cell_ring (ring: &block->unused_cells, cell); |
682 | sec_write_guards (cell: other); |
683 | pool_free (item: cell); |
684 | cell = other; |
685 | } |
686 | |
687 | /* Add to the unused list if not already there */ |
688 | if (!cell->next) |
689 | sec_insert_cell_ring (ring: &block->unused_cells, cell); |
690 | |
691 | cell->tag = NULL; |
692 | cell->requested = 0; |
693 | --block->n_used; |
694 | return NULL; |
695 | } |
696 | |
/*
 * Copy @length bytes from @src to @dest while, under valgrind,
 * preserving the definedness (V) bits across the copy so that
 * partially-undefined secure memory doesn't appear fully "defined"
 * after a realloc.  Outside valgrind this is a plain memcpy.
 */
static void
memcpy_with_vbits (void *dest,
                   void *src,
                   size_t length)
{
#ifdef WITH_VALGRIND
	int vbits_setup = 0;
	void *vbits = NULL;

	if (RUNNING_ON_VALGRIND) {
		vbits = malloc (length);
		if (vbits != NULL)
			vbits_setup = VALGRIND_GET_VBITS (src, vbits, length);
		VALGRIND_MAKE_MEM_DEFINED (src, length);
	}
#endif

	/* Fixed: "dest:/src:/n:" argument-label artifacts are not valid C */
	memcpy (dest, src, length);

#ifdef WITH_VALGRIND
	if (vbits_setup == 1) {
		VALGRIND_SET_VBITS (dest, vbits, length);
		VALGRIND_SET_VBITS (src, vbits, length);
	}
	free (vbits);
#endif
}
724 | |
725 | static void* |
726 | sec_realloc (Block *block, |
727 | const char *tag, |
728 | void *memory, |
729 | size_t length) |
730 | { |
731 | Cell *cell, *other; |
732 | word_t *word; |
733 | size_t n_words; |
734 | size_t valid; |
735 | void *alloc; |
736 | |
737 | /* Standard realloc behavior, should have been handled elsewhere */ |
738 | g_assert (memory != NULL); |
739 | g_assert (length > 0); |
740 | g_assert (tag != NULL); |
741 | |
742 | /* Dig out where the meta should be */ |
743 | word = memory; |
744 | --word; |
745 | |
746 | #ifdef WITH_VALGRIND |
747 | VALGRIND_MAKE_MEM_DEFINED (word, sizeof (word_t)); |
748 | #endif |
749 | |
750 | g_assert (sec_is_valid_word (block, word)); |
751 | g_assert (pool_valid (*word)); |
752 | cell = *word; |
753 | |
754 | /* Validate that it's actually for real */ |
755 | sec_check_guards (cell); |
756 | g_assert (cell->requested > 0); |
757 | g_assert (cell->tag != NULL); |
758 | |
759 | /* The amount of valid data */ |
760 | valid = cell->requested; |
761 | |
762 | /* How many words we actually want */ |
763 | n_words = sec_size_to_words (length) + 2; |
764 | |
765 | /* Less memory is required than is in the cell */ |
766 | if (n_words <= cell->n_words) { |
767 | |
768 | /* TODO: No shrinking behavior yet */ |
769 | cell->requested = length; |
770 | alloc = sec_cell_to_memory (cell); |
771 | |
772 | /* |
773 | * Even though we may be reusing the same cell, that doesn't |
774 | * mean that the allocation is shrinking. It could have shrunk |
775 | * and is now expanding back some. |
776 | */ |
777 | if (length < valid) |
778 | sec_clear_undefined (memory: alloc, from: length, to: valid); |
779 | |
780 | return alloc; |
781 | } |
782 | |
783 | /* Need braaaaaiiiiiinsss... */ |
784 | while (cell->n_words < n_words) { |
785 | |
786 | /* See if we have a neighbor who can give us some memory */ |
787 | other = sec_neighbor_after (block, cell); |
788 | if (!other || other->requested != 0) |
789 | break; |
790 | |
791 | /* Eat the whole neighbor if not too big */ |
792 | if (n_words - cell->n_words + WASTE >= other->n_words) { |
793 | cell->n_words += other->n_words; |
794 | sec_write_guards (cell); |
795 | sec_remove_cell_ring (ring: &block->unused_cells, cell: other); |
796 | pool_free (item: other); |
797 | |
798 | /* Steal from the neighbor */ |
799 | } else { |
800 | other->words += n_words - cell->n_words; |
801 | other->n_words -= n_words - cell->n_words; |
802 | sec_write_guards (cell: other); |
803 | cell->n_words = n_words; |
804 | sec_write_guards (cell); |
805 | } |
806 | } |
807 | |
808 | if (cell->n_words >= n_words) { |
809 | cell->requested = length; |
810 | cell->tag = tag; |
811 | alloc = sec_cell_to_memory (cell); |
812 | sec_clear_undefined (memory: alloc, from: valid, to: length); |
813 | return alloc; |
814 | } |
815 | |
816 | /* That didn't work, try alloc/free */ |
817 | alloc = sec_alloc (block, tag, length); |
818 | if (alloc) { |
819 | memcpy_with_vbits (dest: alloc, src: memory, length: valid); |
820 | sec_free (block, memory); |
821 | } |
822 | |
823 | return alloc; |
824 | } |
825 | |
826 | |
/*
 * Return the number of bytes the application originally requested for
 * @memory, looked up via the leading guard word.  Asserts that the
 * pointer really is a live allocation belonging to @block.
 */
static size_t
sec_allocated (Block *block, void *memory)
{
	Cell *cell;
	word_t *word;

	g_assert (block);
	g_assert (memory);

	word = memory;
	--word;

#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_DEFINED (word, sizeof (word_t));
#endif

	/* Lookup the meta for this memory block (using guard pointer) */
	g_assert (sec_is_valid_word (block, word));
	g_assert (pool_valid (*word));
	cell = *word;

	sec_check_guards (cell);
	g_assert (cell->requested > 0);
	g_assert (cell->tag != NULL);

#ifdef WITH_VALGRIND
	VALGRIND_MAKE_MEM_NOACCESS (word, sizeof (word_t));
#endif

	return cell->requested;
}
858 | |
/*
 * Walk @block from its first word to its last, asserting the
 * consistency of every cell along the way: guards intact, ring links
 * coherent, and (for used cells) the requested size fitting inside
 * the cell.  Skipped entirely under valgrind, since the guard words
 * are marked no-access there.
 */
static void
sec_validate (Block *block)
{
	Cell *cell;
	word_t *word, *last;

#ifdef WITH_VALGRIND
	if (RUNNING_ON_VALGRIND)
		return;
#endif

	word = block->words;
	last = word + block->n_words;

	for (;;) {
		g_assert (word < last);

		g_assert (sec_is_valid_word (block, word));
		g_assert (pool_valid (*word));
		cell = *word;

		/* Validate that it's actually for real */
		sec_check_guards (cell);

		/* Is it an allocated block? */
		if (cell->requested > 0) {
			g_assert (cell->tag != NULL);
			g_assert (cell->next != NULL);
			g_assert (cell->prev != NULL);
			g_assert (cell->next->prev == cell);
			g_assert (cell->prev->next == cell);
			g_assert (cell->requested <= (cell->n_words - 2) * sizeof (word_t));

		/* An unused block */
		} else {
			g_assert (cell->tag == NULL);
			g_assert (cell->next != NULL);
			g_assert (cell->prev != NULL);
			g_assert (cell->next->prev == cell);
			g_assert (cell->prev->next == cell);
		}

		word += cell->n_words;
		if (word == last)
			break;
	}
}
906 | |
907 | /* ----------------------------------------------------------------------------- |
908 | * LOCKED MEMORY |
909 | */ |
910 | |
911 | static void* |
912 | sec_acquire_pages (size_t *sz, |
913 | const char *during_tag) |
914 | { |
915 | g_assert (sz); |
916 | g_assert (*sz); |
917 | g_assert (during_tag); |
918 | |
919 | #if defined(HAVE_MLOCK) && defined(HAVE_MMAP) |
920 | /* Make sure sz is a multiple of the page size */ |
921 | unsigned long pgsize = getpagesize (); |
922 | |
923 | *sz = (*sz + pgsize -1) & ~(pgsize - 1); |
924 | |
925 | void *pages = mmap (addr: 0, len: *sz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, fd: -1, offset: 0); |
926 | if (pages == MAP_FAILED) { |
927 | if (show_warning && gtk_secure_warnings) |
928 | fprintf (stderr, format: "couldn't map %lu bytes of memory (%s): %s\n" , |
929 | (unsigned long)*sz, during_tag, strerror (errno)); |
930 | show_warning = 0; |
931 | return NULL; |
932 | } |
933 | |
934 | if (mlock (addr: pages, len: *sz) < 0) { |
935 | if (show_warning && gtk_secure_warnings && errno != EPERM) { |
936 | fprintf (stderr, format: "couldn't lock %lu bytes of memory (%s): %s\n" , |
937 | (unsigned long)*sz, during_tag, strerror (errno)); |
938 | show_warning = 0; |
939 | } |
940 | munmap (addr: pages, len: *sz); |
941 | return NULL; |
942 | } |
943 | |
944 | DEBUG_ALLOC ("gtk-secure-memory: new block " , *sz); |
945 | |
946 | #if defined(HAVE_MADVISE) && defined(MADV_DONTDUMP) |
947 | if (madvise (addr: pages, len: *sz, MADV_DONTDUMP) < 0) { |
948 | if (show_warning && gtk_secure_warnings) { |
949 | /* |
950 | * Not fatal - this was added in Linux 3.4 and older |
951 | * kernels will legitimately fail this at runtime |
952 | */ |
953 | fprintf (stderr, format: "couldn't MADV_DONTDUMP %lu bytes of memory (%s): %s\n" , |
954 | (unsigned long)*sz, during_tag, strerror (errno)); |
955 | } |
956 | } |
957 | #endif |
958 | |
959 | show_warning = 1; |
960 | return pages; |
961 | |
962 | #elif defined G_OS_WIN32 |
963 | /* Make sure sz is a multiple of CRYPTPROTECTMEMORY_BLOCK_SIZE in wincrypt.h */ |
964 | *sz = (*sz + CRYPTPROTECTMEMORY_BLOCK_SIZE - 1) & ~(CRYPTPROTECTMEMORY_BLOCK_SIZE - 1); |
965 | |
966 | void *data = (void *) LocalAlloc (LPTR, *sz); |
967 | |
968 | if (data == NULL) { |
969 | if (show_warning && gtk_secure_warnings) |
970 | fprintf (stderr, "couldn't allocate %lu bytes of memory (%s): %#010lX\n" , |
971 | (unsigned long)*sz, during_tag, GetLastError ()); |
972 | show_warning = 0; |
973 | return NULL; |
974 | } |
975 | |
976 | if (!CryptProtectMemory (data, *sz, CRYPTPROTECTMEMORY_SAME_PROCESS)) { |
977 | if (show_warning && gtk_secure_warnings) |
978 | fprintf (stderr, "couldn't encrypt %lu bytes of memory (%s): %#010lX\n" , |
979 | (unsigned long)*sz, during_tag, GetLastError ()); |
980 | show_warning = 0; |
981 | return NULL; |
982 | } |
983 | |
984 | DEBUG_ALLOC ("gtk-secure-memory: new block " , *sz); |
985 | |
986 | show_warning = 1; |
987 | return data; |
988 | |
989 | #else |
990 | if (show_warning && gtk_secure_warnings) |
991 | fprintf (stderr, "your system does not support private memory" ); |
992 | show_warning = 0; |
993 | return NULL; |
994 | #endif |
995 | |
996 | } |
997 | |
998 | static void |
999 | sec_release_pages (void *pages, size_t sz) |
1000 | { |
1001 | g_assert (pages); |
1002 | |
1003 | #if defined(HAVE_MLOCK) |
1004 | g_assert (sz % getpagesize () == 0); |
1005 | |
1006 | if (munlock (addr: pages, len: sz) < 0 && gtk_secure_warnings) |
1007 | fprintf (stderr, format: "couldn't unlock private memory: %s\n" , strerror (errno)); |
1008 | |
1009 | if (munmap (addr: pages, len: sz) < 0 && gtk_secure_warnings) |
1010 | fprintf (stderr, format: "couldn't unmap private anonymous memory: %s\n" , strerror (errno)); |
1011 | |
1012 | DEBUG_ALLOC ("gtk-secure-memory: freed block " , sz); |
1013 | |
1014 | #elif defined G_OS_WIN32 |
1015 | g_assert (sz % CRYPTPROTECTMEMORY_BLOCK_SIZE == 0); |
1016 | |
1017 | if (!CryptUnprotectMemory (pages, sz, CRYPTPROTECTMEMORY_SAME_PROCESS)) |
1018 | fprintf (stderr, "couldn't decrypt private memory: %#010lX\n" , GetLastError ()); |
1019 | |
1020 | if (LocalFree (pages) != NULL) |
1021 | fprintf (stderr, "couldn't free private anonymous memory: %#010lX\n" , GetLastError ()); |
1022 | |
1023 | DEBUG_ALLOC ("gtk-secure-memory: freed block " , sz); |
1024 | #else |
1025 | g_assert (FALSE); |
1026 | #endif |
1027 | } |
1028 | |
1029 | /* ----------------------------------------------------------------------------- |
1030 | * MANAGE DIFFERENT BLOCKS |
1031 | */ |
1032 | |
1033 | static Block *all_blocks = NULL; |
1034 | |
1035 | static Block* |
1036 | sec_block_create (size_t size, |
1037 | const char *during_tag) |
1038 | { |
1039 | Block *block; |
1040 | Cell *cell; |
1041 | |
1042 | g_assert (during_tag); |
1043 | |
1044 | /* We can force all all memory to be malloced */ |
1045 | if (getenv (name: "SECMEM_FORCE_FALLBACK" )) |
1046 | return NULL; |
1047 | |
1048 | block = pool_alloc (); |
1049 | if (!block) |
1050 | return NULL; |
1051 | |
1052 | cell = pool_alloc (); |
1053 | if (!cell) { |
1054 | pool_free (item: block); |
1055 | return NULL; |
1056 | } |
1057 | |
1058 | /* The size above is a minimum, we're free to go bigger */ |
1059 | if (size < DEFAULT_BLOCK_SIZE) |
1060 | size = DEFAULT_BLOCK_SIZE; |
1061 | |
1062 | block->words = sec_acquire_pages (sz: &size, during_tag); |
1063 | block->n_words = size / sizeof (word_t); |
1064 | if (!block->words) { |
1065 | pool_free (item: block); |
1066 | pool_free (item: cell); |
1067 | return NULL; |
1068 | } |
1069 | |
1070 | #ifdef WITH_VALGRIND |
1071 | VALGRIND_MAKE_MEM_DEFINED (block->words, size); |
1072 | #endif |
1073 | |
1074 | /* The first cell to allocate from */ |
1075 | cell->words = block->words; |
1076 | cell->n_words = block->n_words; |
1077 | cell->requested = 0; |
1078 | sec_write_guards (cell); |
1079 | sec_insert_cell_ring (ring: &block->unused_cells, cell); |
1080 | |
1081 | block->next = all_blocks; |
1082 | all_blocks = block; |
1083 | |
1084 | return block; |
1085 | } |
1086 | |
1087 | static void |
1088 | sec_block_destroy (Block *block) |
1089 | { |
1090 | Block *bl, **at; |
1091 | Cell *cell; |
1092 | |
1093 | g_assert (block); |
1094 | g_assert (block->words); |
1095 | g_assert (block->n_used == 0); |
1096 | |
1097 | /* Remove from the list */ |
1098 | for (at = &all_blocks, bl = *at; bl; at = &bl->next, bl = *at) { |
1099 | if (bl == block) { |
1100 | *at = block->next; |
1101 | break; |
1102 | } |
1103 | } |
1104 | |
1105 | /* Must have been found */ |
1106 | g_assert (bl == block); |
1107 | g_assert (block->used_cells == NULL); |
1108 | |
1109 | /* Release all the meta data cells */ |
1110 | while (block->unused_cells) { |
1111 | cell = block->unused_cells; |
1112 | sec_remove_cell_ring (ring: &block->unused_cells, cell); |
1113 | pool_free (item: cell); |
1114 | } |
1115 | |
1116 | /* Release all pages of secure memory */ |
1117 | sec_release_pages (pages: block->words, sz: block->n_words * sizeof (word_t)); |
1118 | |
1119 | pool_free (item: block); |
1120 | } |
1121 | |
1122 | /* ------------------------------------------------------------------------ |
1123 | * PUBLIC FUNCTIONALITY |
1124 | */ |
1125 | |
1126 | void* |
1127 | gtk_secure_alloc_full (const char *tag, |
1128 | size_t length, |
1129 | int flags) |
1130 | { |
1131 | Block *block; |
1132 | void *memory = NULL; |
1133 | |
1134 | if (tag == NULL) |
1135 | tag = "?" ; |
1136 | |
1137 | if (length > 0xFFFFFFFF / 2) { |
1138 | if (gtk_secure_warnings) |
1139 | fprintf (stderr, format: "tried to allocate an insane amount of memory: %lu\n" , |
1140 | (unsigned long)length); |
1141 | return NULL; |
1142 | } |
1143 | |
1144 | /* Can't allocate zero bytes */ |
1145 | if (length == 0) |
1146 | return NULL; |
1147 | |
1148 | DO_LOCK (); |
1149 | |
1150 | for (block = all_blocks; block; block = block->next) { |
1151 | memory = sec_alloc (block, tag, length); |
1152 | if (memory) |
1153 | break; |
1154 | } |
1155 | |
1156 | /* None of the current blocks have space, allocate new */ |
1157 | if (!memory) { |
1158 | block = sec_block_create (size: length, during_tag: tag); |
1159 | if (block) |
1160 | memory = sec_alloc (block, tag, length); |
1161 | } |
1162 | |
1163 | #ifdef WITH_VALGRIND |
1164 | if (memory != NULL) |
1165 | VALGRIND_MALLOCLIKE_BLOCK (memory, length, sizeof (void*), 1); |
1166 | #endif |
1167 | |
1168 | DO_UNLOCK (); |
1169 | |
1170 | if (!memory && (flags & GTK_SECURE_USE_FALLBACK) && GTK_SECURE_GLOBALS.fallback_alloc != NULL) { |
1171 | memory = GTK_SECURE_GLOBALS.fallback_alloc (NULL, length); |
1172 | if (memory) /* Our returned memory is always zeroed */ |
1173 | memset (s: memory, c: 0, n: length); |
1174 | } |
1175 | |
1176 | if (!memory) |
1177 | errno = ENOMEM; |
1178 | |
1179 | return memory; |
1180 | } |
1181 | |
/**
 * gtk_secure_realloc_full:
 * @tag: label recorded with the allocation; NULL is shown as "?"
 * @memory: existing secure (or fallback) allocation, or NULL
 * @length: new size in bytes; 0 frees @memory and returns NULL
 * @flags: GTK_SECURE_USE_FALLBACK permits ordinary heap memory
 *
 * Resize an allocation, preserving its contents.  NULL @memory acts
 * like gtk_secure_alloc_full().  Returns NULL and sets errno to
 * ENOMEM on failure.
 */
void*
gtk_secure_realloc_full (const char *tag,
                         void *memory,
                         size_t length,
                         int flags)
{
	Block *block = NULL;
	size_t previous = 0;
	int donew = 0;
	void *alloc = NULL;

	if (tag == NULL)
		tag = "?" ;

	/* Reject obviously bogus sizes up front */
	if (length > 0xFFFFFFFF / 2) {
		if (gtk_secure_warnings)
			fprintf (stderr, format: "tried to allocate an excessive amount of memory: %lu\n" ,
				(unsigned long)length);
		return NULL;
	}

	/* Standard realloc degenerate cases */
	if (memory == NULL)
		return gtk_secure_alloc_full (tag, length, flags);
	if (!length) {
		gtk_secure_free_full (p: memory, fallback: flags);
		return NULL;
	}

	DO_LOCK ();

	/* Find out where it belongs to */
	for (block = all_blocks; block; block = block->next) {
		if (sec_is_valid_word (block, word: memory)) {
			previous = sec_allocated (block, memory);

#ifdef WITH_VALGRIND
			/* Let valgrind think we are unallocating so that it'll validate */
			VALGRIND_FREELIKE_BLOCK (memory, sizeof (word_t));
#endif

			/* May move, resize in place, or return NULL if no room */
			alloc = sec_realloc (block, tag, memory, length);

#ifdef WITH_VALGRIND
			/* Now tell valgrind about either the new block or old one */
			VALGRIND_MALLOCLIKE_BLOCK (alloc ? alloc : memory,
						   alloc ? length : previous,
						   sizeof (word_t), 1);
#endif
			break;
		}
	}

	/* If it didn't work we may need to allocate a new block */
	if (block && !alloc)
		donew = 1;

	/* NOTE(review): since @memory is still live in @block at this
	 * point, n_used should be non-zero here; this destroy appears to
	 * be a defensive cleanup — confirm against sec_realloc semantics */
	if (block && block->n_used == 0)
		sec_block_destroy (block);

	DO_UNLOCK ();

	/* Pointer was not ours: either delegate to the fallback
	 * allocator or treat it as a hard programming error */
	if (!block) {
		if ((flags & GTK_SECURE_USE_FALLBACK) && GTK_SECURE_GLOBALS.fallback_alloc) {
			/*
			 * In this case we can't zero the returned memory,
			 * because we don't know what the block size was.
			 */
			return GTK_SECURE_GLOBALS.fallback_alloc (memory, length);
		} else {
			if (gtk_secure_warnings)
				fprintf (stderr, format: "memory does not belong to secure memory pool: 0x%08" G_GUINTPTR_FORMAT "x\n" ,
					(guintptr) memory);
			g_assert (0 && "memory does does not belong to secure memory pool" );
			return NULL;
		}
	}

	/* Grow via a brand-new allocation, copying old data (with its
	 * valgrind validity bits) and releasing the old cell */
	if (donew) {
		alloc = gtk_secure_alloc_full (tag, length, flags);
		if (alloc) {
			memcpy_with_vbits (dest: alloc, src: memory, length: previous);
			gtk_secure_free_full (p: memory, fallback: flags);
		}
	}

	if (!alloc)
		errno = ENOMEM;

	return alloc;
}
1272 | |
1273 | void |
1274 | gtk_secure_free (void *memory) |
1275 | { |
1276 | gtk_secure_free_full (p: memory, GTK_SECURE_USE_FALLBACK); |
1277 | } |
1278 | |
1279 | void |
1280 | gtk_secure_free_full (void *memory, int flags) |
1281 | { |
1282 | Block *block = NULL; |
1283 | |
1284 | if (memory == NULL) |
1285 | return; |
1286 | |
1287 | DO_LOCK (); |
1288 | |
1289 | /* Find out where it belongs to */ |
1290 | for (block = all_blocks; block; block = block->next) { |
1291 | if (sec_is_valid_word (block, word: memory)) |
1292 | break; |
1293 | } |
1294 | |
1295 | #ifdef WITH_VALGRIND |
1296 | /* We like valgrind's warnings, so give it a first whack at checking for errors */ |
1297 | if (block != NULL || !(flags & GTK_SECURE_USE_FALLBACK)) |
1298 | VALGRIND_FREELIKE_BLOCK (memory, sizeof (word_t)); |
1299 | #endif |
1300 | |
1301 | if (block != NULL) { |
1302 | sec_free (block, memory); |
1303 | if (block->n_used == 0) |
1304 | sec_block_destroy (block); |
1305 | } |
1306 | |
1307 | DO_UNLOCK (); |
1308 | |
1309 | if (!block) { |
1310 | if ((flags & GTK_SECURE_USE_FALLBACK) && GTK_SECURE_GLOBALS.fallback_free) { |
1311 | GTK_SECURE_GLOBALS.fallback_free (memory); |
1312 | } else { |
1313 | if (gtk_secure_warnings) |
1314 | fprintf (stderr, format: "memory does not belong to secure memory pool: 0x%08" G_GUINTPTR_FORMAT "x\n" , |
1315 | (guintptr) memory); |
1316 | g_assert (0 && "memory does does not belong to secure memory pool" ); |
1317 | } |
1318 | } |
1319 | } |
1320 | |
1321 | int |
1322 | gtk_secure_check (const void *memory) |
1323 | { |
1324 | Block *block = NULL; |
1325 | |
1326 | DO_LOCK (); |
1327 | |
1328 | /* Find out where it belongs to */ |
1329 | for (block = all_blocks; block; block = block->next) { |
1330 | if (sec_is_valid_word (block, word: (word_t*)memory)) |
1331 | break; |
1332 | } |
1333 | |
1334 | DO_UNLOCK (); |
1335 | |
1336 | return block == NULL ? 0 : 1; |
1337 | } |
1338 | |
1339 | void |
1340 | gtk_secure_validate (void) |
1341 | { |
1342 | Block *block = NULL; |
1343 | |
1344 | DO_LOCK (); |
1345 | |
1346 | for (block = all_blocks; block; block = block->next) |
1347 | sec_validate (block); |
1348 | |
1349 | DO_UNLOCK (); |
1350 | } |
1351 | |
1352 | |
1353 | static gtk_secure_rec * |
1354 | records_for_ring (Cell *cell_ring, |
1355 | gtk_secure_rec *records, |
1356 | unsigned int *count, |
1357 | unsigned int *total) |
1358 | { |
1359 | gtk_secure_rec *new_rec; |
1360 | unsigned int allocated = *count; |
1361 | Cell *cell; |
1362 | |
1363 | cell = cell_ring; |
1364 | do { |
1365 | if (*count >= allocated) { |
1366 | new_rec = realloc (ptr: records, size: sizeof (gtk_secure_rec) * (allocated + 32)); |
1367 | if (new_rec == NULL) { |
1368 | *count = 0; |
1369 | free (ptr: records); |
1370 | return NULL; |
1371 | } else { |
1372 | records = new_rec; |
1373 | allocated += 32; |
1374 | } |
1375 | } |
1376 | |
1377 | if (cell != NULL) { |
1378 | records[*count].request_length = cell->requested; |
1379 | records[*count].block_length = cell->n_words * sizeof (word_t); |
1380 | records[*count].tag = cell->tag; |
1381 | (*count)++; |
1382 | (*total) += cell->n_words; |
1383 | cell = cell->next; |
1384 | } |
1385 | } while (cell != NULL && cell != cell_ring); |
1386 | |
1387 | return records; |
1388 | } |
1389 | |
1390 | gtk_secure_rec * |
1391 | gtk_secure_records (unsigned int *count) |
1392 | { |
1393 | gtk_secure_rec *records = NULL; |
1394 | Block *block = NULL; |
1395 | unsigned int total; |
1396 | |
1397 | *count = 0; |
1398 | |
1399 | DO_LOCK (); |
1400 | |
1401 | for (block = all_blocks; block != NULL; block = block->next) { |
1402 | total = 0; |
1403 | |
1404 | records = records_for_ring (cell_ring: block->unused_cells, records, count, total: &total); |
1405 | if (records == NULL) |
1406 | break; |
1407 | records = records_for_ring (cell_ring: block->used_cells, records, count, total: &total); |
1408 | if (records == NULL) |
1409 | break; |
1410 | |
1411 | /* Make sure this actualy accounts for all memory */ |
1412 | g_assert (total == block->n_words); |
1413 | } |
1414 | |
1415 | DO_UNLOCK (); |
1416 | |
1417 | return records; |
1418 | } |
1419 | |
/**
 * gtk_secure_strdup_full:
 * @tag: label recorded with the allocation
 * @str: NUL-terminated string to copy, or NULL
 * @options: flags passed through to gtk_secure_alloc_full()
 *
 * Duplicate @str into secure memory.  Returns NULL if @str is NULL or
 * the allocation fails (errno set to ENOMEM by the allocator).
 */
char*
gtk_secure_strdup_full (const char *tag,
                        const char *str,
                        int options)
{
	size_t len;
	char *res;

	if (!str)
		return NULL;

	len = strlen (str) + 1;
	res = (char *)gtk_secure_alloc_full (tag, len, options);
	/* The allocator can fail and return NULL; the original passed
	 * that straight into strcpy, dereferencing NULL */
	if (!res)
		return NULL;
	memcpy (res, str, len);
	return res;
}
1436 | |
/**
 * gtk_secure_strndup_full:
 * @tag: label recorded with the allocation
 * @str: string to copy, not necessarily NUL-terminated, or NULL
 * @length: maximum number of bytes of @str to examine/copy
 * @options: flags passed through to gtk_secure_alloc_full()
 *
 * Duplicate at most @length bytes of @str into secure memory, always
 * NUL-terminating the result.  Returns NULL if @str is NULL or the
 * allocation fails.
 */
char *
gtk_secure_strndup_full (const char *tag,
                         const char *str,
                         size_t length,
                         int options)
{
	char *res;
	const char *end;

	if (!str)
		return NULL;

	/* Stop at an embedded NUL within the first @length bytes */
	end = memchr (str, '\0', length);
	if (end != NULL)
		length = (size_t)(end - str);

	res = (char *)gtk_secure_alloc_full (tag, length + 1, options);
	/* Allocation can fail; don't dereference NULL below */
	if (!res)
		return NULL;

	/* Copy exactly @length bytes and terminate ourselves.  The
	 * original copied length + 1 bytes from @str, reading one byte
	 * past the caller's buffer whenever no NUL terminator was
	 * present within @length. */
	memcpy (res, str, length);
	res[length] = '\0';
	return res;
}
1458 | |
/**
 * gtk_secure_clear:
 * @p: memory to wipe, may be NULL (no-op)
 * @length: number of bytes to overwrite
 *
 * Overwrite @length bytes at @p with the 0xAA poison pattern.  The
 * stores go through a volatile pointer so the compiler cannot remove
 * them as dead writes.
 */
void
gtk_secure_clear (void *p, size_t length)
{
	volatile char *dst;
	size_t i;

	if (p == NULL)
		return;

	dst = (volatile char *)p;
	for (i = 0; i < length; i++)
		dst[i] = 0xAA;
}
1474 | |
1475 | void |
1476 | gtk_secure_strclear (char *str) |
1477 | { |
1478 | if (!str) |
1479 | return; |
1480 | gtk_secure_clear (p: (unsigned char*)str, length: strlen (s: str)); |
1481 | } |
1482 | |
1483 | void |
1484 | gtk_secure_strfree (char *str) |
1485 | { |
1486 | /* |
1487 | * If we're using unpageable 'secure' memory, then the free call |
1488 | * should zero out the memory, but because on certain platforms |
1489 | * we may be using normal memory, zero it out here just in case. |
1490 | */ |
1491 | |
1492 | gtk_secure_strclear (str); |
1493 | gtk_secure_free_full (memory: str, GTK_SECURE_USE_FALLBACK); |
1494 | } |
1495 | |