/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
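
/*
 * Illustrative sketch (not upstream code): a zero-byte request yields
 * ZERO_SIZE_PTR, which must never be dereferenced but may be passed to
 * kfree() just like NULL:
 *
 *	void *p = kmalloc(0, GFP_KERNEL);	// p == ZERO_SIZE_PTR
 *
 *	if (ZERO_OR_NULL_PTR(p))
 *		;	// no usable memory behind p
 *	kfree(p);	// no-op for ZERO_SIZE_PTR (and for NULL)
 */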

#include <linux/kasan.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *);
void memcg_destroy_kmem_caches(struct mem_cgroup *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
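
/*
 * For example (illustrative only, "foo" is a made-up structure): a cache
 * whose objects are laid out like struct foo and inherit its alignment
 * could be set up once at init time:
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *
 * and then used with kmem_cache_alloc(foo_cachep, GFP_KERNEL) /
 * kmem_cache_free(foo_cachep, obj).
 */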

/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
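
/*
 * For example (illustrative only, names are made up): if only the "iov"
 * field of a hypothetical struct foo_request may be copied to/from
 * userspace:
 *
 *	foo_req_cachep = KMEM_CACHE_USERCOPY(foo_request,
 *					     SLAB_HWCACHE_ALIGN, iov);
 *
 * Hardened usercopy will then reject copies that touch any other part
 * of the object.
 */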

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabyte (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte-sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte-sized index which can represent 2^8 entries, the size of the
 * object should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the minimum
 * object size and give up on using a byte-sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
#ifdef CONFIG_ZONE_DMA
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for both flags.
	 */
	if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. If both are, __GFP_DMA
	 * is more important.
	 */
	return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
#else
	return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
#endif
}

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
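
/*
 * Illustrative sketch (not upstream code): allocating and freeing a batch
 * of objects from a cache in one shot. "cachep", "objs" and "nr" are
 * made-up names:
 *
 *	void *objs[16];
 *	int nr = kmem_cache_alloc_bulk(cachep, GFP_KERNEL,
 *				       ARRAY_SIZE(objs), objs);
 *
 *	if (!nr)
 *		return -ENOMEM;	// bulk allocation failed
 *	...
 *	kmem_cache_free_bulk(cachep, nr, objs);
 */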

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory-allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel RAM. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep. May use emergency pools.
 *
 * %GFP_HIGHUSER
 *	Allocate memory from high memory on behalf of user.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to satisfy the allocation, but eventually fail.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
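
/*
 * Illustrative sketch (not upstream code) of typical kmalloc() usage;
 * "foo" and "count" are made-up names:
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	f->count = 0;
 *	...
 *	kfree(f);
 *
 * In atomic context GFP_ATOMIC would be used instead of GFP_KERNEL.
 */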

/*
 * Determine the size used for the nth kmalloc cache.
 * Return that size, or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline unsigned int kmalloc_size(unsigned int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1U << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}
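
/*
 * Illustrative sketch (not upstream code): allocating near a device's NUMA
 * node when the data will mostly be touched from that node; "dev" and
 * "foo" are made-up names:
 *
 *	struct foo *f = kmalloc_node(sizeof(*f), GFP_KERNEL,
 *				     dev_to_node(dev));
 *
 *	if (!f)
 *		return -ENOMEM;
 */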

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches. NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches. This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches. While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;

			void (*deact_fn)(struct kmem_cache *);
			union {
				struct rcu_head deact_rcu_head;
				struct work_struct deact_work;
			};
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}
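
/*
 * Illustrative sketch (not upstream code): kmalloc_array() is preferred
 * over open-coding "kmalloc(n * size, ...)" because the multiplication is
 * checked for overflow; "entries" and "nr_entries" are made-up names:
 *
 *	entries = kmalloc_array(nr_entries, sizeof(*entries), GFP_KERNEL);
 *
 *	if (!entries)
 *		return -ENOMEM;
 */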

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
				       int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}


#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif /* _LINUX_SLAB_H */