/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NODEMASK_H
#define __LINUX_NODEMASK_H

/*
 * Nodemasks provide a bitmap suitable for representing the
 * set of Nodes in a system, one bit position per Node number.
 *
 * See detailed comments in the file linux/bitmap.h describing the
 * data type on which these nodemasks are based.
 *
 * For details of nodemask_parse_user(), see bitmap_parse_user() in
 * lib/bitmap.c.  For details of nodelist_parse(), see bitmap_parselist(),
 * also in lib/bitmap.c.  For details of node_remap(), see bitmap_bitremap()
 * in lib/bitmap.c.  For details of nodes_remap(), see bitmap_remap() in
 * lib/bitmap.c.  For details of nodes_onto(), see bitmap_onto() in
 * lib/bitmap.c.  For details of nodes_fold(), see bitmap_fold() in
 * lib/bitmap.c.
 *
 * The available nodemask operations are:
 *
 * void node_set(node, mask)            turn on bit 'node' in mask
 * void node_clear(node, mask)          turn off bit 'node' in mask
 * void nodes_setall(mask)              set all bits
 * void nodes_clear(mask)               clear all bits
 * int node_isset(node, mask)           true iff bit 'node' set in mask
 * int node_test_and_set(node, mask)    test and set bit 'node' in mask
 *
 * void nodes_and(dst, src1, src2)      dst = src1 & src2  [intersection]
 * void nodes_or(dst, src1, src2)       dst = src1 | src2  [union]
 * void nodes_xor(dst, src1, src2)      dst = src1 ^ src2
 * void nodes_andnot(dst, src1, src2)   dst = src1 & ~src2
 * void nodes_complement(dst, src)      dst = ~src
 *
 * int nodes_equal(mask1, mask2)        Does mask1 == mask2?
 * int nodes_intersects(mask1, mask2)   Do mask1 and mask2 intersect?
 * int nodes_subset(mask1, mask2)       Is mask1 a subset of mask2?
 * int nodes_empty(mask)                Is mask empty (no bits set)?
 * int nodes_full(mask)                 Is mask full (all bits set)?
 * int nodes_weight(mask)               Hamming weight - number of set bits
 *
 * void nodes_shift_right(dst, src, n)  Shift right
 * void nodes_shift_left(dst, src, n)   Shift left
 *
 * int first_node(mask)                 Number of lowest set bit, or MAX_NUMNODES
 * int next_node(node, mask)            Next node past 'node', or MAX_NUMNODES
 * int next_node_in(node, mask)         Next node past 'node', or wrap to first,
 *                                      or MAX_NUMNODES
 * int first_unset_node(mask)           First node not set in mask, or
 *                                      MAX_NUMNODES
 *
 * nodemask_t nodemask_of_node(node)    Return nodemask with bit 'node' set
 * NODE_MASK_ALL                        Initializer - all bits set
 * NODE_MASK_NONE                       Initializer - no bits set
 * unsigned long *nodes_addr(mask)      Array of unsigned longs in mask
 *
 * int nodemask_parse_user(ubuf, ulen, mask)    Parse ascii string as nodemask
 * int nodelist_parse(buf, map)         Parse ascii string as nodelist
 * int node_remap(oldbit, old, new)     newbit = map(old, new)(oldbit)
 * void nodes_remap(dst, src, old, new) *dst = map(old, new)(src)
 * void nodes_onto(dst, orig, relmap)   *dst = orig relative to relmap
 * void nodes_fold(dst, orig, sz)       dst bits = orig bits mod sz
 *
 * for_each_node_mask(node, mask)       for-loop node over mask
 *
 * int num_online_nodes()               Number of online Nodes
 * int num_possible_nodes()             Number of all possible Nodes
 *
 * int node_random(mask)                Random node with set bit in mask
 *
 * int node_online(node)                Is node 'node' online?
 * int node_possible(node)              Is node 'node' possible?
 *
 * node_set_online(node)                set bit 'node' in node_online_map
 * node_set_offline(node)               clear bit 'node' in node_online_map
 *
 * for_each_node(node)                  for-loop node over node_possible_map
 * for_each_online_node(node)           for-loop node over node_online_map
 *
 * Subtlety:
 * 1) The 'type-checked' form of node_isset() causes gcc (3.3.2, anyway)
 *    to generate slightly worse code.  So use a simple one-line #define
 *    for node_isset(), instead of wrapping an inline inside a macro, the
 *    way we do the other calls.
 *
 * NODEMASK_SCRATCH
 * When doing the above logical AND, OR, XOR and remap operations, callers
 * tend to need temporary nodemask_t's on the stack.  But if NODES_SHIFT is
 * large, a nodemask_t consumes too much stack space.  NODEMASK_SCRATCH is a
 * helper for such situations.  See below and CPUMASK_ALLOC also.
 */
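
/*
 * A minimal usage sketch (illustrative only; "my_nodes" and "tmp" are
 * hypothetical local variables, not part of this API):
 *
 *      nodemask_t my_nodes = NODE_MASK_NONE;
 *      nodemask_t tmp;
 *
 *      node_set(0, my_nodes);
 *      node_set(2, my_nodes);
 *      nodes_and(tmp, my_nodes, node_online_map);
 *      if (!nodes_empty(tmp))
 *              pr_info("%d usable nodes\n", nodes_weight(tmp));
 */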

#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/numa.h>

typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;

/**
 * nodemask_pr_args - printf args to output a nodemask
 * @maskp: nodemask to be printed
 *
 * Can be used to provide arguments for '%*pb[l]' when printing a nodemask.
 */
#define nodemask_pr_args(maskp)	__nodemask_pr_numnodes(maskp), \
				__nodemask_pr_bits(maskp)
static inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
{
	return m ? MAX_NUMNODES : 0;
}
static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
{
	return m ? m->bits : NULL;
}
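
/*
 * Printing sketch (illustrative only; "mask" is a hypothetical local):
 * nodemask_pr_args() pairs with the '%*pb' (bitmap) and '%*pbl' (node list)
 * printk format extensions, e.g.:
 *
 *      pr_info("allowed nodes: %*pbl\n", nodemask_pr_args(&mask));
 */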

/*
 * The inline keyword gives the compiler room to decide whether or not to
 * inline a function as it sees best.  However, as these functions are
 * called in both __init and non-__init functions, if they are not inlined
 * we will end up with a section mismatch error (of the type where freeable
 * items end up being referenced after they are freed).  So we must use
 * __always_inline here to fix the problem.  If other functions in the
 * future also end up in this situation they will also need to be annotated
 * as __always_inline.
 */
#define node_set(node, dst) __node_set((node), &(dst))
static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
{
	set_bit(node, dstp->bits);
}

#define node_clear(node, dst) __node_clear((node), &(dst))
static inline void __node_clear(int node, volatile nodemask_t *dstp)
{
	clear_bit(node, dstp->bits);
}

#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
static inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
{
	bitmap_fill(dstp->bits, nbits);
}

#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
{
	bitmap_zero(dstp->bits, nbits);
}

/* No static inline type checking - see Subtlety (1) above. */
#define node_isset(node, nodemask) test_bit((node), (nodemask).bits)

#define node_test_and_set(node, nodemask) \
			__node_test_and_set((node), &(nodemask))
static inline int __node_test_and_set(int node, nodemask_t *addr)
{
	return test_and_set_bit(node, addr->bits);
}

#define nodes_and(dst, src1, src2) \
			__nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_or(dst, src1, src2) \
			__nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_xor(dst, src1, src2) \
			__nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_andnot(dst, src1, src2) \
			__nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_complement(dst, src) \
			__nodes_complement(&(dst), &(src), MAX_NUMNODES)
static inline void __nodes_complement(nodemask_t *dstp,
					const nodemask_t *srcp, unsigned int nbits)
{
	bitmap_complement(dstp->bits, srcp->bits, nbits);
}

#define nodes_equal(src1, src2) \
			__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
static inline int __nodes_equal(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_equal(src1p->bits, src2p->bits, nbits);
}

#define nodes_intersects(src1, src2) \
			__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
static inline int __nodes_intersects(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}

#define nodes_subset(src1, src2) \
			__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
static inline int __nodes_subset(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_subset(src1p->bits, src2p->bits, nbits);
}

#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
static inline int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_empty(srcp->bits, nbits);
}

#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
static inline int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_full(srcp->bits, nbits);
}

#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
static inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_weight(srcp->bits, nbits);
}

#define nodes_shift_right(dst, src, n) \
			__nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
static inline void __nodes_shift_right(nodemask_t *dstp,
					const nodemask_t *srcp, int n, int nbits)
{
	bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}

#define nodes_shift_left(dst, src, n) \
			__nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
static inline void __nodes_shift_left(nodemask_t *dstp,
					const nodemask_t *srcp, int n, int nbits)
{
	bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}

/* FIXME: better would be to fix all architectures to never return
          > MAX_NUMNODES, then the silly min_ts could be dropped. */

#define first_node(src) __first_node(&(src))
static inline int __first_node(const nodemask_t *srcp)
{
	return min_t(int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
}

#define next_node(n, src) __next_node((n), &(src))
static inline int __next_node(int n, const nodemask_t *srcp)
{
	return min_t(int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n + 1));
}

/*
 * Find the next present node in src, starting after node n, wrapping around to
 * the first node in src if needed.  Returns MAX_NUMNODES if src is empty.
 */
#define next_node_in(n, src) __next_node_in((n), &(src))
int __next_node_in(int node, const nodemask_t *srcp);
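
/*
 * A round-robin sketch (illustrative only; "nid" is a hypothetical cursor
 * kept by the caller and use_node() is a made-up placeholder):
 *
 *      nid = next_node_in(nid, node_online_map);
 *      if (nid < MAX_NUMNODES)
 *              use_node(nid);
 */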

static inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
	nodes_clear(*mask);
	node_set(node, *mask);
}

#define nodemask_of_node(node)						\
({									\
	typeof(_unused_nodemask_arg_) m;				\
	if (sizeof(m) == sizeof(unsigned long)) {			\
		m.bits[0] = 1UL << (node);				\
	} else {							\
		init_nodemask_of_node(&m, (node));			\
	}								\
	m;								\
})

#define first_unset_node(mask) __first_unset_node(&(mask))
static inline int __first_unset_node(const nodemask_t *maskp)
{
	return min_t(int, MAX_NUMNODES,
			find_first_zero_bit(maskp->bits, MAX_NUMNODES));
}

#define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES)

#if MAX_NUMNODES <= BITS_PER_LONG

#define NODE_MASK_ALL							\
((nodemask_t) { {							\
	[BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD		\
} })

#else

#define NODE_MASK_ALL							\
((nodemask_t) { {							\
	[0 ... BITS_TO_LONGS(MAX_NUMNODES)-2] = ~0UL,			\
	[BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD		\
} })

#endif

#define NODE_MASK_NONE							\
((nodemask_t) { {							\
	[0 ... BITS_TO_LONGS(MAX_NUMNODES)-1] = 0UL			\
} })

#define nodes_addr(src) ((src).bits)

#define nodemask_parse_user(ubuf, ulen, dst) \
		__nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES)
static inline int __nodemask_parse_user(const char __user *buf, int len,
					nodemask_t *dstp, int nbits)
{
	return bitmap_parse_user(buf, len, dstp->bits, nbits);
}

#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES)
static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
{
	return bitmap_parselist(buf, dstp->bits, nbits);
}

#define node_remap(oldbit, old, new) \
		__node_remap((oldbit), &(old), &(new), MAX_NUMNODES)
static inline int __node_remap(int oldbit,
		const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
	return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
}

#define nodes_remap(dst, src, old, new) \
		__nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES)
static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
		const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
	bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}

#define nodes_onto(dst, orig, relmap) \
		__nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES)
static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
		const nodemask_t *relmapp, int nbits)
{
	bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
}

#define nodes_fold(dst, orig, sz) \
		__nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES)
static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
		int sz, int nbits)
{
	bitmap_fold(dstp->bits, origp->bits, sz, nbits);
}

#if MAX_NUMNODES > 1
#define for_each_node_mask(node, mask)			\
	for ((node) = first_node(mask);			\
		(node) < MAX_NUMNODES;			\
		(node) = next_node((node), (mask)))
#else /* MAX_NUMNODES == 1 */
#define for_each_node_mask(node, mask)			\
	if (!nodes_empty(mask))				\
		for ((node) = 0; (node) < 1; (node)++)
#endif /* MAX_NUMNODES */
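
/*
 * Iteration sketch (illustrative only; "node" and "mask" are hypothetical
 * locals):
 *
 *      int node;
 *
 *      for_each_node_mask(node, mask)
 *              pr_info("node %d is in the mask\n", node);
 */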

/*
 * Bitmasks that are kept for all the nodes.
 */
enum node_states {
	N_POSSIBLE,		/* The node could become online at some point */
	N_ONLINE,		/* The node is online */
	N_NORMAL_MEMORY,	/* The node has regular memory */
#ifdef CONFIG_HIGHMEM
	N_HIGH_MEMORY,		/* The node has regular or high memory */
#else
	N_HIGH_MEMORY = N_NORMAL_MEMORY,
#endif
	N_MEMORY,		/* The node has memory (regular, high, movable) */
	N_CPU,			/* The node has one or more cpus */
	NR_NODE_STATES
};

/*
 * The following particular system nodemasks and operations
 * on them manage all possible and online nodes.
 */

extern nodemask_t node_states[NR_NODE_STATES];

#if MAX_NUMNODES > 1
static inline int node_state(int node, enum node_states state)
{
	return node_isset(node, node_states[state]);
}

static inline void node_set_state(int node, enum node_states state)
{
	__node_set(node, &node_states[state]);
}

static inline void node_clear_state(int node, enum node_states state)
{
	__node_clear(node, &node_states[state]);
}

static inline int num_node_state(enum node_states state)
{
	return nodes_weight(node_states[state]);
}

#define for_each_node_state(__node, __state) \
	for_each_node_mask((__node), node_states[__state])

#define first_online_node	first_node(node_states[N_ONLINE])
#define first_memory_node	first_node(node_states[N_MEMORY])
static inline int next_online_node(int nid)
{
	return next_node(nid, node_states[N_ONLINE]);
}
static inline int next_memory_node(int nid)
{
	return next_node(nid, node_states[N_MEMORY]);
}

extern unsigned int nr_node_ids;
extern unsigned int nr_online_nodes;

static inline void node_set_online(int nid)
{
	node_set_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);
}

static inline void node_set_offline(int nid)
{
	node_clear_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);
}

#else

static inline int node_state(int node, enum node_states state)
{
	return node == 0;
}

static inline void node_set_state(int node, enum node_states state)
{
}

static inline void node_clear_state(int node, enum node_states state)
{
}

static inline int num_node_state(enum node_states state)
{
	return 1;
}

#define for_each_node_state(node, __state) \
	for ((node) = 0; (node) == 0; (node) = 1)

#define first_online_node	0
#define first_memory_node	0
#define next_online_node(nid)	(MAX_NUMNODES)
#define nr_node_ids		1U
#define nr_online_nodes		1U

#define node_set_online(node)	   node_set_state((node), N_ONLINE)
#define node_set_offline(node)	   node_clear_state((node), N_ONLINE)

#endif

#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
extern int node_random(const nodemask_t *maskp);
#else
static inline int node_random(const nodemask_t *mask)
{
	return 0;
}
#endif

#define node_online_map		node_states[N_ONLINE]
#define node_possible_map	node_states[N_POSSIBLE]

#define num_online_nodes()	num_node_state(N_ONLINE)
#define num_possible_nodes()	num_node_state(N_POSSIBLE)
#define node_online(node)	node_state((node), N_ONLINE)
#define node_possible(node)	node_state((node), N_POSSIBLE)

#define for_each_node(node)	   for_each_node_state(node, N_POSSIBLE)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
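
/*
 * Online/possible node sketch (illustrative only; "node" is a hypothetical
 * local):
 *
 *      pr_info("%d of %d possible nodes are online\n",
 *              num_online_nodes(), num_possible_nodes());
 *
 *      for_each_online_node(node)
 *              if (node_state(node, N_MEMORY))
 *                      pr_info("node %d has memory\n", node);
 */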

/*
 * For a nodemask scratch area.
 * NODEMASK_ALLOC(type, name, gfp_flags) allocates an object with a specified
 * type and name.
 */
#if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */
#define NODEMASK_ALLOC(type, name, gfp_flags)	\
			type *name = kmalloc(sizeof(*name), gfp_flags)
#define NODEMASK_FREE(m)			kfree(m)
#else
#define NODEMASK_ALLOC(type, name, gfp_flags)	type _##name, *name = &_##name
#define NODEMASK_FREE(m)			do {} while (0)
#endif

/* An example structure for using NODEMASK_ALLOC, used in mempolicy. */
struct nodemask_scratch {
	nodemask_t	mask1;
	nodemask_t	mask2;
};

#define NODEMASK_SCRATCH(x)						\
			NODEMASK_ALLOC(struct nodemask_scratch, x,	\
					GFP_KERNEL | __GFP_NORETRY)
#define NODEMASK_SCRATCH_FREE(x)	NODEMASK_FREE(x)
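
/*
 * NODEMASK_SCRATCH usage sketch (illustrative only; "a" and "b" are
 * hypothetical nodemask_t values, and the error handling shown is the
 * hypothetical minimum for the kmalloc() case):
 *
 *      NODEMASK_SCRATCH(scratch);
 *
 *      if (!scratch)
 *              return -ENOMEM;
 *      nodes_and(scratch->mask1, a, b);
 *      NODEMASK_SCRATCH_FREE(scratch);
 */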


#endif /* __LINUX_NODEMASK_H */