/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include "internal.h"

#ifdef CONFIG_DEBUG_MEMORY_INIT
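/* Verbosity of the checks below; the MMINIT_* levels come from internal.h. */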
int __meminitdata mminit_loglevel;

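/*
 * SECTIONS_SHIFT is only provided by the sparse memory model headers;
 * fall back to 0 so the reporting below still compiles elsewhere.
 */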
#ifndef SECTIONS_SHIFT
#define SECTIONS_SHIFT 0
#endif

/* The zonelists are simply reported, validation is manual. */
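/*
 * Illustrative output for one populated zone (node IDs and zone names
 * are examples only and depend on the machine and configuration):
 *
 *   mminit::zonelist general 0:Normal = 0:Normal 0:DMA32 0:DMA
 */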
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid)
				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
			pr_cont("\n");
		}
	}
}

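/*
 * Sanity-check the compile-time layout of page->flags. The fields are
 * packed downward from the most significant bit, roughly:
 *
 *	| [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS |
 *
 * Bracketed fields may have zero width depending on the configuration,
 * in which case that information is kept outside page->flags and the
 * *_NOT_IN_PAGE_FLAGS cases below are reported instead.
 */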
void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_CPUPID_SHIFT;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/*
	 * Check for bitmask overlaps: if any of the shifted masks shared a
	 * bit, the sum below would carry and no longer equal the bitwise OR
	 * of the same terms.
	 */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}

static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

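/* Parent kobject for /sys/kernel/mm/, created by mm_sysfs_init() below. */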
struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

#ifdef CONFIG_SMP
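/*
 * Batch size for updates to the vm_committed_as percpu counter used by
 * overcommit accounting; recomputed from the amount of memory and the
 * number of CPUs by mm_compute_batch() below.
 */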
s32 vm_committed_as_batch = 32;

static void __meminit mm_compute_batch(void)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);

	/* batch size set to 0.4% of (total memory/#cpus), or max int32 */
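	/*
	 * Worked example (illustrative, assuming 4KiB pages): with 16GiB of
	 * RAM (4194304 pages) and 8 CPUs, (4194304 / 8) / 256 = 2048, which
	 * wins over the CPU-based floor of max(8 * 2, 32) = 32.
	 */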
	memsized_batch = min_t(u64, (totalram_pages()/nr)/256, 0x7fffffff);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}

static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block compute_batch_nb __meminitdata = {
	.notifier_call = mm_compute_batch_notifier,
	.priority = IPC_CALLBACK_PRI, /* use lowest priority */
};

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch();
	register_hotmemory_notifier(&compute_batch_nb);

	return 0;
}

__initcall(mm_compute_batch_init);

#endif

static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);