/*
 * MTRR (Memory Type Range Register) cleanup
 *
 * Copyright (C) 2009 Yinghai Lu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/kvm_para.h>
#include <linux/range.h>

#include <asm/processor.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
#include <asm/msr.h>

#include "mtrr.h"

struct var_mtrr_range_state {
	unsigned long	base_pfn;
	unsigned long	size_pfn;
	mtrr_type	type;
};

struct var_mtrr_state {
	unsigned long	range_startk;
	unsigned long	range_sizek;
	unsigned long	chunk_sizek;
	unsigned long	gran_sizek;
	unsigned int	reg;
};

/* Should be related to MTRR_VAR_RANGES nums */
#define RANGE_NUM	256

static struct range __initdata range[RANGE_NUM];
static int __initdata nr_range;

static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];

#define BIOS_BUG_MSG \
	"WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n"

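/*
 * Build the map of RAM that the variable MTRRs mark cacheable, as pfn
 * ranges: merge all WRBACK entries into 'range', subtract every
 * UNCACHABLE/WRPROT entry (plus an optional extra region, e.g. the
 * [4G, TOM2) area handled by the caller), then sort.  Returns the
 * number of entries left in 'range'.
 */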
static int __init
x86_get_mtrr_mem_range(struct range *range, int nr_range,
		       unsigned long extra_remove_base,
		       unsigned long extra_remove_size)
{
	unsigned long base, size;
	mtrr_type type;
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		if (type != MTRR_TYPE_WRBACK)
			continue;
		base = range_state[i].base_pfn;
		size = range_state[i].size_pfn;
		nr_range = add_range_with_merge(range, RANGE_NUM, nr_range,
						base, base + size);
	}

	Dprintk("After WB checking\n");
	for (i = 0; i < nr_range; i++)
		Dprintk("MTRR MAP PFN: %016llx - %016llx\n",
			range[i].start, range[i].end);

	/* Take out UC ranges: */
	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		if (type != MTRR_TYPE_UNCACHABLE &&
		    type != MTRR_TYPE_WRPROT)
			continue;
		size = range_state[i].size_pfn;
		if (!size)
			continue;
		base = range_state[i].base_pfn;
		if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed &&
		    (mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
		    (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
			/* Var MTRR contains UC entry below 1M? Skip it: */
			pr_warn(BIOS_BUG_MSG, i);
			if (base + size <= (1<<(20-PAGE_SHIFT)))
				continue;
			size -= (1<<(20-PAGE_SHIFT)) - base;
			base = 1<<(20-PAGE_SHIFT);
		}
		subtract_range(range, RANGE_NUM, base, base + size);
	}
	if (extra_remove_size)
		subtract_range(range, RANGE_NUM, extra_remove_base,
			       extra_remove_base + extra_remove_size);

	Dprintk("After UC checking\n");
	for (i = 0; i < RANGE_NUM; i++) {
		if (!range[i].end)
			continue;

		Dprintk("MTRR MAP PFN: %016llx - %016llx\n",
			range[i].start, range[i].end);
	}

	/* sort the ranges */
	nr_range = clean_sort_range(range, RANGE_NUM);

	Dprintk("After sorting\n");
	for (i = 0; i < nr_range; i++)
		Dprintk("MTRR MAP PFN: %016llx - %016llx\n",
			range[i].start, range[i].end);

	return nr_range;
}

#ifdef CONFIG_MTRR_SANITIZER

static unsigned long __init sum_ranges(struct range *range, int nr_range)
{
	unsigned long sum = 0;
	int i;

	for (i = 0; i < nr_range; i++)
		sum += range[i].end - range[i].start;

	return sum;
}

static int enable_mtrr_cleanup __initdata =
	CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT;

static int __init disable_mtrr_cleanup_setup(char *str)
{
	enable_mtrr_cleanup = 0;
	return 0;
}
early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup);

static int __init enable_mtrr_cleanup_setup(char *str)
{
	enable_mtrr_cleanup = 1;
	return 0;
}
early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);

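/*
 * Program one variable MTRR pair (MTRRphysBase/MTRRphysMask) from a
 * base/size given in KiB.  The low byte of the base holds the memory
 * type; the mask covers the CPU's physical address width and carries
 * the valid bit (bit 11, 0x800).  For example, with 36 physical address
 * bits, a 1G range (sizek = 1048576) gives
 * mask = 0xFFFFFFFFF & ~0x3FFFFFFF = 0xFC0000000, so MTRRphysMask
 * becomes 0xFC0000800 once the valid bit is OR-ed in.
 */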
static void __init
set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
	     unsigned char type)
{
	u32 base_lo, base_hi, mask_lo, mask_hi;
	u64 base, mask;

	if (!sizek) {
		fill_mtrr_var_range(reg, 0, 0, 0, 0);
		return;
	}

	mask = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
	mask &= ~((((u64)sizek) << 10) - 1);

	base = ((u64)basek) << 10;

	base |= type;
	mask |= 0x800;

	base_lo = base & ((1ULL<<32) - 1);
	base_hi = base >> 32;

	mask_lo = mask & ((1ULL<<32) - 1);
	mask_hi = mask >> 32;

	fill_mtrr_var_range(reg, base_lo, base_hi, mask_lo, mask_hi);
}

static void __init
save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
	      unsigned char type)
{
	range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10);
	range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10);
	range_state[reg].type = type;
}

static void __init set_var_mtrr_all(void)
{
	unsigned long basek, sizek;
	unsigned char type;
	unsigned int reg;

	for (reg = 0; reg < num_var_ranges; reg++) {
		basek = range_state[reg].base_pfn << (PAGE_SHIFT - 10);
		sizek = range_state[reg].size_pfn << (PAGE_SHIFT - 10);
		type = range_state[reg].type;

		set_var_mtrr(reg, basek, sizek, type);
	}
}

static unsigned long to_size_factor(unsigned long sizek, char *factorp)
{
	unsigned long base = sizek;
	char factor;

	if (base & ((1<<10) - 1)) {
		/* Not MB-aligned: */
		factor = 'K';
	} else if (base & ((1<<20) - 1)) {
		factor = 'M';
		base >>= 10;
	} else {
		factor = 'G';
		base >>= 20;
	}

	*factorp = factor;

	return base;
}

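/*
 * Greedily split [range_startk, range_startk + range_sizek) into
 * power-of-two blocks whose base is aligned to their size, writing one
 * variable MTRR per block.  E.g. a WB range [1M, 4M) becomes 1M@1M plus
 * 2M@2M: __ffs(1024) == 10 caps the first block at 1M, and the 2M
 * remainder then fits one naturally aligned block.  Returns the next
 * free register index.
 */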
static unsigned int __init
range_to_mtrr(unsigned int reg, unsigned long range_startk,
	      unsigned long range_sizek, unsigned char type)
{
	if (!range_sizek || (reg >= num_var_ranges))
		return reg;

	while (range_sizek) {
		unsigned long max_align, align;
		unsigned long sizek;

		/* Compute the maximum size with which we can make a range: */
		if (range_startk)
			max_align = __ffs(range_startk);
		else
			max_align = BITS_PER_LONG - 1;

		align = __fls(range_sizek);
		if (align > max_align)
			align = max_align;

		sizek = 1UL << align;
		if (mtrr_debug) {
			char start_factor = 'K', size_factor = 'K';
			unsigned long start_base, size_base;

			start_base = to_size_factor(range_startk, &start_factor);
			size_base = to_size_factor(sizek, &size_factor);

			Dprintk("Setting variable MTRR %d, "
				"base: %ld%cB, range: %ld%cB, type %s\n",
				reg, start_base, start_factor,
				size_base, size_factor,
				(type == MTRR_TYPE_UNCACHABLE) ? "UC" :
				((type == MTRR_TYPE_WRBACK) ? "WB" : "Other")
				);
		}
		save_var_mtrr(reg++, range_startk, sizek, type);
		range_startk += sizek;
		range_sizek -= sizek;
		if (reg >= num_var_ranges)
			break;
	}
	return reg;
}

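/*
 * Emit MTRRs for the pending WB range, possibly with a UC "hole": the
 * range is trimmed to gran_sizek granularity, then rounded up to a
 * chunk_sizek boundary.  The rounded-up excess is covered by an
 * UNCACHABLE MTRR when the hole stays under half of range0 (otherwise
 * range0 is cut back by one chunk and we retry).  Returns second_sizek,
 * the overlap spilled into the next range starting at 'basek', which
 * the caller must skip.
 */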
static unsigned __init
range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
			unsigned long sizek)
{
	unsigned long hole_basek, hole_sizek;
	unsigned long second_sizek;
	unsigned long range0_basek, range0_sizek;
	unsigned long range_basek, range_sizek;
	unsigned long chunk_sizek;
	unsigned long gran_sizek;

	hole_basek = 0;
	hole_sizek = 0;
	second_sizek = 0;
	chunk_sizek = state->chunk_sizek;
	gran_sizek = state->gran_sizek;

	/* Align with gran size, to prevent small blocks from using up MTRRs: */
	range_basek = ALIGN(state->range_startk, gran_sizek);
	if ((range_basek > basek) && basek)
		return second_sizek;

	state->range_sizek -= (range_basek - state->range_startk);
	range_sizek = ALIGN(state->range_sizek, gran_sizek);

	while (range_sizek > state->range_sizek) {
		range_sizek -= gran_sizek;
		if (!range_sizek)
			return 0;
	}
	state->range_sizek = range_sizek;

	/* Try to append some small hole: */
	range0_basek = state->range_startk;
	range0_sizek = ALIGN(state->range_sizek, chunk_sizek);

	/* No increase: */
	if (range0_sizek == state->range_sizek) {
		Dprintk("rangeX: %016lx - %016lx\n",
			range0_basek<<10,
			(range0_basek + state->range_sizek)<<10);
		state->reg = range_to_mtrr(state->reg, range0_basek,
				state->range_sizek, MTRR_TYPE_WRBACK);
		return 0;
	}

	/* Only cut back when it is not the last: */
	if (sizek) {
		while (range0_basek + range0_sizek > (basek + sizek)) {
			if (range0_sizek >= chunk_sizek)
				range0_sizek -= chunk_sizek;
			else
				range0_sizek = 0;

			if (!range0_sizek)
				break;
		}
	}

second_try:
	range_basek = range0_basek + range0_sizek;

	/* One hole in the middle: */
	if (range_basek > basek && range_basek <= (basek + sizek))
		second_sizek = range_basek - basek;

	if (range0_sizek > state->range_sizek) {

		/* One hole in middle or at the end: */
		hole_sizek = range0_sizek - state->range_sizek - second_sizek;

		/* Hole size should be less than half of range0 size: */
		if (hole_sizek >= (range0_sizek >> 1) &&
		    range0_sizek >= chunk_sizek) {
			range0_sizek -= chunk_sizek;
			second_sizek = 0;
			hole_sizek = 0;

			goto second_try;
		}
	}

	if (range0_sizek) {
		Dprintk("range0: %016lx - %016lx\n",
			range0_basek<<10,
			(range0_basek + range0_sizek)<<10);
		state->reg = range_to_mtrr(state->reg, range0_basek,
				range0_sizek, MTRR_TYPE_WRBACK);
	}

	if (range0_sizek < state->range_sizek) {
		/* Need to handle left over range: */
		range_sizek = state->range_sizek - range0_sizek;

		Dprintk("range: %016lx - %016lx\n",
			range_basek<<10,
			(range_basek + range_sizek)<<10);

		state->reg = range_to_mtrr(state->reg, range_basek,
				range_sizek, MTRR_TYPE_WRBACK);
	}

	if (hole_sizek) {
		hole_basek = range_basek - hole_sizek - second_sizek;
		Dprintk("hole: %016lx - %016lx\n",
			hole_basek<<10,
			(hole_basek + hole_sizek)<<10);
		state->reg = range_to_mtrr(state->reg, hole_basek,
				hole_sizek, MTRR_TYPE_UNCACHABLE);
	}

	return second_sizek;
}

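/*
 * Feed one memory range into the accumulator in 'state': merge with the
 * pending range when the new one starts below 1M or is contiguous with
 * it; otherwise flush the pending range to real MTRRs via
 * range_to_mtrr_with_hole() and start accumulating at 'base_pfn' (past
 * whatever second_sizek already consumed).
 */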
static void __init
set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
		   unsigned long size_pfn)
{
	unsigned long basek, sizek;
	unsigned long second_sizek = 0;

	if (state->reg >= num_var_ranges)
		return;

	basek = base_pfn << (PAGE_SHIFT - 10);
	sizek = size_pfn << (PAGE_SHIFT - 10);

	/* See if I can merge with the last range: */
	if ((basek <= 1024) ||
	    (state->range_startk + state->range_sizek == basek)) {
		unsigned long endk = basek + sizek;
		state->range_sizek = endk - state->range_startk;
		return;
	}
	/* Write the range mtrrs: */
	if (state->range_sizek != 0)
		second_sizek = range_to_mtrr_with_hole(state, basek, sizek);

	/* Allocate an msr: */
	state->range_startk = basek + second_sizek;
	state->range_sizek = sizek - second_sizek;
}

/* Minimum size of mtrr block that can take a hole: */
static u64 mtrr_chunk_size __initdata = (256ULL<<20);

static int __init parse_mtrr_chunk_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	mtrr_chunk_size = memparse(p, &p);
	return 0;
}
early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt);

/* Granularity of an mtrr block: */
static u64 mtrr_gran_size __initdata;

static int __init parse_mtrr_gran_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	mtrr_gran_size = memparse(p, &p);
	return 0;
}
early_param("mtrr_gran_size", parse_mtrr_gran_size_opt);

static unsigned long nr_mtrr_spare_reg __initdata =
	CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT;

static int __init parse_mtrr_spare_reg(char *arg)
{
	if (arg)
		nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0);
	return 0;
}
early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg);

static int __init
x86_setup_var_mtrrs(struct range *range, int nr_range,
		    u64 chunk_size, u64 gran_size)
{
	struct var_mtrr_state var_state;
	int num_reg;
	int i;

	var_state.range_startk = 0;
	var_state.range_sizek = 0;
	var_state.reg = 0;
	var_state.chunk_sizek = chunk_size >> 10;
	var_state.gran_sizek = gran_size >> 10;

	memset(range_state, 0, sizeof(range_state));

	/* Write the range: */
	for (i = 0; i < nr_range; i++) {
		set_var_mtrr_range(&var_state, range[i].start,
				   range[i].end - range[i].start);
	}

	/* Write the last range: */
	if (var_state.range_sizek != 0)
		range_to_mtrr_with_hole(&var_state, 0, 0);

	num_reg = var_state.reg;
	/* Clear out the extra MTRR's: */
	while (var_state.reg < num_var_ranges) {
		save_var_mtrr(var_state.reg, 0, 0, 0);
		var_state.reg++;
	}

	return num_reg;
}

struct mtrr_cleanup_result {
	unsigned long	gran_sizek;
	unsigned long	chunk_sizek;
	unsigned long	lose_cover_sizek;
	unsigned int	num_reg;
	int		bad;
};

/*
 * gran_size: 64K, 128K, 256K, 512K, 1M, 2M, ..., 2G
 * chunk size: gran_size, ..., 2G
 * so we need (1+16)*8
 */
#define NUM_RESULT	136
#define PSHIFT		(PAGE_SHIFT - 10)

static struct mtrr_cleanup_result __initdata result[NUM_RESULT];
static unsigned long __initdata min_loss_pfn[RANGE_NUM];

static void __init print_out_mtrr_range_state(void)
{
	char start_factor = 'K', size_factor = 'K';
	unsigned long start_base, size_base;
	mtrr_type type;
	int i;

	for (i = 0; i < num_var_ranges; i++) {

		size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10);
		if (!size_base)
			continue;

		size_base = to_size_factor(size_base, &size_factor);
		start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
		start_base = to_size_factor(start_base, &start_factor);
		type = range_state[i].type;

		Dprintk("reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
			i, start_base, start_factor,
			size_base, size_factor,
			(type == MTRR_TYPE_UNCACHABLE) ? "UC" :
			((type == MTRR_TYPE_WRPROT) ? "WP" :
			((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"))
			);
	}
}

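/*
 * The cleanup only handles the common BIOS layout: at least one UC
 * entry, and nothing but WB and UC among the in-use variable MTRRs.
 * Anything fancier (WC, WP or WT entries) is left untouched.
 */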
static int __init mtrr_need_cleanup(void)
{
	int i;
	mtrr_type type;
	unsigned long size;
	/* Extra one for all 0: */
	int num[MTRR_NUM_TYPES + 1];

	/* Check entries number: */
	memset(num, 0, sizeof(num));
	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		size = range_state[i].size_pfn;
		if (type >= MTRR_NUM_TYPES)
			continue;
		if (!size)
			type = MTRR_NUM_TYPES;
		num[type]++;
	}

	/* Check if we got UC entries: */
	if (!num[MTRR_TYPE_UNCACHABLE])
		return 0;

	/* Check if we only had WB and UC */
	if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
	    num_var_ranges - num[MTRR_NUM_TYPES])
		return 0;

	return 1;
}

static unsigned long __initdata range_sums;

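/*
 * Trial-run one (chunk_size, gran_size) combination: generate a
 * candidate layout in range_state, rebuild the memory map it would
 * produce and score it against the reference map in 'range'.  A result
 * is marked bad if it covers RAM the original map didn't (that would
 * newly cache a hole), or if it claims zero loss but still differs from
 * the reference.  The smallest loss seen per register count is tracked
 * in min_loss_pfn[].
 */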
static void __init
mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
		      unsigned long x_remove_base,
		      unsigned long x_remove_size, int i)
{
	/*
	 * range_new should really be an automatic variable, but
	 * putting 4096 bytes on the stack is frowned upon, to put it
	 * mildly. It is safe to make it a static __initdata variable,
	 * since mtrr_calc_range_state is only called during init and
	 * there's no way it will call itself recursively.
	 */
	static struct range range_new[RANGE_NUM] __initdata;
	unsigned long range_sums_new;
	int nr_range_new;
	int num_reg;

	/* Convert ranges to var ranges state: */
	num_reg = x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);

	/* We got new setting in range_state, check it: */
	memset(range_new, 0, sizeof(range_new));
	nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
					      x_remove_base, x_remove_size);
	range_sums_new = sum_ranges(range_new, nr_range_new);

	result[i].chunk_sizek = chunk_size >> 10;
	result[i].gran_sizek = gran_size >> 10;
	result[i].num_reg = num_reg;

	if (range_sums < range_sums_new) {
		result[i].lose_cover_sizek = (range_sums_new - range_sums) << PSHIFT;
		result[i].bad = 1;
	} else {
		result[i].lose_cover_sizek = (range_sums - range_sums_new) << PSHIFT;
	}

	/* Double check it: */
	if (!result[i].bad && !result[i].lose_cover_sizek) {
		if (nr_range_new != nr_range || memcmp(range, range_new, sizeof(range)))
			result[i].bad = 1;
	}

	if (!result[i].bad && (range_sums - range_sums_new < min_loss_pfn[num_reg]))
		min_loss_pfn[num_reg] = range_sums - range_sums_new;
}

static void __init mtrr_print_out_one_result(int i)
{
	unsigned long gran_base, chunk_base, lose_base;
	char gran_factor, chunk_factor, lose_factor;

	gran_base = to_size_factor(result[i].gran_sizek, &gran_factor);
	chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor);
	lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor);

	pr_info("%sgran_size: %ld%c \tchunk_size: %ld%c \t",
		result[i].bad ? "*BAD*" : " ",
		gran_base, gran_factor, chunk_base, chunk_factor);
	pr_cont("num_reg: %d \tlose cover RAM: %s%ld%c\n",
		result[i].num_reg, result[i].bad ? "-" : "",
		lose_base, lose_factor);
}

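/*
 * Pick the winner: the smallest register count that achieves zero
 * coverage loss while leaving at least nr_mtrr_spare_reg registers
 * free, then the first result entry matching it exactly.
 */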
static int __init mtrr_search_optimal_index(void)
{
	int num_reg_good;
	int index_good;
	int i;

	if (nr_mtrr_spare_reg >= num_var_ranges)
		nr_mtrr_spare_reg = num_var_ranges - 1;

	num_reg_good = -1;
	for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
		if (!min_loss_pfn[i])
			num_reg_good = i;
	}

	index_good = -1;
	if (num_reg_good != -1) {
		for (i = 0; i < NUM_RESULT; i++) {
			if (!result[i].bad &&
			    result[i].num_reg == num_reg_good &&
			    !result[i].lose_cover_sizek) {
				index_good = i;
				break;
			}
		}
	}

	return index_good;
}

int __init mtrr_cleanup(void)
{
	unsigned long x_remove_base, x_remove_size;
	unsigned long base, size, def, dummy;
	u64 chunk_size, gran_size;
	mtrr_type type;
	int index_good;
	int i;

	if (!mtrr_enabled())
		return 0;

	if (!cpu_feature_enabled(X86_FEATURE_MTRR) || enable_mtrr_cleanup < 1)
		return 0;

	rdmsr(MSR_MTRRdefType, def, dummy);
	def &= 0xff;
	if (def != MTRR_TYPE_UNCACHABLE)
		return 0;

	/* Get it and store it aside: */
	memset(range_state, 0, sizeof(range_state));
	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i, &base, &size, &type);
		range_state[i].base_pfn = base;
		range_state[i].size_pfn = size;
		range_state[i].type = type;
	}

	/* Check if we need to handle it and can handle it: */
	if (!mtrr_need_cleanup())
		return 0;

	/* Print original var MTRRs at first, for debugging: */
	Dprintk("original variable MTRRs\n");
	print_out_mtrr_range_state();

	memset(range, 0, sizeof(range));
	x_remove_size = 0;
	x_remove_base = 1 << (32 - PAGE_SHIFT);
	if (mtrr_tom2)
		x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;

	/*
	 * [0, 1M) should always be covered by var mtrr with WB
	 * and fixed mtrrs should take effect before var mtrr for it:
	 */
	nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0,
					1ULL<<(20 - PAGE_SHIFT));
	/* add from var mtrr at last */
	nr_range = x86_get_mtrr_mem_range(range, nr_range,
					  x_remove_base, x_remove_size);

	range_sums = sum_ranges(range, nr_range);
	pr_info("total RAM covered: %ldM\n",
		range_sums >> (20 - PAGE_SHIFT));

	if (mtrr_chunk_size && mtrr_gran_size) {
		i = 0;
		mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size,
				      x_remove_base, x_remove_size, i);

		mtrr_print_out_one_result(i);

		if (!result[i].bad) {
			set_var_mtrr_all();
			Dprintk("New variable MTRRs\n");
			print_out_mtrr_range_state();
			return 1;
		}
		pr_info("invalid mtrr_gran_size or mtrr_chunk_size, will find optimal one\n");
	}

	i = 0;
	memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
	memset(result, 0, sizeof(result));
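	/*
	 * Exhaustively try all combinations: gran_size walks 64K..2G in
	 * powers of two, chunk_size walks gran_size..2G, for at most
	 * NUM_RESULT candidates (see the comment above NUM_RESULT).
	 */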
	for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) {

		for (chunk_size = gran_size; chunk_size < (1ULL<<32);
		     chunk_size <<= 1) {

			if (i >= NUM_RESULT)
				continue;

			mtrr_calc_range_state(chunk_size, gran_size,
					      x_remove_base, x_remove_size, i);
			if (mtrr_debug) {
				mtrr_print_out_one_result(i);
				pr_info("\n");
			}

			i++;
		}
	}

	/* Try to find the optimal index: */
	index_good = mtrr_search_optimal_index();

	if (index_good != -1) {
		pr_info("Found optimal setting for mtrr clean up\n");
		i = index_good;
		mtrr_print_out_one_result(i);

		/* Convert ranges to var ranges state: */
		chunk_size = result[i].chunk_sizek;
		chunk_size <<= 10;
		gran_size = result[i].gran_sizek;
		gran_size <<= 10;
		x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
		set_var_mtrr_all();
		Dprintk("New variable MTRRs\n");
		print_out_mtrr_range_state();
		return 1;
	} else {
		/* print out all */
		for (i = 0; i < NUM_RESULT; i++)
			mtrr_print_out_one_result(i);
	}

	pr_info("mtrr_cleanup: can not find optimal value\n");
	pr_info("please specify mtrr_gran_size/mtrr_chunk_size\n");

	return 0;
}
#else
int __init mtrr_cleanup(void)
{
	return 0;
}
#endif

static int disable_mtrr_trim;

static int __init disable_mtrr_trim_setup(char *str)
{
	disable_mtrr_trim = 1;
	return 0;
}
early_param("disable_mtrr_trim", disable_mtrr_trim_setup);

/*
 * Newer AMD K8s and later CPUs have a special magic MSR way to force WB
 * for memory >4GB. Check for that here.
 * Note this won't check if the MTRRs < 4GB where the magic bit doesn't
 * apply to are wrong, but so far we don't know of any such case in the wild.
 */
#define Tom2Enabled		(1U << 21)
#define Tom2ForceMemTypeWB	(1U << 22)

int __init amd_special_default_mtrr(void)
{
	u32 l, h;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return 0;
	if (boot_cpu_data.x86 < 0xf)
		return 0;
	/* In case some hypervisor doesn't pass SYSCFG through: */
	if (rdmsr_safe(MSR_AMD64_SYSCFG, &l, &h) < 0)
		return 0;
	/*
	 * Memory between 4GB and top of mem is forced WB by this magic bit.
	 * Reserved before K8RevF, but should be zero there.
	 */
	if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) ==
	    (Tom2Enabled | Tom2ForceMemTypeWB))
		return 1;
	return 0;
}

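/*
 * Mark [start_pfn, limit_pfn) reserved in the e820 table so the kernel
 * won't allocate from RAM that no WB MTRR covers.  Returns the number
 * of bytes whose e820 type was actually changed.
 */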
static u64 __init
real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn)
{
	u64 trim_start, trim_size;

	trim_start = start_pfn;
	trim_start <<= PAGE_SHIFT;

	trim_size = limit_pfn;
	trim_size <<= PAGE_SHIFT;
	trim_size -= trim_start;

	return e820__range_update(trim_start, trim_size, E820_TYPE_RAM, E820_TYPE_RESERVED);
}

/**
 * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
 * @end_pfn: ending page frame number
 *
 * Some buggy BIOSes don't setup the MTRRs properly for systems with certain
 * memory configurations. This routine checks that the highest MTRR matches
 * the end of memory, to make sure the MTRRs having a write back type cover
 * all of the memory the kernel is intending to use. If not, it'll trim any
 * memory off the end by adjusting end_pfn, removing it from the kernel's
 * allocation pools, warning the user with an obnoxious message.
 */
int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
{
	unsigned long i, base, size, highest_pfn = 0, def, dummy;
	mtrr_type type;
	u64 total_trim_size;
	/* extra one for all 0 */
	int num[MTRR_NUM_TYPES + 1];

	if (!mtrr_enabled())
		return 0;

	/*
	 * Make sure we only trim uncachable memory on machines that
	 * support the Intel MTRR architecture:
	 */
	if (!cpu_feature_enabled(X86_FEATURE_MTRR) || disable_mtrr_trim)
		return 0;

	rdmsr(MSR_MTRRdefType, def, dummy);
	def &= MTRR_DEF_TYPE_TYPE;
	if (def != MTRR_TYPE_UNCACHABLE)
		return 0;

	/* Get it and store it aside: */
	memset(range_state, 0, sizeof(range_state));
	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i, &base, &size, &type);
		range_state[i].base_pfn = base;
		range_state[i].size_pfn = size;
		range_state[i].type = type;
	}

	/* Find highest cached pfn: */
	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		if (type != MTRR_TYPE_WRBACK)
			continue;
		base = range_state[i].base_pfn;
		size = range_state[i].size_pfn;
		if (highest_pfn < base + size)
			highest_pfn = base + size;
	}

	/* kvm/qemu doesn't have mtrr set right, don't trim them all: */
	if (!highest_pfn) {
		pr_info("CPU MTRRs all blank - virtualized system.\n");
		return 0;
	}

	/* Check entries number: */
	memset(num, 0, sizeof(num));
	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		if (type >= MTRR_NUM_TYPES)
			continue;
		size = range_state[i].size_pfn;
		if (!size)
			type = MTRR_NUM_TYPES;
		num[type]++;
	}

	/* No entry for WB? */
	if (!num[MTRR_TYPE_WRBACK])
		return 0;

	/* Check if we only had WB and UC: */
	if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
	    num_var_ranges - num[MTRR_NUM_TYPES])
		return 0;

	memset(range, 0, sizeof(range));
	nr_range = 0;
	if (mtrr_tom2) {
		range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT));
		range[nr_range].end = mtrr_tom2 >> PAGE_SHIFT;
		if (highest_pfn < range[nr_range].end)
			highest_pfn = range[nr_range].end;
		nr_range++;
	}
	nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);

	/* Check the head: */
	total_trim_size = 0;
	if (range[0].start)
		total_trim_size += real_trim_memory(0, range[0].start);

	/* Check the holes: */
	for (i = 0; i < nr_range - 1; i++) {
		if (range[i].end < range[i+1].start)
			total_trim_size += real_trim_memory(range[i].end,
							    range[i+1].start);
	}

	/* Check the top: */
	i = nr_range - 1;
	if (range[i].end < end_pfn)
		total_trim_size += real_trim_memory(range[i].end,
						    end_pfn);

	if (total_trim_size) {
		pr_warn("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n",
			total_trim_size >> 20);

		if (!changed_by_mtrr_cleanup)
			WARN_ON(1);

		pr_info("update e820 for mtrr\n");
		e820__update_table_print();

		return 1;
	}

	return 0;
}