1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * imr.c -- Intel Isolated Memory Region driver |
4 | * |
5 | * Copyright(c) 2013 Intel Corporation. |
6 | * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie> |
7 | * |
8 | * IMR registers define an isolated region of memory that can |
9 | * be masked to prohibit certain system agents from accessing memory. |
10 | * When a device behind a masked port performs an access - snooped or |
11 | * not, an IMR may optionally prevent that transaction from changing |
12 | * the state of memory or from getting correct data in response to the |
13 | * operation. |
14 | * |
15 | * Write data will be dropped and reads will return 0xFFFFFFFF, the |
16 | * system will reset and system BIOS will print out an error message to |
17 | * inform the user that an IMR has been violated. |
18 | * |
19 | * This code is based on the Linux MTRR code and reference code from |
20 | * Intel's Quark BSP EFI, Linux and grub code. |
21 | * |
22 | * See quark-x1000-datasheet.pdf for register definitions. |
23 | * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf |
24 | */ |
25 | |
26 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
27 | |
28 | #include <asm-generic/sections.h> |
29 | #include <asm/cpu_device_id.h> |
30 | #include <asm/imr.h> |
31 | #include <asm/iosf_mbi.h> |
32 | #include <asm/io.h> |
33 | |
34 | #include <linux/debugfs.h> |
35 | #include <linux/init.h> |
36 | #include <linux/mm.h> |
37 | #include <linux/types.h> |
38 | |
39 | struct imr_device { |
40 | bool init; |
41 | struct mutex lock; |
42 | int max_imr; |
43 | int reg_base; |
44 | }; |
45 | |
46 | static struct imr_device imr_dev; |
47 | |
48 | /* |
49 | * IMR read/write mask control registers. |
50 | * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for |
51 | * bit definitions. |
52 | * |
 * addr_lo
54 | * 31 Lock bit |
55 | * 30:24 Reserved |
56 | * 23:2 1 KiB aligned lo address |
57 | * 1:0 Reserved |
58 | * |
59 | * addr_hi |
60 | * 31:24 Reserved |
61 | * 23:2 1 KiB aligned hi address |
62 | * 1:0 Reserved |
63 | */ |
64 | #define IMR_LOCK BIT(31) |
65 | |
66 | struct imr_regs { |
67 | u32 addr_lo; |
68 | u32 addr_hi; |
69 | u32 rmask; |
70 | u32 wmask; |
71 | }; |
72 | |
73 | #define IMR_NUM_REGS (sizeof(struct imr_regs)/sizeof(u32)) |
74 | #define IMR_SHIFT 8 |
75 | #define imr_to_phys(x) ((x) << IMR_SHIFT) |
76 | #define phys_to_imr(x) ((x) >> IMR_SHIFT) |
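
/*
 * Worked example of the encoding above (illustrative values only): a
 * 1 KiB aligned physical address of 0x00100000 is stored in a register
 * as phys_to_imr(0x00100000) == 0x1000, i.e. the address lands in the
 * 1 KiB aligned field at bits 23:2, and imr_to_phys() reverses the
 * shift to recover 0x00100000.
 */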
77 | |
78 | /** |
 * imr_is_enabled - true if an IMR is enabled, false otherwise.
 *
 * Determines if an IMR is enabled based on address range and read/write
 * mask. An IMR set with an address range set to zero and a read/write
 * access mask set to all is considered to be disabled. An IMR in any
 * other state - for example an address range of zero without the
 * read/write access mask set to all - is considered to be enabled.
 * This definition of disabled is how firmware switches off an IMR and
 * is maintained in the kernel for consistency.
 *
 * @imr: pointer to IMR descriptor.
 * @return: true if the IMR is enabled, false if disabled.
91 | */ |
92 | static inline int imr_is_enabled(struct imr_regs *imr) |
93 | { |
94 | return !(imr->rmask == IMR_READ_ACCESS_ALL && |
95 | imr->wmask == IMR_WRITE_ACCESS_ALL && |
96 | imr_to_phys(imr->addr_lo) == 0 && |
97 | imr_to_phys(imr->addr_hi) == 0); |
98 | } |
99 | |
100 | /** |
101 | * imr_read - read an IMR at a given index. |
102 | * |
103 | * Requires caller to hold imr mutex. |
104 | * |
105 | * @idev: pointer to imr_device structure. |
106 | * @imr_id: IMR entry to read. |
107 | * @imr: IMR structure representing address and access masks. |
 * @return: 0 on success or error code passed from iosf_mbi on failure.
109 | */ |
110 | static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr) |
111 | { |
112 | u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base; |
113 | int ret; |
114 | |
	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_lo);
	if (ret)
		return ret;

	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_hi);
	if (ret)
		return ret;

	ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->rmask);
	if (ret)
		return ret;

	return iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->wmask);
128 | } |
129 | |
130 | /** |
131 | * imr_write - write an IMR at a given index. |
132 | * |
133 | * Requires caller to hold imr mutex. |
134 | * Note lock bits need to be written independently of address bits. |
135 | * |
136 | * @idev: pointer to imr_device structure. |
137 | * @imr_id: IMR entry to write. |
138 | * @imr: IMR structure representing address and access masks. |
 * @return: 0 on success or error code passed from iosf_mbi on failure.
140 | */ |
141 | static int imr_write(struct imr_device *idev, u32 imr_id, struct imr_regs *imr) |
142 | { |
143 | unsigned long flags; |
144 | u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base; |
145 | int ret; |
146 | |
147 | local_irq_save(flags); |
148 | |
	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_lo);
	if (ret)
		goto failed;

	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_hi);
	if (ret)
		goto failed;

	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->rmask);
	if (ret)
		goto failed;

	ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->wmask);
	if (ret)
		goto failed;
164 | |
165 | local_irq_restore(flags); |
166 | return 0; |
167 | failed: |
168 | /* |
169 | * If writing to the IOSF failed then we're in an unknown state, |
170 | * likely a very bad state. An IMR in an invalid state will almost |
171 | * certainly lead to a memory access violation. |
172 | */ |
173 | local_irq_restore(flags); |
	WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n",
175 | imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK); |
176 | |
177 | return ret; |
178 | } |
179 | |
180 | /** |
181 | * imr_dbgfs_state_show - print state of IMR registers. |
182 | * |
183 | * @s: pointer to seq_file for output. |
184 | * @unused: unused parameter. |
 * @return: 0 on success or error code passed from iosf_mbi on failure.
186 | */ |
187 | static int imr_dbgfs_state_show(struct seq_file *s, void *unused) |
188 | { |
189 | phys_addr_t base; |
190 | phys_addr_t end; |
191 | int i; |
192 | struct imr_device *idev = s->private; |
193 | struct imr_regs imr; |
194 | size_t size; |
195 | int ret = -ENODEV; |
196 | |
197 | mutex_lock(&idev->lock); |
198 | |
199 | for (i = 0; i < idev->max_imr; i++) { |
200 | |
		ret = imr_read(idev, i, &imr);
202 | if (ret) |
203 | break; |
204 | |
205 | /* |
206 | * Remember to add IMR_ALIGN bytes to size to indicate the |
207 | * inherent IMR_ALIGN size bytes contained in the masked away |
208 | * lower ten bits. |
209 | */ |
		if (imr_is_enabled(&imr)) {
			base = imr_to_phys(imr.addr_lo);
			end = imr_to_phys(imr.addr_hi) + IMR_MASK;
			size = end - base + 1;
		} else {
			base = 0;
			end = 0;
			size = 0;
		}
		seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
			   "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
			   &base, &end, size, imr.rmask, imr.wmask,
			   imr_is_enabled(&imr) ? "enabled " : "disabled",
			   imr.addr_lo & IMR_LOCK ? "locked" : "unlocked");
224 | } |
225 | |
	mutex_unlock(&idev->lock);
227 | return ret; |
228 | } |
229 | DEFINE_SHOW_ATTRIBUTE(imr_dbgfs_state); |
230 | |
231 | /** |
232 | * imr_debugfs_register - register debugfs hooks. |
233 | * |
234 | * @idev: pointer to imr_device structure. |
235 | */ |
236 | static void imr_debugfs_register(struct imr_device *idev) |
237 | { |
	debugfs_create_file("imr_state", 0444, NULL, idev,
			    &imr_dbgfs_state_fops);
240 | } |
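
/*
 * Usage note (illustrative): with debugfs mounted at the conventional
 * /sys/kernel/debug, the IMR configuration exposed above can be read
 * from userspace with:
 *
 *	# cat /sys/kernel/debug/imr_state
 *
 * which prints one line per IMR in the format generated by
 * imr_dbgfs_state_show().
 */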
241 | |
242 | /** |
243 | * imr_check_params - check passed address range IMR alignment and non-zero size |
244 | * |
245 | * @base: base address of intended IMR. |
246 | * @size: size of intended IMR. |
 * @return: zero on valid range, -EINVAL on unaligned base/size or zero size.
248 | */ |
249 | static int imr_check_params(phys_addr_t base, size_t size) |
250 | { |
251 | if ((base & IMR_MASK) || (size & IMR_MASK)) { |
		pr_err("base %pa size 0x%08zx must align to 1KiB\n",
253 | &base, size); |
254 | return -EINVAL; |
255 | } |
256 | if (size == 0) |
257 | return -EINVAL; |
258 | |
259 | return 0; |
260 | } |
261 | |
262 | /** |
263 | * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends. |
264 | * |
265 | * IMR addr_hi has a built in offset of plus IMR_ALIGN (0x400) bytes from the |
266 | * value in the register. We need to subtract IMR_ALIGN bytes from input sizes |
267 | * as a result. |
268 | * |
269 | * @size: input size bytes. |
270 | * @return: reduced size. |
271 | */ |
272 | static inline size_t imr_raw_size(size_t size) |
273 | { |
274 | return size - IMR_ALIGN; |
275 | } |
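
/*
 * Worked example (illustrative values, using the IMR_ALIGN/IMR_MASK
 * constants from asm/imr.h): a request for a 4 KiB (0x1000 byte) IMR at
 * base 0x00100000 gives raw_size = 0x1000 - IMR_ALIGN = 0xc00, so
 * end = 0x00100c00 is what gets programmed into addr_hi. Reading back,
 * imr_to_phys(addr_hi) + IMR_MASK == 0x00100fff, the last byte of the
 * requested 4 KiB region.
 */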
276 | |
277 | /** |
278 | * imr_address_overlap - detects an address overlap. |
279 | * |
280 | * @addr: address to check against an existing IMR. |
281 | * @imr: imr being checked. |
282 | * @return: true for overlap false for no overlap. |
283 | */ |
284 | static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr) |
285 | { |
286 | return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi); |
287 | } |
288 | |
289 | /** |
290 | * imr_add_range - add an Isolated Memory Region. |
291 | * |
292 | * @base: physical base address of region aligned to 1KiB. |
293 | * @size: physical size of region in bytes must be aligned to 1KiB. |
 * @rmask: read access mask.
 * @wmask: write access mask.
296 | * @return: zero on success or negative value indicating error. |
297 | */ |
298 | int imr_add_range(phys_addr_t base, size_t size, |
299 | unsigned int rmask, unsigned int wmask) |
300 | { |
301 | phys_addr_t end; |
302 | unsigned int i; |
303 | struct imr_device *idev = &imr_dev; |
304 | struct imr_regs imr; |
305 | size_t raw_size; |
306 | int reg; |
307 | int ret; |
308 | |
	if (WARN_ONCE(idev->init == false, "driver not initialized"))
310 | return -ENODEV; |
311 | |
312 | ret = imr_check_params(base, size); |
313 | if (ret) |
314 | return ret; |
315 | |
316 | /* Tweak the size value. */ |
317 | raw_size = imr_raw_size(size); |
318 | end = base + raw_size; |
319 | |
320 | /* |
321 | * Check for reserved IMR value common to firmware, kernel and grub |
322 | * indicating a disabled IMR. |
323 | */ |
324 | imr.addr_lo = phys_to_imr(base); |
325 | imr.addr_hi = phys_to_imr(end); |
326 | imr.rmask = rmask; |
327 | imr.wmask = wmask; |
	if (!imr_is_enabled(&imr))
329 | return -ENOTSUPP; |
330 | |
331 | mutex_lock(&idev->lock); |
332 | |
333 | /* |
334 | * Find a free IMR while checking for an existing overlapping range. |
335 | * Note there's no restriction in silicon to prevent IMR overlaps. |
336 | * For the sake of simplicity and ease in defining/debugging an IMR |
337 | * memory map we exclude IMR overlaps. |
338 | */ |
339 | reg = -1; |
340 | for (i = 0; i < idev->max_imr; i++) { |
		ret = imr_read(idev, i, &imr);
		if (ret)
			goto failed;

		/* Find overlap @ base or end of requested range. */
		ret = -EINVAL;
		if (imr_is_enabled(&imr)) {
			if (imr_address_overlap(base, &imr))
				goto failed;
			if (imr_address_overlap(end, &imr))
351 | goto failed; |
352 | } else { |
353 | reg = i; |
354 | } |
355 | } |
356 | |
357 | /* Error out if we have no free IMR entries. */ |
358 | if (reg == -1) { |
359 | ret = -ENOMEM; |
360 | goto failed; |
361 | } |
362 | |
	pr_debug("add %d phys %pa-%pa size %zx mask 0x%08x wmask 0x%08x\n",
364 | reg, &base, &end, raw_size, rmask, wmask); |
365 | |
366 | /* Enable IMR at specified range and access mask. */ |
367 | imr.addr_lo = phys_to_imr(base); |
368 | imr.addr_hi = phys_to_imr(end); |
369 | imr.rmask = rmask; |
370 | imr.wmask = wmask; |
371 | |
	ret = imr_write(idev, reg, &imr);
373 | if (ret < 0) { |
374 | /* |
375 | * In the highly unlikely event iosf_mbi_write failed |
376 | * attempt to rollback the IMR setup skipping the trapping |
377 | * of further IOSF write failures. |
378 | */ |
379 | imr.addr_lo = 0; |
380 | imr.addr_hi = 0; |
381 | imr.rmask = IMR_READ_ACCESS_ALL; |
382 | imr.wmask = IMR_WRITE_ACCESS_ALL; |
		imr_write(idev, reg, &imr);
384 | } |
385 | failed: |
	mutex_unlock(&idev->lock);
387 | return ret; |
388 | } |
389 | EXPORT_SYMBOL_GPL(imr_add_range); |
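
/*
 * Minimal usage sketch for the exported API (hypothetical caller code;
 * "phys" stands for a 1 KiB aligned physical address the caller already
 * owns, and the IMR_CPU read/write masks are the same masks this driver
 * uses for the kernel text in imr_fixup_memmap() below):
 *
 *	int ret = imr_add_range(phys, 0x1000, IMR_CPU, IMR_CPU);
 *	if (ret)
 *		return ret;
 *	...
 *	ret = imr_remove_range(phys, 0x1000);
 */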
390 | |
391 | /** |
392 | * __imr_remove_range - delete an Isolated Memory Region. |
393 | * |
394 | * This function allows you to delete an IMR by its index specified by reg or |
395 | * by address range specified by base and size respectively. If you specify an |
396 | * index on its own the base and size parameters are ignored. |
 * __imr_remove_range(0, base, size); delete IMR at index 0, base/size ignored.
 * __imr_remove_range(-1, base, size); delete IMR from base to base+size.
399 | * |
400 | * @reg: imr index to remove. |
401 | * @base: physical base address of region aligned to 1 KiB. |
402 | * @size: physical size of region in bytes aligned to 1 KiB. |
 * @return: -EINVAL on invalid range or out of range id
404 | * -ENODEV if reg is valid but no IMR exists or is locked |
405 | * 0 on success. |
406 | */ |
407 | static int __imr_remove_range(int reg, phys_addr_t base, size_t size) |
408 | { |
409 | phys_addr_t end; |
410 | bool found = false; |
411 | unsigned int i; |
412 | struct imr_device *idev = &imr_dev; |
413 | struct imr_regs imr; |
414 | size_t raw_size; |
415 | int ret = 0; |
416 | |
	if (WARN_ONCE(idev->init == false, "driver not initialized"))
418 | return -ENODEV; |
419 | |
420 | /* |
421 | * Validate address range if deleting by address, else we are |
422 | * deleting by index where base and size will be ignored. |
423 | */ |
424 | if (reg == -1) { |
425 | ret = imr_check_params(base, size); |
426 | if (ret) |
427 | return ret; |
428 | } |
429 | |
430 | /* Tweak the size value. */ |
431 | raw_size = imr_raw_size(size); |
432 | end = base + raw_size; |
433 | |
434 | mutex_lock(&idev->lock); |
435 | |
436 | if (reg >= 0) { |
437 | /* If a specific IMR is given try to use it. */ |
		ret = imr_read(idev, reg, &imr);
		if (ret)
			goto failed;

		if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) {
443 | ret = -ENODEV; |
444 | goto failed; |
445 | } |
446 | found = true; |
447 | } else { |
448 | /* Search for match based on address range. */ |
449 | for (i = 0; i < idev->max_imr; i++) { |
			ret = imr_read(idev, i, &imr);
			if (ret)
				goto failed;

			if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK)
455 | continue; |
456 | |
457 | if ((imr_to_phys(imr.addr_lo) == base) && |
458 | (imr_to_phys(imr.addr_hi) == end)) { |
459 | found = true; |
460 | reg = i; |
461 | break; |
462 | } |
463 | } |
464 | } |
465 | |
466 | if (!found) { |
467 | ret = -ENODEV; |
468 | goto failed; |
469 | } |
470 | |
	pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size);
472 | |
473 | /* Tear down the IMR. */ |
474 | imr.addr_lo = 0; |
475 | imr.addr_hi = 0; |
476 | imr.rmask = IMR_READ_ACCESS_ALL; |
477 | imr.wmask = IMR_WRITE_ACCESS_ALL; |
478 | |
	ret = imr_write(idev, reg, &imr);
480 | |
481 | failed: |
	mutex_unlock(&idev->lock);
483 | return ret; |
484 | } |
485 | |
486 | /** |
487 | * imr_remove_range - delete an Isolated Memory Region by address |
488 | * |
489 | * This function allows you to delete an IMR by an address range specified |
490 | * by base and size respectively. |
491 | * imr_remove_range(base, size); delete IMR from base to base+size. |
492 | * |
493 | * @base: physical base address of region aligned to 1 KiB. |
494 | * @size: physical size of region in bytes aligned to 1 KiB. |
 * @return: -EINVAL on invalid range or out of range id
 *          -ENODEV if no matching IMR exists or it is locked
497 | * 0 on success. |
498 | */ |
499 | int imr_remove_range(phys_addr_t base, size_t size) |
500 | { |
	return __imr_remove_range(-1, base, size);
502 | } |
503 | EXPORT_SYMBOL_GPL(imr_remove_range); |
504 | |
505 | /** |
506 | * imr_clear - delete an Isolated Memory Region by index |
507 | * |
 * This function allows you to delete an IMR by its index. Useful for
 * initial sanitization of the IMR address map.
 * imr_clear(reg); delete IMR at index reg, base/size ignored.
512 | * |
513 | * @reg: imr index to remove. |
 * @return: -EINVAL on invalid range or out of range id
515 | * -ENODEV if reg is valid but no IMR exists or is locked |
516 | * 0 on success. |
517 | */ |
518 | static inline int imr_clear(int reg) |
519 | { |
	return __imr_remove_range(reg, 0, 0);
521 | } |
522 | |
523 | /** |
524 | * imr_fixup_memmap - Tear down IMRs used during bootup. |
525 | * |
526 | * BIOS and Grub both setup IMRs around compressed kernel, initrd memory |
527 | * that need to be removed before the kernel hands out one of the IMR |
528 | * encased addresses to a downstream DMA agent such as the SD or Ethernet. |
529 | * IMRs on Galileo are setup to immediately reset the system on violation. |
530 | * As a result if you're running a root filesystem from SD - you'll need |
531 | * the boot-time IMRs torn down or you'll find seemingly random resets when |
532 | * using your filesystem. |
533 | * |
534 | * @idev: pointer to imr_device structure. |
536 | */ |
537 | static void __init imr_fixup_memmap(struct imr_device *idev) |
538 | { |
	phys_addr_t base = virt_to_phys(&_text);
	size_t size = virt_to_phys(&__end_rodata) - base;
541 | unsigned long start, end; |
542 | int i; |
543 | int ret; |
544 | |
545 | /* Tear down all existing unlocked IMRs. */ |
546 | for (i = 0; i < idev->max_imr; i++) |
		imr_clear(i);
548 | |
549 | start = (unsigned long)_text; |
550 | end = (unsigned long)__end_rodata - 1; |
551 | |
552 | /* |
553 | * Setup an unlocked IMR around the physical extent of the kernel |
554 | * from the beginning of the .text section to the end of the |
555 | * .rodata section as one physically contiguous block. |
556 | * |
557 | * We don't round up @size since it is already PAGE_SIZE aligned. |
558 | * See vmlinux.lds.S for details. |
559 | */ |
560 | ret = imr_add_range(base, size, IMR_CPU, IMR_CPU); |
561 | if (ret < 0) { |
		pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
563 | size / 1024, start, end); |
564 | } else { |
		pr_info("protecting kernel .text - .rodata: %zu KiB (%lx - %lx)\n",
566 | size / 1024, start, end); |
567 | } |
569 | } |
570 | |
571 | static const struct x86_cpu_id imr_ids[] __initconst = { |
572 | X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL), |
573 | {} |
574 | }; |
575 | |
576 | /** |
577 | * imr_init - entry point for IMR driver. |
578 | * |
 * @return: -ENODEV for no IMR support, 0 if good to go.
580 | */ |
581 | static int __init imr_init(void) |
582 | { |
583 | struct imr_device *idev = &imr_dev; |
584 | |
	if (!x86_match_cpu(imr_ids) || !iosf_mbi_available())
586 | return -ENODEV; |
587 | |
588 | idev->max_imr = QUARK_X1000_IMR_MAX; |
589 | idev->reg_base = QUARK_X1000_IMR_REGBASE; |
590 | idev->init = true; |
591 | |
592 | mutex_init(&idev->lock); |
593 | imr_debugfs_register(idev); |
594 | imr_fixup_memmap(idev); |
595 | return 0; |
596 | } |
597 | device_initcall(imr_init); |
598 | |