1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/sched.h> |
3 | #include <linux/kernel.h> |
4 | #include <linux/errno.h> |
5 | #include <linux/mm.h> |
6 | #include <linux/nmi.h> |
7 | #include <linux/swap.h> |
8 | #include <linux/smp.h> |
9 | #include <linux/highmem.h> |
10 | #include <linux/pagemap.h> |
11 | #include <linux/spinlock.h> |
12 | |
13 | #include <asm/cpu_entry_area.h> |
14 | #include <asm/fixmap.h> |
15 | #include <asm/e820/api.h> |
16 | #include <asm/tlb.h> |
17 | #include <asm/tlbflush.h> |
18 | #include <asm/io.h> |
19 | #include <linux/vmalloc.h> |
20 | |
/* Size of the vmalloc area reserve; default 128 MiB, overridable via "vmalloc=" */
unsigned int __VMALLOC_RESERVE = 128 << 20;
22 | |
23 | /* |
24 | * Associate a virtual page frame with a given physical page frame |
25 | * and protection flags for that frame. |
26 | */ |
27 | void set_pte_vaddr(unsigned long vaddr, pte_t pteval) |
28 | { |
29 | pgd_t *pgd; |
30 | p4d_t *p4d; |
31 | pud_t *pud; |
32 | pmd_t *pmd; |
33 | pte_t *pte; |
34 | |
35 | pgd = swapper_pg_dir + pgd_index(vaddr); |
36 | if (pgd_none(pgd: *pgd)) { |
37 | BUG(); |
38 | return; |
39 | } |
40 | p4d = p4d_offset(pgd, address: vaddr); |
41 | if (p4d_none(p4d: *p4d)) { |
42 | BUG(); |
43 | return; |
44 | } |
45 | pud = pud_offset(p4d, address: vaddr); |
46 | if (pud_none(pud: *pud)) { |
47 | BUG(); |
48 | return; |
49 | } |
50 | pmd = pmd_offset(pud, address: vaddr); |
51 | if (pmd_none(pmd: *pmd)) { |
52 | BUG(); |
53 | return; |
54 | } |
55 | pte = pte_offset_kernel(pmd, address: vaddr); |
56 | if (!pte_none(pte: pteval)) |
57 | set_pte_at(&init_mm, vaddr, pte, pteval); |
58 | else |
59 | pte_clear(mm: &init_mm, addr: vaddr, ptep: pte); |
60 | |
61 | /* |
62 | * It's enough to flush this one mapping. |
63 | * (PGE mappings get flushed as well) |
64 | */ |
65 | flush_tlb_one_kernel(addr: vaddr); |
66 | } |
67 | |
/* Top of the fixmap area; movable via "reservetop=" (see parse_reservetop). */
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);
70 | |
71 | /* |
72 | * vmalloc=size forces the vmalloc area to be exactly 'size' |
73 | * bytes. This can be used to increase (or decrease) the |
74 | * vmalloc area - the default is 128m. |
75 | */ |
76 | static int __init parse_vmalloc(char *arg) |
77 | { |
78 | if (!arg) |
79 | return -EINVAL; |
80 | |
81 | /* Add VMALLOC_OFFSET to the parsed value due to vm area guard hole*/ |
82 | __VMALLOC_RESERVE = memparse(ptr: arg, retptr: &arg) + VMALLOC_OFFSET; |
83 | return 0; |
84 | } |
85 | early_param("vmalloc" , parse_vmalloc); |
86 | |
87 | /* |
88 | * reservetop=size reserves a hole at the top of the kernel address space which |
89 | * a hypervisor can load into later. Needed for dynamically loaded hypervisors, |
90 | * so relocating the fixmap can be done before paging initialization. |
91 | */ |
92 | static int __init parse_reservetop(char *arg) |
93 | { |
94 | unsigned long address; |
95 | |
96 | if (!arg) |
97 | return -EINVAL; |
98 | |
99 | address = memparse(ptr: arg, retptr: &arg); |
100 | reserve_top_address(reserve: address); |
101 | early_ioremap_init(); |
102 | return 0; |
103 | } |
104 | early_param("reservetop" , parse_reservetop); |
105 | |