// SPDX-License-Identifier: GPL-2.0
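/*
 * This LKDTM module deliberately inserts duplicate SLB (Segment Lookaside
 * Buffer) entries on ppc64 hash-MMU systems to provoke SLB multihit machine
 * checks, so that the kernel's recovery path can be exercised.
 */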

#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/mmu.h>

/* Inserts new slb entries */
static void insert_slb_entry(unsigned long p, int ssize, int page_size)
{
	unsigned long flags;

	flags = SLB_VSID_KERNEL | mmu_psize_defs[page_size].sllp;
	preempt_disable();
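	/*
	 * The SLB is per-CPU state, so preemption stays disabled while the
	 * two conflicting entries below are written: both must land in the
	 * same CPU's SLB to create the multihit.
	 */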

	asm volatile("slbmte %0,%1" :
			: "r" (mk_vsid_data(p, ssize, flags)),
			  "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED))
			: "memory");
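	/*
	 * Insert a second entry for the same effective address at the next
	 * SLB index: two valid translations for one address is the
	 * multihit condition.
	 */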

	asm volatile("slbmte %0,%1" :
			: "r" (mk_vsid_data(p, ssize, flags)),
			  "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED + 1))
			: "memory");
	preempt_enable();
}

/* Inject an SLB multihit on a vmalloc-ed address, i.e. 0xD00... */
static int inject_vmalloc_slb_multihit(void)
{
	char *p;

	p = vmalloc(PAGE_SIZE);
	if (!p)
		return -ENOMEM;

	insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_vmalloc_psize);
	/*
	 * This access triggers an exception. If it is handled correctly,
	 * we must recover from the error.
	 */
	p[0] = '!';
	vfree(p);
	return 0;
}

/* Inject an SLB multihit on a kmalloc-ed address, i.e. 0xC00... */
static int inject_kmalloc_slb_multihit(void)
{
	char *p;

	p = kmalloc(2048, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_linear_psize);
	/*
	 * This access triggers an exception. If it is handled correctly,
	 * we must recover from the error.
	 */
	p[0] = '!';
	kfree(p);
	return 0;
}

/*
 * A few initial SLB entries are bolted. This test injects a multihit
 * in bolted entry 0.
 */
static void insert_dup_slb_entry_0(void)
{
	unsigned long test_address = PAGE_OFFSET, *test_ptr;
	unsigned long esid, vsid;
	unsigned long i = 0;

	test_ptr = (unsigned long *)test_address;
	preempt_disable();

	asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
	asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));
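	/*
	 * The slbmfee/slbmfev pair above reads back the ESID and VSID of
	 * bolted entry 0, which maps PAGE_OFFSET; the same data is now
	 * rewritten at two non-bolted indexes to duplicate that entry.
	 */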

	/* For i != 0 we would need to mask out the old entry number. */
	asm volatile("slbmte %0,%1" :
			: "r" (vsid),
			  "r" (esid | SLB_NUM_BOLTED)
			: "memory");

	asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
	asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));

	/* For i != 0 we would need to mask out the old entry number. */
	asm volatile("slbmte %0,%1" :
			: "r" (vsid),
			  "r" (esid | (SLB_NUM_BOLTED + 1))
			: "memory");

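	/*
	 * Dereferencing the linear-map address below now resolves through
	 * the duplicated entries and should raise a recoverable SLB
	 * multihit machine check.
	 */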
	pr_info("%s accessing test address 0x%lx: 0x%lx\n",
		__func__, test_address, *test_ptr);

	preempt_enable();
}

static void lkdtm_PPC_SLB_MULTIHIT(void)
{
	if (!radix_enabled()) {
		pr_info("Injecting SLB multihit errors\n");
		/*
		 * These need not be separate tests; they all do pretty much
		 * the same thing. In any case we must recover from the
		 * errors they introduce: the machine would not survive
		 * these tests if the errors were not handled correctly.
		 */
		inject_vmalloc_slb_multihit();
		inject_kmalloc_slb_multihit();
		insert_dup_slb_entry_0();
		pr_info("Recovered from SLB multihit errors\n");
	} else {
		pr_err("XFAIL: This test is for ppc64 and with hash mode MMU only\n");
	}
}

static struct crashtype crashtypes[] = {
	CRASHTYPE(PPC_SLB_MULTIHIT),
};

struct crashtype_category powerpc_crashtypes = {
	.crashtypes = crashtypes,
	.len = ARRAY_SIZE(crashtypes),
};
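
/*
 * Note: like the other LKDTM crashtypes, this test is typically triggered
 * through LKDTM's debugfs interface, e.g. (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo PPC_SLB_MULTIHIT > /sys/kernel/debug/provoke-crash/DIRECT
 */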