/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * MIPS SIMD Architecture (MSA) context handling code for KVM.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 */

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>

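/*
 * The assembler must not reorder instructions or use $at behind our back:
 * branch delay slots are filled explicitly below, and $1 is used as a
 * temporary by the restore macros.
 */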
	.set	noreorder
	.set	noat

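/*
 * void __kvm_save_msa(struct kvm_vcpu_arch *arch)
 *
 * Save all 32 MSA vector registers (128 bits each) into the vcpu FPU
 * context. a0 holds the vcpu arch state pointer; the VCPU_FPRn offsets
 * into it are generated by asm-offsets. st_d (from asmmacro.h) emits an
 * MSA st.d, storing the full vector register.
 */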
LEAF(__kvm_save_msa)
	st_d	0,  VCPU_FPR0,  a0
	st_d	1,  VCPU_FPR1,  a0
	st_d	2,  VCPU_FPR2,  a0
	st_d	3,  VCPU_FPR3,  a0
	st_d	4,  VCPU_FPR4,  a0
	st_d	5,  VCPU_FPR5,  a0
	st_d	6,  VCPU_FPR6,  a0
	st_d	7,  VCPU_FPR7,  a0
	st_d	8,  VCPU_FPR8,  a0
	st_d	9,  VCPU_FPR9,  a0
	st_d	10, VCPU_FPR10, a0
	st_d	11, VCPU_FPR11, a0
	st_d	12, VCPU_FPR12, a0
	st_d	13, VCPU_FPR13, a0
	st_d	14, VCPU_FPR14, a0
	st_d	15, VCPU_FPR15, a0
	st_d	16, VCPU_FPR16, a0
	st_d	17, VCPU_FPR17, a0
	st_d	18, VCPU_FPR18, a0
	st_d	19, VCPU_FPR19, a0
	st_d	20, VCPU_FPR20, a0
	st_d	21, VCPU_FPR21, a0
	st_d	22, VCPU_FPR22, a0
	st_d	23, VCPU_FPR23, a0
	st_d	24, VCPU_FPR24, a0
	st_d	25, VCPU_FPR25, a0
	st_d	26, VCPU_FPR26, a0
	st_d	27, VCPU_FPR27, a0
	st_d	28, VCPU_FPR28, a0
	st_d	29, VCPU_FPR29, a0
	st_d	30, VCPU_FPR30, a0
	st_d	31, VCPU_FPR31, a0
	jr	ra
	 nop
	END(__kvm_save_msa)

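/*
 * void __kvm_restore_msa(struct kvm_vcpu_arch *arch)
 *
 * Restore all 32 MSA vector registers from the vcpu FPU context, the
 * inverse of __kvm_save_msa. ld_d emits an MSA ld.d, loading the full
 * 128-bit vector register.
 */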
LEAF(__kvm_restore_msa)
	ld_d	0,  VCPU_FPR0,  a0
	ld_d	1,  VCPU_FPR1,  a0
	ld_d	2,  VCPU_FPR2,  a0
	ld_d	3,  VCPU_FPR3,  a0
	ld_d	4,  VCPU_FPR4,  a0
	ld_d	5,  VCPU_FPR5,  a0
	ld_d	6,  VCPU_FPR6,  a0
	ld_d	7,  VCPU_FPR7,  a0
	ld_d	8,  VCPU_FPR8,  a0
	ld_d	9,  VCPU_FPR9,  a0
	ld_d	10, VCPU_FPR10, a0
	ld_d	11, VCPU_FPR11, a0
	ld_d	12, VCPU_FPR12, a0
	ld_d	13, VCPU_FPR13, a0
	ld_d	14, VCPU_FPR14, a0
	ld_d	15, VCPU_FPR15, a0
	ld_d	16, VCPU_FPR16, a0
	ld_d	17, VCPU_FPR17, a0
	ld_d	18, VCPU_FPR18, a0
	ld_d	19, VCPU_FPR19, a0
	ld_d	20, VCPU_FPR20, a0
	ld_d	21, VCPU_FPR21, a0
	ld_d	22, VCPU_FPR22, a0
	ld_d	23, VCPU_FPR23, a0
	ld_d	24, VCPU_FPR24, a0
	ld_d	25, VCPU_FPR25, a0
	ld_d	26, VCPU_FPR26, a0
	ld_d	27, VCPU_FPR27, a0
	ld_d	28, VCPU_FPR28, a0
	ld_d	29, VCPU_FPR29, a0
	ld_d	30, VCPU_FPR30, a0
	ld_d	31, VCPU_FPR31, a0
	jr	ra
	 nop
	END(__kvm_restore_msa)

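/*
 * Restore the upper 64 bits of MSA vector register \wr from \off(\base)
 * without touching the lower 64 bits, which alias the scalar FP register
 * and may already hold live state. 64-bit kernels use a single 64-bit
 * load and insert_d; 32-bit kernels use two 32-bit loads and insert_w
 * into word elements 2 and 3, ordered to match the in-memory doubleword
 * layout for the kernel's endianness. $1 is the temporary, hence noat.
 */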
	.macro	kvm_restore_msa_upper	wr, off, base
	.set	push
	.set	noat
#ifdef CONFIG_64BIT
	ld	$1, \off(\base)
	insert_d \wr, 1
#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
	lw	$1, \off(\base)
	insert_w \wr, 2
	lw	$1, (\off+4)(\base)
	insert_w \wr, 3
#else /* CONFIG_CPU_BIG_ENDIAN */
	lw	$1, (\off+4)(\base)
	insert_w \wr, 2
	lw	$1, \off(\base)
	insert_w \wr, 3
#endif
	.set	pop
	.endm

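/*
 * void __kvm_restore_msa_upper(struct kvm_vcpu_arch *arch)
 *
 * Restore only the upper halves of all 32 vector registers, for use when
 * the lower 64 bits are already loaded as scalar FPU state. Each offset
 * is VCPU_FPRn+8, i.e. the second doubleword of the saved 128-bit
 * register.
 */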
LEAF(__kvm_restore_msa_upper)
	kvm_restore_msa_upper	0,  VCPU_FPR0 +8, a0
	kvm_restore_msa_upper	1,  VCPU_FPR1 +8, a0
	kvm_restore_msa_upper	2,  VCPU_FPR2 +8, a0
	kvm_restore_msa_upper	3,  VCPU_FPR3 +8, a0
	kvm_restore_msa_upper	4,  VCPU_FPR4 +8, a0
	kvm_restore_msa_upper	5,  VCPU_FPR5 +8, a0
	kvm_restore_msa_upper	6,  VCPU_FPR6 +8, a0
	kvm_restore_msa_upper	7,  VCPU_FPR7 +8, a0
	kvm_restore_msa_upper	8,  VCPU_FPR8 +8, a0
	kvm_restore_msa_upper	9,  VCPU_FPR9 +8, a0
	kvm_restore_msa_upper	10, VCPU_FPR10+8, a0
	kvm_restore_msa_upper	11, VCPU_FPR11+8, a0
	kvm_restore_msa_upper	12, VCPU_FPR12+8, a0
	kvm_restore_msa_upper	13, VCPU_FPR13+8, a0
	kvm_restore_msa_upper	14, VCPU_FPR14+8, a0
	kvm_restore_msa_upper	15, VCPU_FPR15+8, a0
	kvm_restore_msa_upper	16, VCPU_FPR16+8, a0
	kvm_restore_msa_upper	17, VCPU_FPR17+8, a0
	kvm_restore_msa_upper	18, VCPU_FPR18+8, a0
	kvm_restore_msa_upper	19, VCPU_FPR19+8, a0
	kvm_restore_msa_upper	20, VCPU_FPR20+8, a0
	kvm_restore_msa_upper	21, VCPU_FPR21+8, a0
	kvm_restore_msa_upper	22, VCPU_FPR22+8, a0
	kvm_restore_msa_upper	23, VCPU_FPR23+8, a0
	kvm_restore_msa_upper	24, VCPU_FPR24+8, a0
	kvm_restore_msa_upper	25, VCPU_FPR25+8, a0
	kvm_restore_msa_upper	26, VCPU_FPR26+8, a0
	kvm_restore_msa_upper	27, VCPU_FPR27+8, a0
	kvm_restore_msa_upper	28, VCPU_FPR28+8, a0
	kvm_restore_msa_upper	29, VCPU_FPR29+8, a0
	kvm_restore_msa_upper	30, VCPU_FPR30+8, a0
	kvm_restore_msa_upper	31, VCPU_FPR31+8, a0
	jr	ra
	 nop
	END(__kvm_restore_msa_upper)

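/*
 * void __kvm_restore_msacsr(struct kvm_vcpu_arch *arch)
 *
 * Restore the guest's MSA control/status register. Writing MSACSR can
 * immediately raise an MSA FP exception if a cause bit is set while its
 * corresponding enable bit is set; see the comment below.
 */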
LEAF(__kvm_restore_msacsr)
	lw	t0, VCPU_MSA_CSR(a0)
	/*
	 * The ctcmsa must stay at this offset in __kvm_restore_msacsr.
	 * See kvm_mips_csr_die_notify(), which handles the case where t0
	 * holds a value that immediately triggers an MSA FP exception on
	 * the ctcmsa below. That exception must be stepped over and
	 * otherwise ignored, since the set cause bits must remain in
	 * place for the guest.
	 */
	_ctcmsa	MSA_CSR, t0
	jr	ra
	 nop
	END(__kvm_restore_msacsr)