/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
5 | |
6 | #include <linux/mm.h> |
7 | #include <linux/sched.h> |
8 | #include <linux/slab.h> |
9 | #include <linux/syscalls.h> |
10 | #include <linux/uaccess.h> |
11 | #include <asm/unistd.h> |
12 | #include <os.h> |
13 | #include <skas.h> |
14 | #include <sysdep/tls.h> |
15 | |
16 | static inline int modify_ldt (int func, void *ptr, unsigned long bytecount) |
17 | { |
18 | return syscall(__NR_modify_ldt, func, ptr, bytecount); |
19 | } |
20 | |
21 | static long write_ldt_entry(struct mm_id *mm_idp, int func, |
22 | struct user_desc *desc, void **addr, int done) |
23 | { |
24 | long res; |
25 | void *stub_addr; |
26 | |
27 | BUILD_BUG_ON(sizeof(*desc) % sizeof(long)); |
28 | |
29 | res = syscall_stub_data(mm_idp, (unsigned long *)desc, |
30 | sizeof(*desc) / sizeof(long), |
31 | addr, &stub_addr); |
32 | if (!res) { |
33 | unsigned long args[] = { func, |
34 | (unsigned long)stub_addr, |
35 | sizeof(*desc), |
36 | 0, 0, 0 }; |
37 | res = run_syscall_stub(mm_idp, __NR_modify_ldt, args, |
38 | 0, addr, done); |
39 | } |
40 | |
41 | return res; |
42 | } |
43 | |
/*
 * In skas mode, we hold our own ldt data in UML.
 * Thus, the code implementing sys_modify_ldt_skas
 * is very similar to (and mostly stolen from) sys_modify_ldt
 * for arch/i386/kernel/ldt.c
 * The routines copied and modified in part are:
 *	- read_ldt
 *	- read_default_ldt
 *	- write_ldt
 *	- sys_modify_ldt_skas
 */
55 | |
56 | static int read_ldt(void __user * ptr, unsigned long bytecount) |
57 | { |
58 | int i, err = 0; |
59 | unsigned long size; |
60 | uml_ldt_t *ldt = ¤t->mm->context.arch.ldt; |
61 | |
62 | if (!ldt->entry_count) |
63 | goto out; |
64 | if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES) |
65 | bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES; |
66 | err = bytecount; |
67 | |
68 | mutex_lock(&ldt->lock); |
69 | if (ldt->entry_count <= LDT_DIRECT_ENTRIES) { |
70 | size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES; |
71 | if (size > bytecount) |
72 | size = bytecount; |
73 | if (copy_to_user(to: ptr, from: ldt->u.entries, n: size)) |
74 | err = -EFAULT; |
75 | bytecount -= size; |
76 | ptr += size; |
77 | } |
78 | else { |
79 | for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount; |
80 | i++) { |
81 | size = PAGE_SIZE; |
82 | if (size > bytecount) |
83 | size = bytecount; |
84 | if (copy_to_user(ptr, ldt->u.pages[i], size)) { |
85 | err = -EFAULT; |
86 | break; |
87 | } |
88 | bytecount -= size; |
89 | ptr += size; |
90 | } |
91 | } |
92 | mutex_unlock(lock: &ldt->lock); |
93 | |
94 | if (bytecount == 0 || err == -EFAULT) |
95 | goto out; |
96 | |
97 | if (clear_user(to: ptr, n: bytecount)) |
98 | err = -EFAULT; |
99 | |
100 | out: |
101 | return err; |
102 | } |
103 | |
104 | static int read_default_ldt(void __user * ptr, unsigned long bytecount) |
105 | { |
106 | int err; |
107 | |
108 | if (bytecount > 5*LDT_ENTRY_SIZE) |
109 | bytecount = 5*LDT_ENTRY_SIZE; |
110 | |
111 | err = bytecount; |
112 | /* |
113 | * UML doesn't support lcall7 and lcall27. |
114 | * So, we don't really have a default ldt, but emulate |
115 | * an empty ldt of common host default ldt size. |
116 | */ |
117 | if (clear_user(to: ptr, n: bytecount)) |
118 | err = -EFAULT; |
119 | |
120 | return err; |
121 | } |
122 | |
123 | static int write_ldt(void __user * ptr, unsigned long bytecount, int func) |
124 | { |
125 | uml_ldt_t *ldt = ¤t->mm->context.arch.ldt; |
126 | struct mm_id * mm_idp = ¤t->mm->context.id; |
127 | int i, err; |
128 | struct user_desc ldt_info; |
129 | struct ldt_entry entry0, *ldt_p; |
130 | void *addr = NULL; |
131 | |
132 | err = -EINVAL; |
133 | if (bytecount != sizeof(ldt_info)) |
134 | goto out; |
135 | err = -EFAULT; |
136 | if (copy_from_user(to: &ldt_info, from: ptr, n: sizeof(ldt_info))) |
137 | goto out; |
138 | |
139 | err = -EINVAL; |
140 | if (ldt_info.entry_number >= LDT_ENTRIES) |
141 | goto out; |
142 | if (ldt_info.contents == 3) { |
143 | if (func == 1) |
144 | goto out; |
145 | if (ldt_info.seg_not_present == 0) |
146 | goto out; |
147 | } |
148 | |
149 | mutex_lock(&ldt->lock); |
150 | |
151 | err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1); |
152 | if (err) |
153 | goto out_unlock; |
154 | |
155 | if (ldt_info.entry_number >= ldt->entry_count && |
156 | ldt_info.entry_number >= LDT_DIRECT_ENTRIES) { |
157 | for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE; |
158 | i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number; |
159 | i++) { |
160 | if (i == 0) |
161 | memcpy(&entry0, ldt->u.entries, |
162 | sizeof(entry0)); |
163 | ldt->u.pages[i] = (struct ldt_entry *) |
164 | __get_free_page(GFP_KERNEL|__GFP_ZERO); |
165 | if (!ldt->u.pages[i]) { |
166 | err = -ENOMEM; |
167 | /* Undo the change in host */ |
168 | memset(&ldt_info, 0, sizeof(ldt_info)); |
169 | write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1); |
170 | goto out_unlock; |
171 | } |
172 | if (i == 0) { |
173 | memcpy(ldt->u.pages[0], &entry0, |
174 | sizeof(entry0)); |
175 | memcpy(ldt->u.pages[0]+1, ldt->u.entries+1, |
176 | sizeof(entry0)*(LDT_DIRECT_ENTRIES-1)); |
177 | } |
178 | ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE; |
179 | } |
180 | } |
181 | if (ldt->entry_count <= ldt_info.entry_number) |
182 | ldt->entry_count = ldt_info.entry_number + 1; |
183 | |
184 | if (ldt->entry_count <= LDT_DIRECT_ENTRIES) |
185 | ldt_p = ldt->u.entries + ldt_info.entry_number; |
186 | else |
187 | ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] + |
188 | ldt_info.entry_number%LDT_ENTRIES_PER_PAGE; |
189 | |
190 | if (ldt_info.base_addr == 0 && ldt_info.limit == 0 && |
191 | (func == 1 || LDT_empty(&ldt_info))) { |
192 | ldt_p->a = 0; |
193 | ldt_p->b = 0; |
194 | } |
195 | else{ |
196 | if (func == 1) |
197 | ldt_info.useable = 0; |
198 | ldt_p->a = LDT_entry_a(&ldt_info); |
199 | ldt_p->b = LDT_entry_b(&ldt_info); |
200 | } |
201 | err = 0; |
202 | |
203 | out_unlock: |
204 | mutex_unlock(&ldt->lock); |
205 | out: |
206 | return err; |
207 | } |
208 | |
209 | static long do_modify_ldt_skas(int func, void __user *ptr, |
210 | unsigned long bytecount) |
211 | { |
212 | int ret = -ENOSYS; |
213 | |
214 | switch (func) { |
215 | case 0: |
216 | ret = read_ldt(ptr, bytecount); |
217 | break; |
218 | case 1: |
219 | case 0x11: |
220 | ret = write_ldt(ptr, bytecount, func); |
221 | break; |
222 | case 2: |
223 | ret = read_default_ldt(ptr, bytecount); |
224 | break; |
225 | } |
226 | return ret; |
227 | } |
228 | |
229 | static DEFINE_SPINLOCK(host_ldt_lock); |
230 | static short dummy_list[9] = {0, -1}; |
231 | static short * host_ldt_entries = NULL; |
232 | |
233 | static void ldt_get_host_info(void) |
234 | { |
235 | long ret; |
236 | struct ldt_entry * ldt; |
237 | short *tmp; |
238 | int i, size, k, order; |
239 | |
240 | spin_lock(lock: &host_ldt_lock); |
241 | |
242 | if (host_ldt_entries != NULL) { |
243 | spin_unlock(lock: &host_ldt_lock); |
244 | return; |
245 | } |
246 | host_ldt_entries = dummy_list+1; |
247 | |
248 | spin_unlock(lock: &host_ldt_lock); |
249 | |
250 | for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++) |
251 | ; |
252 | |
253 | ldt = (struct ldt_entry *) |
254 | __get_free_pages(GFP_KERNEL|__GFP_ZERO, order); |
255 | if (ldt == NULL) { |
256 | printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer " |
257 | "for host ldt\n" ); |
258 | return; |
259 | } |
260 | |
261 | ret = modify_ldt(func: 0, ptr: ldt, bytecount: (1<<order)*PAGE_SIZE); |
262 | if (ret < 0) { |
263 | printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n" ); |
264 | goto out_free; |
265 | } |
266 | if (ret == 0) { |
267 | /* default_ldt is active, simply write an empty entry 0 */ |
268 | host_ldt_entries = dummy_list; |
269 | goto out_free; |
270 | } |
271 | |
272 | for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) { |
273 | if (ldt[i].a != 0 || ldt[i].b != 0) |
274 | size++; |
275 | } |
276 | |
277 | if (size < ARRAY_SIZE(dummy_list)) |
278 | host_ldt_entries = dummy_list; |
279 | else { |
280 | size = (size + 1) * sizeof(dummy_list[0]); |
281 | tmp = kmalloc(size, GFP_KERNEL); |
282 | if (tmp == NULL) { |
283 | printk(KERN_ERR "ldt_get_host_info: couldn't allocate " |
284 | "host ldt list\n" ); |
285 | goto out_free; |
286 | } |
287 | host_ldt_entries = tmp; |
288 | } |
289 | |
290 | for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) { |
291 | if (ldt[i].a != 0 || ldt[i].b != 0) |
292 | host_ldt_entries[k++] = i; |
293 | } |
294 | host_ldt_entries[k] = -1; |
295 | |
296 | out_free: |
297 | free_pages(addr: (unsigned long)ldt, order); |
298 | } |
299 | |
300 | long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm) |
301 | { |
302 | struct user_desc desc; |
303 | short * num_p; |
304 | int i; |
305 | long page, err=0; |
306 | void *addr = NULL; |
307 | |
308 | |
309 | mutex_init(&new_mm->arch.ldt.lock); |
310 | |
311 | if (!from_mm) { |
312 | memset(&desc, 0, sizeof(desc)); |
313 | /* |
314 | * Now we try to retrieve info about the ldt, we |
315 | * inherited from the host. All ldt-entries found |
316 | * will be reset in the following loop |
317 | */ |
318 | ldt_get_host_info(); |
319 | for (num_p=host_ldt_entries; *num_p != -1; num_p++) { |
320 | desc.entry_number = *num_p; |
321 | err = write_ldt_entry(dt: &new_mm->id, entry: 1, desc: &desc, |
322 | &addr, *(num_p + 1) == -1); |
323 | if (err) |
324 | break; |
325 | } |
326 | new_mm->arch.ldt.entry_count = 0; |
327 | |
328 | goto out; |
329 | } |
330 | |
331 | /* |
332 | * Our local LDT is used to supply the data for |
333 | * modify_ldt(READLDT), if PTRACE_LDT isn't available, |
334 | * i.e., we have to use the stub for modify_ldt, which |
335 | * can't handle the big read buffer of up to 64kB. |
336 | */ |
337 | mutex_lock(&from_mm->arch.ldt.lock); |
338 | if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES) |
339 | memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries, |
340 | sizeof(new_mm->arch.ldt.u.entries)); |
341 | else { |
342 | i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE; |
343 | while (i-->0) { |
344 | page = __get_free_page(GFP_KERNEL|__GFP_ZERO); |
345 | if (!page) { |
346 | err = -ENOMEM; |
347 | break; |
348 | } |
349 | new_mm->arch.ldt.u.pages[i] = |
350 | (struct ldt_entry *) page; |
351 | memcpy(new_mm->arch.ldt.u.pages[i], |
352 | from_mm->arch.ldt.u.pages[i], PAGE_SIZE); |
353 | } |
354 | } |
355 | new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count; |
356 | mutex_unlock(lock: &from_mm->arch.ldt.lock); |
357 | |
358 | out: |
359 | return err; |
360 | } |
361 | |
362 | |
363 | void free_ldt(struct mm_context *mm) |
364 | { |
365 | int i; |
366 | |
367 | if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) { |
368 | i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE; |
369 | while (i-- > 0) |
370 | free_page((long) mm->arch.ldt.u.pages[i]); |
371 | } |
372 | mm->arch.ldt.entry_count = 0; |
373 | } |
374 | |
375 | SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr , |
376 | unsigned long , bytecount) |
377 | { |
378 | /* See non-um modify_ldt() for why we do this cast */ |
379 | return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount); |
380 | } |
381 | |