/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}
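
/*
 * Illustrative sketch (not part of this header): the classic pattern for
 * letting a kernel buffer pass the user-copy limit check is to widen the
 * address limit around the access and restore it afterwards. The function
 * name kernel_read_value() below is hypothetical.
 *
 *	static int kernel_read_value(int __user *uptr, int *val)
 *	{
 *		mm_segment_t old_fs = get_fs();
 *		int ret;
 *
 *		set_fs(KERNEL_DS);		// bypass the limit check
 *		ret = get_user(*val, uptr);	// uptr may point into kernel space
 *		set_fs(old_fs);			// always restore the old limit
 *		return ret;
 *	}
 */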

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max() (current->thread.addr_limit.seg)
#define __addr_ok(addr)	\
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);	\
})
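
/*
 * Worked example of the overflow logic above (values are illustrative):
 * with addr == 0xfffffffffffffff8 and a constant size of 16, computing
 * addr + size would wrap to 0x8 and wrongly compare below the limit.
 * Checking addr > limit - size instead keeps the arithmetic on the limit
 * side, which a sizeof()-style constant cannot overflow; the non-constant
 * path catches the wrap explicitly via the addr < size test.
 */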

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size)					\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})
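
/*
 * Example use (illustrative only; do_example_ioctl() is hypothetical):
 *
 *	static long do_example_ioctl(u32 __user *argp)
 *	{
 *		u32 val;
 *
 *		if (!access_ok(VERIFY_READ, argp, sizeof(*argp)))
 *			return -EFAULT;
 *		// the range is plausible; the access itself may still fault
 *		if (__get_user(val, argp))
 *			return -EFAULT;
 *		return val ? 0 : -EINVAL;
 *	}
 */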

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
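
/*
 * For example, on a 32-bit kernel __inttype(u64) selects unsigned long
 * long (the value no longer fits in unsigned long), while __inttype(u8),
 * __inttype(u16) and __inttype(u32) all select unsigned long. On 64-bit,
 * everything up to and including u64 fits in unsigned long.
 */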

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
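
/*
 * Example use (illustrative sketch; the "example" syscall is hypothetical).
 * get_user() performs the range check itself, so no separate access_ok()
 * is needed:
 *
 *	SYSCALL_DEFINE1(example, int __user *, uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		return val;
 *	}
 */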

#define __put_user_x(size, x, ptr, __ret_pu)				\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")


#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3:"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3:"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)					\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)		\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)						\
({									\
	int __ret_pu;							\
	__typeof__(*(ptr)) __pu_val;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__pu_val = x;							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_x(1, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 2:								\
		__put_user_x(2, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 4:								\
		__put_user_x(4, __pu_val, ptr, __ret_pu);		\
		break;							\
	case 8:								\
		__put_user_x8(__pu_val, ptr, __ret_pu);			\
		break;							\
	default:							\
		__put_user_x(X, __pu_val, ptr, __ret_pu);		\
		break;							\
	}								\
	__builtin_expect(__ret_pu, 0);					\
})
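
/*
 * Example use (illustrative; report_status() is hypothetical):
 *
 *	static int report_status(u32 __user *uptr, u32 status)
 *	{
 *		return put_user(status, uptr);	// 0 on success, -EFAULT on fault
 *	}
 *
 * Note that the size dispatch above is resolved at compile time, since
 * sizeof(*(ptr)) is a constant expression; only one __put_user_N call
 * survives in the generated code.
 */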

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)	(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	xor"itype" %"rtype"0,%"rtype"0\n"	\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)				\
({									\
	int __pu_err;							\
	__uaccess_begin();						\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__uaccess_end();						\
	__builtin_expect(__pu_err, 0);					\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();					\

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)
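
/*
 * Example use (illustrative sketch). Faults inside the region are recorded
 * in current->thread.uaccess_err and folded into err at uaccess_catch()
 * time, instead of being checked per access:
 *
 *	int err = 0;
 *
 *	uaccess_try {
 *		// _ex-style accesses go here
 *	} uaccess_catch(err);
 *
 *	if (err)
 *		return -EFAULT;
 */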

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
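
/*
 * Example use (illustrative; copy_pair() is hypothetical): a single
 * access_ok() check amortized over several __get_user()/__put_user()
 * calls on the same user buffers:
 *
 *	static int copy_pair(u32 __user *dst, u32 __user *src)
 *	{
 *		u32 a, b;
 *
 *		if (!access_ok(VERIFY_WRITE, dst, 2 * sizeof(u32)) ||
 *		    !access_ok(VERIFY_READ, src, 2 * sizeof(u32)))
 *			return -EFAULT;
 *		if (__get_user(a, src) || __get_user(b, src + 1))
 *			return -EFAULT;
 *		if (__put_user(a, dst) || __put_user(b, dst + 1))
 *			return -EFAULT;
 *		return 0;
 *	}
 */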

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
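
/*
 * Fuller sketch of the try/catch forms (illustrative; restore_pair() is
 * hypothetical). The _ex accesses rely on the exception table alone, so
 * no error is checked until the catch:
 *
 *	static int restore_pair(u32 __user *frame, u32 *a, u32 *b)
 *	{
 *		int err = 0;
 *
 *		get_user_try {
 *			get_user_ex(*a, frame);
 *			get_user_ex(*b, frame + 1);
 *		} get_user_catch(err);
 *
 *		return err;
 *	}
 */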

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
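
/*
 * Example use (illustrative sketch; try_claim() is hypothetical):
 * atomically replace an expected value in user memory, futex-style. On
 * success the old value read from user space is stored through the first
 * argument:
 *
 *	static int try_claim(u32 __user *uaddr, u32 expected, u32 mine)
 *	{
 *		u32 cur;
 *		int ret;
 *
 *		ret = user_atomic_cmpxchg_inatomic(&cur, uaddr, expected, mine);
 *		if (ret)
 *			return ret;	// -EFAULT on bad pointer or fault
 *		return cur == expected ? 0 : -EAGAIN;
 *	}
 */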

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin()	__uaccess_begin()
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr, err_label)					\
do {										\
	int __pu_err;								\
	__typeof__(*(ptr)) __pu_val = (x);					\
	__put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);	\
	if (unlikely(__pu_err)) goto err_label;					\
} while (0)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)

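/*
 * Example use (illustrative sketch; fill_words() is hypothetical). The
 * caller does one access_ok() check, opens the window with
 * user_access_begin(), and jumps to a local label on fault. Note the
 * window must be closed on both paths:
 *
 *	static int fill_words(u32 __user *dst, u32 val, int n)
 *	{
 *		int i;
 *
 *		if (!access_ok(VERIFY_WRITE, dst, n * sizeof(u32)))
 *			return -EFAULT;
 *		user_access_begin();
 *		for (i = 0; i < n; i++)
 *			unsafe_put_user(val, dst + i, efault);
 *		user_access_end();
 *		return 0;
 *	efault:
 *		user_access_end();
 *		return -EFAULT;
 *	}
 */
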
#endif /* _ASM_X86_UACCESS_H */