Merge branch 'fixes' of git://git.armlinux.org.uk/~rmk/linux-arm
Pull ARM fixes from Russell King:
 "A couple of fixes from Kees concerning problems he spotted with our
  user access support"

* 'fixes' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 8658/1: uaccess: fix zeroing of 64-bit get_user()
  ARM: 8657/1: uaccess: consistently check object sizes
commit b92ce305fc
2 changed files with 33 additions and 13 deletions
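Two fixes from Kees Cook are merged here. ARM: 8657/1 renames the arch-level helpers to __arch_copy_{from,to}_user() and hoists the hardened-usercopy check_object_size() calls into the __copy_*_user()/copy_*_user() wrappers, so object sizes are checked consistently on every copy path, including the memcpy-based fallbacks. ARM: 8658/1 fixes the failure path of the 64-bit get_user(): __get_user_8 branched to __get_user_bad, which zeroes only r2, leaving the other half of the 64-bit result with stale register contents.

As a minimal sketch of the contract the get_user() fix restores (the variable and pointer names are illustrative, not from the patch):

	u64 val;

	/*
	 * On a faulting user pointer, get_user() must return -EFAULT and
	 * zero all 64 bits of val; before ARM: 8658/1, the error path of
	 * __get_user_8 cleared only one 32-bit half.
	 */
	if (get_user(val, (u64 __user *)uptr))
		return -EFAULT;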
arch/arm/include/asm/uaccess.h

@@ -478,11 +478,10 @@ extern unsigned long __must_check
 arm_copy_from_user(void *to, const void __user *from, unsigned long n);
 
 static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+__arch_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	unsigned int __ua_flags;
 
-	check_object_size(to, n, false);
 	__ua_flags = uaccess_save_and_enable();
 	n = arm_copy_from_user(to, from, n);
 	uaccess_restore(__ua_flags);
@@ -495,18 +494,15 @@ extern unsigned long __must_check
 __copy_to_user_std(void __user *to, const void *from, unsigned long n);
 
 static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+__arch_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 #ifndef CONFIG_UACCESS_WITH_MEMCPY
 	unsigned int __ua_flags;
-
-	check_object_size(from, n, true);
 	__ua_flags = uaccess_save_and_enable();
 	n = arm_copy_to_user(to, from, n);
 	uaccess_restore(__ua_flags);
 	return n;
 #else
-	check_object_size(from, n, true);
 	return arm_copy_to_user(to, from, n);
 #endif
 }
@@ -526,25 +522,49 @@ __clear_user(void __user *addr, unsigned long n)
 }
 
 #else
-#define __copy_from_user(to, from, n)	(memcpy(to, (void __force *)from, n), 0)
-#define __copy_to_user(to, from, n)	(memcpy((void __force *)to, from, n), 0)
+#define __arch_copy_from_user(to, from, n)	\
+					(memcpy(to, (void __force *)from, n), 0)
+#define __arch_copy_to_user(to, from, n)	\
+					(memcpy((void __force *)to, from, n), 0)
 #define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
 #endif
 
-static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	check_object_size(to, n, false);
+
+	return __arch_copy_from_user(to, from, n);
+}
+
+static inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	unsigned long res = n;
 
+	check_object_size(to, n, false);
+
 	if (likely(access_ok(VERIFY_READ, from, n)))
-		res = __copy_from_user(to, from, n);
+		res = __arch_copy_from_user(to, from, n);
 	if (unlikely(res))
 		memset(to + (n - res), 0, res);
 	return res;
 }
 
-static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	check_object_size(from, n, true);
+
+	return __arch_copy_to_user(to, from, n);
+}
+
+static inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	check_object_size(from, n, true);
+
 	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
+		n = __arch_copy_to_user(to, from, n);
 	return n;
 }
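After the restructuring, the layering is: copy_{from,to}_user() performs both check_object_size() and access_ok(), __copy_{from,to}_user() performs only check_object_size(), and __arch_copy_{from,to}_user() is the bare arch copy. A hedged usage sketch; the handler, struct, and buffer names are hypothetical:

	struct my_stats st;	/* hypothetical on-stack object */

	/*
	 * copy_to_user() now calls check_object_size(&st, sizeof(st), true)
	 * before access_ok() and __arch_copy_to_user(), so a copy that
	 * would overrun this stack object is caught on every entry point.
	 */
	if (copy_to_user(ubuf, &st, sizeof(st)))
		return -EFAULT;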
arch/arm/lib/getuser.S

@@ -67,7 +67,7 @@ ENTRY(__get_user_4)
 ENDPROC(__get_user_4)
 
 ENTRY(__get_user_8)
-	check_uaccess r0, 8, r1, r2, __get_user_bad
+	check_uaccess r0, 8, r1, r2, __get_user_bad8
 #ifdef CONFIG_THUMB2_KERNEL
 5: TUSER(ldr)	r2, [r0]
 6: TUSER(ldr)	r3, [r0, #4]
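This one-line retarget is the whole fix on the assembly side: __get_user_bad zeroes only r2, while __get_user_bad8, which already existed for the __get_user_64t_* variants, zeroes both halves of the 64-bit result. From memory of getuser.S (not part of the loaded diff), the error stubs fall through roughly like this:

	__get_user_bad8:
		mov	r3, #0
	__get_user_bad:
		mov	r2, #0
		mov	r0, #-EFAULT
		ret	lr
	ENDPROC(__get_user_bad)
	ENDPROC(__get_user_bad8)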