uaccess: always export _copy_[from|to]_user with CONFIG_RUST
Rust code needs to be able to access _copy_from_user and _copy_to_user
so that it can skip the check_copy_size check in cases where the length
is known at compile time, mirroring the logic for when C code skips
check_copy_size. To do this, we ensure that exported versions of these
functions are available when CONFIG_RUST is enabled.

Alice has verified that this patch passes the CONFIG_TEST_USER_COPY test
on x86 using the Android cuttlefish emulator.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Tested-by: Alice Ryhl <aliceryhl@google.com>
Reviewed-by: Boqun Feng <boqun.feng@gmail.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Link: https://lore.kernel.org/r/20240528-alice-mm-v7-2-78222c31b8f4@google.com
Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
commit 1f9a8286bc (parent 1b580e7b9b)
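For illustration only, a minimal Rust sketch of why the export matters. This is not the kernel's actual Rust user-space access API (that lives in rust/kernel/uaccess.rs and differs in detail); the binding and the read_fixed helper below are hypothetical. The point is that once _copy_from_user is always exported, Rust code can call it with a length that is a compile-time constant, mirroring the C callers that never reach a runtime check_copy_size() check.

    // Minimal sketch, assuming a hypothetical direct binding of the
    // exported symbol; not the kernel's real rust/kernel/uaccess.rs API.
    use core::ffi::{c_ulong, c_void};

    extern "C" {
        // Exported from lib/usercopy.c whenever CONFIG_RUST is enabled.
        fn _copy_from_user(to: *mut c_void, from: *const c_void, n: c_ulong) -> c_ulong;
    }

    /// Hypothetical helper: copy exactly N bytes from a userspace pointer.
    ///
    /// # Safety
    /// `user_ptr` must be a userspace address belonging to the current task.
    unsafe fn read_fixed<const N: usize>(user_ptr: *const c_void) -> Result<[u8; N], i32> {
        let mut buf = [0u8; N];
        // N is a compile-time constant, so no check_copy_size()-style
        // runtime check is needed on this path; _copy_from_user itself
        // still performs access_ok() and the speculation barrier.
        let uncopied = unsafe { _copy_from_user(buf.as_mut_ptr().cast(), user_ptr, N as c_ulong) };
        if uncopied != 0 {
            Err(-14) // -EFAULT; treat a partial copy as failure here
        } else {
            Ok(buf)
        }
    }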
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -5,6 +5,7 @@
 #include <linux/fault-inject-usercopy.h>
 #include <linux/instrumented.h>
 #include <linux/minmax.h>
+#include <linux/nospec.h>
 #include <linux/sched.h>
 #include <linux/thread_info.h>
 
@@ -138,13 +139,26 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
 	return raw_copy_to_user(to, from, n);
 }
 
-#ifdef INLINE_COPY_FROM_USER
+/*
+ * Architectures that #define INLINE_COPY_TO_USER use this function
+ * directly in the normal copy_to/from_user(), the other ones go
+ * through an extern _copy_to/from_user(), which expands the same code
+ * here.
+ *
+ * Rust code always uses the extern definition.
+ */
 static inline __must_check unsigned long
-_copy_from_user(void *to, const void __user *from, unsigned long n)
+_inline_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	unsigned long res = n;
 	might_fault();
 	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+		/*
+		 * Ensure that bad access_ok() speculation will not
+		 * lead to nasty side effects *after* the copy is
+		 * finished:
+		 */
+		barrier_nospec();
 		instrument_copy_from_user_before(to, from, n);
 		res = raw_copy_from_user(to, from, n);
 		instrument_copy_from_user_after(to, from, n, res);
@@ -153,14 +167,11 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
 		memset(to + (n - res), 0, res);
 	return res;
 }
-#else
 extern __must_check unsigned long
 _copy_from_user(void *, const void __user *, unsigned long);
-#endif
 
-#ifdef INLINE_COPY_TO_USER
 static inline __must_check unsigned long
-_copy_to_user(void __user *to, const void *from, unsigned long n)
+_inline_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	might_fault();
 	if (should_fail_usercopy())
@@ -171,25 +182,32 @@ _copy_to_user(void __user *to, const void *from, unsigned long n)
 	}
 	return n;
 }
-#else
 extern __must_check unsigned long
 _copy_to_user(void __user *, const void *, unsigned long);
-#endif
 
 static __always_inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (check_copy_size(to, n, false))
-		n = _copy_from_user(to, from, n);
-	return n;
+	if (!check_copy_size(to, n, false))
+		return n;
+#ifdef INLINE_COPY_FROM_USER
+	return _inline_copy_from_user(to, from, n);
+#else
+	return _copy_from_user(to, from, n);
+#endif
 }
 
 static __always_inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (check_copy_size(from, n, true))
-		n = _copy_to_user(to, from, n);
-	return n;
+	if (!check_copy_size(from, n, true))
+		return n;
+
+#ifdef INLINE_COPY_TO_USER
+	return _inline_copy_to_user(to, from, n);
+#else
+	return _copy_to_user(to, from, n);
+#endif
 }
 
 #ifndef copy_mc_to_kernel
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -12,40 +12,18 @@
 
 /* out-of-line parts */
 
-#ifndef INLINE_COPY_FROM_USER
+#if !defined(INLINE_COPY_FROM_USER) || defined(CONFIG_RUST)
 unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	unsigned long res = n;
-	might_fault();
-	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
-		/*
-		 * Ensure that bad access_ok() speculation will not
-		 * lead to nasty side effects *after* the copy is
-		 * finished:
-		 */
-		barrier_nospec();
-		instrument_copy_from_user_before(to, from, n);
-		res = raw_copy_from_user(to, from, n);
-		instrument_copy_from_user_after(to, from, n, res);
-	}
-	if (unlikely(res))
-		memset(to + (n - res), 0, res);
-	return res;
+	return _inline_copy_from_user(to, from, n);
 }
 EXPORT_SYMBOL(_copy_from_user);
 #endif
 
-#ifndef INLINE_COPY_TO_USER
+#if !defined(INLINE_COPY_TO_USER) || defined(CONFIG_RUST)
 unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	might_fault();
-	if (should_fail_usercopy())
-		return n;
-	if (likely(access_ok(to, n))) {
-		instrument_copy_to_user(to, from, n);
-		n = raw_copy_to_user(to, from, n);
-	}
-	return n;
+	return _inline_copy_to_user(to, from, n);
 }
 EXPORT_SYMBOL(_copy_to_user);
 #endif