
x86/uaccess: Avoid barrier_nospec() in 64-bit copy_from_user()

The barrier_nospec() in 64-bit copy_from_user() is slow. Instead use
pointer masking to force the user pointer to all 1's for an invalid
address.

The kernel test robot reports a 2.6% improvement in the per_thread_ops
benchmark [1].

This is a variation on a patch originally by Josh Poimboeuf [2].

Link: https://lore.kernel.org/202410281344.d02c72a2-oliver.sang@intel.com [1]
Link: https://lore.kernel.org/5b887fe4c580214900e21f6c61095adf9a142735.1730166635.git.jpoimboe@kernel.org [2]
Tested-and-reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Linus Torvalds 2024-10-29 16:03:31 -10:00
parent 14b7d43c5c
commit 0fc810ae3a

include/linux/uaccess.h

@@ -38,6 +38,7 @@
 #else
 #define can_do_masked_user_access() 0
 #define masked_user_access_begin(src) NULL
+#define mask_user_address(src) (src)
 #endif
 
 /*
@@ -159,18 +160,26 @@ _inline_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	unsigned long res = n;
 	might_fault();
-	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+	if (should_fail_usercopy())
+		goto fail;
+	if (can_do_masked_user_access())
+		from = mask_user_address(from);
+	else {
+		if (!access_ok(from, n))
+			goto fail;
 		/*
 		 * Ensure that bad access_ok() speculation will not
 		 * lead to nasty side effects *after* the copy is
 		 * finished:
 		 */
 		barrier_nospec();
+	}
 	instrument_copy_from_user_before(to, from, n);
 	res = raw_copy_from_user(to, from, n);
 	instrument_copy_from_user_after(to, from, n, res);
-	}
-	if (unlikely(res))
+	if (likely(!res))
+		return 0;
+fail:
 	memset(to + (n - res), 0, res);
 	return res;
 }
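
For illustration, the masking trick the commit message describes can be modeled in plain C roughly as below. This is a sketch only, not the kernel's code: the real mask_user_address() is architecture-specific (the hunk above only adds the generic no-op fallback), and USER_ADDR_MAX and mask_user_address_sketch() are stand-in names invented for this example.

/*
 * Sketch of the pointer-masking idea from the commit message; not
 * the kernel's implementation.  USER_ADDR_MAX and the function name
 * are stand-ins invented for this example.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define USER_ADDR_MAX	((uintptr_t)0x00007fffffffffffULL)	/* assumed user-space limit */

static uintptr_t mask_user_address_sketch(uintptr_t ptr)
{
	/*
	 * mask is 0 for an in-range pointer and all 1's otherwise;
	 * OR-ing it in leaves a good pointer alone and turns a bad
	 * one into ~0, which the copy then faults on.  The arithmetic
	 * on the pointer value replaces the old "branch on access_ok(),
	 * then barrier_nospec()" pattern, so there is no mispredicted
	 * branch left to fence against.
	 */
	uintptr_t mask = 0 - (uintptr_t)(ptr > USER_ADDR_MAX);

	return ptr | mask;
}

int main(void)
{
	uintptr_t ok  = (uintptr_t)0x00007fff00001000ULL;	/* plausible user address */
	uintptr_t bad = (uintptr_t)0xffff888000000000ULL;	/* kernel-range address */

	printf("%#" PRIxPTR " -> %#" PRIxPTR "\n", ok, mask_user_address_sketch(ok));
	printf("%#" PRIxPTR " -> %#" PRIxPTR "\n", bad, mask_user_address_sketch(bad));
	return 0;
}

In the new _inline_copy_from_user() above, an all-1's pointer simply makes raw_copy_from_user() fault, so res stays nonzero and the fail path zeroes the destination, exactly as a rejected access_ok() did before. Architectures without masked user access keep the access_ok() check and barrier_nospec() in the else branch.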