// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/libnvdimm.h>

/*
 * Zero Userspace
 */

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr: virtual start address
 * @size: number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

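	/* round the start address down to a cache-line boundary */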
	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}

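/*
 * arch_wb_cache_pmem() is the write-back hook exported for the
 * libnvdimm/pmem code (see <linux/libnvdimm.h>).
 */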
void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc;

	stac();
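	/*
	 * __copy_user_nocache() always copies to kernel space and never
	 * page-faults on the destination; its non-temporal stores can,
	 * however, take a machine check when the destination is pmem
	 * with memory errors, which is why a partial copy can be
	 * reported via the return value.
	 */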
	rc = __copy_user_nocache(dst, src, size);
	clac();

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
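		/* flush the cache line covering the unaligned head */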
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

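		/* likewise flush the cache line covering an unaligned tail */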
		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}

void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
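		/*
		 * min_t() uses size_t so that a size above 4GiB is not
		 * truncated when computing the length of this alignment
		 * prefix.
		 */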
		size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

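	/*
	 * The movnti loops below use non-temporal stores that bypass the
	 * cache, so the bulk of the copy needs no explicit write-back.
	 */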
	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);

#endif