
xtensa: enable KCSAN

Prefix arch-specific barrier macros with '__' to make use of instrumented
generic macros.
Prefix arch-specific bitops with 'arch_' to make use of instrumented
generic functions.
Provide stubs for 64-bit atomics when building with KCSAN.
Disable KCSAN instrumentation in arch/xtensa/boot.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Acked-by: Marco Elver <elver@google.com>
Author: Max Filippov <jcmvbkbc@gmail.com>
Date: 2019-10-04 23:33:31 -07:00
commit 725aea8732
parent 507185695e
6 changed files with 73 additions and 7 deletions

arch/xtensa/Kconfig

@@ -29,6 +29,7 @@ config XTENSA
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
+select HAVE_ARCH_KCSAN
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_CONTEXT_TRACKING

arch/xtensa/boot/lib/Makefile

@@ -16,6 +16,7 @@ CFLAGS_REMOVE_inffast.o = -pg
endif
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong
CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong

arch/xtensa/include/asm/barrier.h

@@ -11,9 +11,15 @@
#include <asm/core.h>
-#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
-#define rmb() barrier()
-#define wmb() mb()
+#define __mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
+#define __rmb() barrier()
+#define __wmb() __mb()
+
+#ifdef CONFIG_SMP
+#define __smp_mb() __mb()
+#define __smp_rmb() __rmb()
+#define __smp_wmb() __wmb()
+#endif
#if XCHAL_HAVE_S32C1I
#define __smp_mb__before_atomic() barrier()
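
With the arch macros spelled __mb()/__rmb()/__wmb(), the unprefixed mb()/rmb()/wmb() are supplied by include/asm-generic/barrier.h, which layers the KCSAN hooks on top. A minimal sketch of that wrapping pattern (assumed shape, not the verbatim generic header):

/* Sketch: if the architecture defines __mb(), the generic header
 * builds the instrumented mb() on top of it. kcsan_mb() informs the
 * KCSAN runtime that a full memory barrier executes here. */
#ifdef __mb
#define mb() do { kcsan_mb(); __mb(); } while (0)
#endif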

arch/xtensa/include/asm/bitops.h

@@ -99,7 +99,7 @@ static inline unsigned long __fls(unsigned long word)
#if XCHAL_HAVE_EXCLUSIVE
#define BIT_OP(op, insn, inv) \
-static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+static inline void arch_##op##_bit(unsigned int bit, volatile unsigned long *p)\
{ \
unsigned long tmp; \
unsigned long mask = 1UL << (bit & 31); \
@@ -119,7 +119,7 @@ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
#define TEST_AND_BIT_OP(op, insn, inv) \
static inline int \
-test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
+arch_test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
{ \
unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \
@@ -142,7 +142,7 @@ test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
#elif XCHAL_HAVE_S32C1I
#define BIT_OP(op, insn, inv) \
-static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+static inline void arch_##op##_bit(unsigned int bit, volatile unsigned long *p)\
{ \
unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \
@@ -163,7 +163,7 @@ static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
#define TEST_AND_BIT_OP(op, insn, inv) \
static inline int \
-test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
+arch_test_and_##op##_bit(unsigned int bit, volatile unsigned long *p) \
{ \
unsigned long tmp, value; \
unsigned long mask = 1UL << (bit & 31); \
@@ -205,6 +205,8 @@ BIT_OPS(change, "xor", )
#undef BIT_OP
#undef TEST_AND_BIT_OP
+#include <asm-generic/bitops/instrumented-atomic.h>
+
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
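
With the implementations renamed to arch_set_bit(), arch_test_and_set_bit() and so on, the newly included asm-generic/bitops/instrumented-atomic.h provides the unprefixed entry points. Roughly (a sketch, not the verbatim header), each wrapper reports the access to the sanitizers and then calls the arch function:

/* Sketch of the generic instrumented wrapper: report the atomic
 * write to KCSAN/KASAN via instrument_atomic_write(), then defer to
 * the arch_ implementation renamed by this patch. */
static __always_inline void set_bit(long nr, volatile unsigned long *addr)
{
	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
	arch_set_bit(nr, addr);
}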

arch/xtensa/lib/Makefile

@@ -8,3 +8,5 @@ lib-y += memcopy.o memset.o checksum.o \
divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o \
usercopy.o strncpy_user.o strnlen_user.o
lib-$(CONFIG_PCI) += pci-auto.o
+lib-$(CONFIG_KCSAN) += kcsan-stubs.o
+KCSAN_SANITIZE_kcsan-stubs.o := n

arch/xtensa/lib/kcsan-stubs.c

@@ -0,0 +1,54 @@
// SPDX-License-Identifier: GPL-2.0

#include <linux/bug.h>
#include <linux/types.h>

void __atomic_store_8(volatile void *p, u64 v, int i)
{
	BUG();
}

u64 __atomic_load_8(const volatile void *p, int i)
{
	BUG();
}

u64 __atomic_exchange_8(volatile void *p, u64 v, int i)
{
	BUG();
}

bool __atomic_compare_exchange_8(volatile void *p1, void *p2, u64 v, bool b, int i1, int i2)
{
	BUG();
}

u64 __atomic_fetch_add_8(volatile void *p, u64 v, int i)
{
	BUG();
}

u64 __atomic_fetch_sub_8(volatile void *p, u64 v, int i)
{
	BUG();
}

u64 __atomic_fetch_and_8(volatile void *p, u64 v, int i)
{
	BUG();
}

u64 __atomic_fetch_or_8(volatile void *p, u64 v, int i)
{
	BUG();
}

u64 __atomic_fetch_xor_8(volatile void *p, u64 v, int i)
{
	BUG();
}

u64 __atomic_fetch_nand_8(volatile void *p, u64 v, int i)
{
	BUG();
}
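
Why the stubs exist: KCSAN's runtime implements the TSan-ABI atomic hooks with compiler __atomic builtins, and on 32-bit xtensa a 64-bit builtin cannot be expanded inline, so GCC emits calls to out-of-line helpers such as __atomic_load_8(). The kernel never links libatomic, so the architecture must provide the symbols; no code on xtensa should actually reach them, hence BUG(). A rough sketch of how such a call arises (assumed shape of the kernel/kcsan/core.c hook, not verbatim):

/* Sketch: a TSan-ABI hook in the KCSAN runtime. On a 32-bit target
 * the 64-bit __atomic_load_n() builtin lowers to a call to
 * __atomic_load_8(), the symbol satisfied by the stub above. */
u64 __tsan_atomic64_load(const volatile u64 *ptr, int memorder)
{
	return __atomic_load_n(ptr, memorder);
}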