
RISC-V: Add hwprobe vDSO function and data

Add a vDSO function __vdso_riscv_hwprobe, which can sit in front of the
riscv_hwprobe syscall and answer common queries. We stash a copy of
static answers for the "all CPUs" case in the vDSO data page. This data
is private to the vDSO, so we can decide later to change what's stored
there or under what conditions we defer to the syscall. Currently all
data can be discovered at boot, so the vDSO function answers all queries
when the cpumask is set to the "all CPUs" hint.

There's also a boolean in the data that lets the vDSO function know that
all CPUs are the same. In that case, the vDSO will also answer queries
for arbitrary CPU masks in addition to the "all CPUs" hint.

Signed-off-by: Evan Green <evan@rivosinc.com>
Link: https://lore.kernel.org/r/20230407231103.2622178-7-evan@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Evan Green 2023-04-07 16:11:03 -07:00 committed by Palmer Dabbelt
parent 287dcc2b0c
commit aa5af0aa90
10 changed files with 146 additions and 7 deletions
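
For readers unfamiliar with hwprobe, the sketch below shows what a query looks like from userspace today, using the raw syscall in its "all CPUs" form (cpu_count == 0, cpus == NULL), which is exactly the case the vDSO function can answer from the data page once a libc wrapper routes calls through __vdso_riscv_hwprobe. This is an illustrative sketch only, and assumes uapi headers from a kernel that carries this series (<asm/hwprobe.h> for struct riscv_hwprobe and the RISCV_HWPROBE_KEY_* constants, and an <asm/unistd.h> new enough to define __NR_riscv_hwprobe).

/* Illustrative sketch, not part of the patch. Assumes uapi headers from a
 * kernel that includes this series. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>        /* struct riscv_hwprobe, RISCV_HWPROBE_KEY_* */

int main(void)
{
        struct riscv_hwprobe pairs[] = {
                { .key = RISCV_HWPROBE_KEY_MVENDORID },
                { .key = RISCV_HWPROBE_KEY_MARCHID },
                { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
        };
        size_t npairs = sizeof(pairs) / sizeof(pairs[0]);

        /* cpu_count == 0 and cpus == NULL is the "all online CPUs" form. */
        long ret = syscall(__NR_riscv_hwprobe, pairs, npairs, 0, NULL, 0);
        if (ret < 0) {
                perror("riscv_hwprobe");
                return 1;
        }

        for (size_t i = 0; i < npairs; i++)
                printf("key %lld -> value 0x%llx\n",
                       (long long)pairs[i].key,
                       (unsigned long long)pairs[i].value);
        return 0;
}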

arch/riscv/Kconfig

@@ -33,6 +33,7 @@ config RISCV
select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAS_VDSO_DATA
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
select ARCH_STACKWALK

arch/riscv/include/asm/vdso/data.h

@@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __RISCV_ASM_VDSO_DATA_H
#define __RISCV_ASM_VDSO_DATA_H

#include <linux/types.h>
#include <vdso/datapage.h>
#include <asm/hwprobe.h>

struct arch_vdso_data {
        /* Stash static answers to the hwprobe queries when all CPUs are selected. */
        __u64 all_cpu_hwprobe_values[RISCV_HWPROBE_MAX_KEY + 1];

        /* Boolean indicating all CPUs have the same static hwprobe values. */
        __u8 homogeneous_cpus;
};

#endif /* __RISCV_ASM_VDSO_DATA_H */

arch/riscv/include/asm/vdso/gettimeofday.h

@@ -9,6 +9,12 @@
#include <asm/csr.h>
#include <uapi/linux/time.h>
/*
 * 32-bit land is lacking generic time vsyscalls as well as the legacy 32-bit
 * time syscalls like gettimeofday. Skip these definitions on 32-bit.
 */
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
#define VDSO_HAS_CLOCK_GETRES 1
static __always_inline
@@ -60,6 +66,8 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
return ret;
}
#endif /* CONFIG_GENERIC_TIME_VSYSCALL */
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
const struct vdso_data *vd)
{

arch/riscv/kernel/compat_vdso/Makefile

@@ -22,7 +22,7 @@ targets := $(obj-compat_vdso) compat_vdso.so compat_vdso.so.dbg compat_vdso.lds
obj-compat_vdso := $(addprefix $(obj)/, $(obj-compat_vdso))
obj-y += compat_vdso.o
CPPFLAGS_compat_vdso.lds += -P -C -U$(ARCH)
CPPFLAGS_compat_vdso.lds += -P -C -DCOMPAT_VDSO -U$(ARCH)
# Disable profiling and instrumentation for VDSO code
GCOV_PROFILE := n

arch/riscv/kernel/sys_riscv.c

@@ -14,6 +14,7 @@
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm-generic/mman-common.h>
#include <vdso/vsyscall.h>
static long riscv_sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
@@ -243,6 +244,50 @@ static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
return 0;
}
#ifdef CONFIG_MMU

static int __init init_hwprobe_vdso_data(void)
{
        struct vdso_data *vd = __arch_get_k_vdso_data();
        struct arch_vdso_data *avd = &vd->arch_data;
        u64 id_bitsmash = 0;
        struct riscv_hwprobe pair;
        int key;

        /*
         * Initialize vDSO data with the answers for the "all CPUs" case, to
         * save a syscall in the common case.
         */
        for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
                pair.key = key;
                hwprobe_one_pair(&pair, cpu_online_mask);

                WARN_ON_ONCE(pair.key < 0);

                avd->all_cpu_hwprobe_values[key] = pair.value;
                /*
                 * Smash together the vendor, arch, and impl IDs to see if
                 * they're all 0 or any negative.
                 */
                if (key <= RISCV_HWPROBE_KEY_MIMPID)
                        id_bitsmash |= pair.value;
        }

        /*
         * If the arch, vendor, and implementation ID are all the same across
         * all harts, then assume all CPUs are the same, and allow the vDSO to
         * answer queries for arbitrary masks. However, if all values are 0 (not
         * populated) or any value returns -1 (varies across CPUs), then the
         * vDSO should defer to the kernel for exotic cpu masks.
         */
        avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
        return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#endif /* CONFIG_MMU */
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
size_t, pair_count, size_t, cpu_count, unsigned long __user *,
cpus, unsigned int, flags)
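
A side note on the heuristic above: ORing the vendor, arch, and impl ID values (where hwprobe_one_pair() reports -1 when a value differs across harts) collapses the decision to "neither 0 nor -1". Below is a minimal standalone sketch of that predicate; the helper name and the sample ID values are made up purely for illustration and are not part of the patch.

/* Illustrative sketch of the homogeneity heuristic; not kernel code. */
#include <stdint.h>
#include <stdio.h>

static int homogeneous(uint64_t mvendorid, uint64_t marchid, uint64_t mimpid)
{
        uint64_t id_bitsmash = mvendorid | marchid | mimpid;

        /* 0 means the IDs were never populated; all-ones (-1) means at least
         * one ID differs across harts. Anything else looks homogeneous. */
        return id_bitsmash != 0 && id_bitsmash != (uint64_t)-1;
}

int main(void)
{
        /* Hypothetical values, just to exercise the three outcomes. */
        printf("%d\n", homogeneous(0x489, 0x8000000000000007ULL, 0x20181004)); /* 1 */
        printf("%d\n", homogeneous(0, 0, 0));                                  /* 0 */
        printf("%d\n", homogeneous(0x489, (uint64_t)-1, 0x20181004));          /* 0 */
        return 0;
}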

arch/riscv/kernel/vdso.c

@@ -14,13 +14,7 @@
#include <asm/page.h>
#include <asm/vdso.h>
#include <linux/time_namespace.h>
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
#include <vdso/datapage.h>
#else
struct vdso_data {
};
#endif
enum vvar_pages {
VVAR_DATA_PAGE_OFFSET,

arch/riscv/kernel/vdso/Makefile

@@ -12,6 +12,8 @@ vdso-syms += vgettimeofday
endif
vdso-syms += getcpu
vdso-syms += flush_icache
vdso-syms += hwprobe
vdso-syms += sys_hwprobe
# Files to link into the vdso
obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
@@ -23,6 +25,8 @@ ifneq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y)
endif
CFLAGS_hwprobe.o += -fPIC
# Build rules
targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))

arch/riscv/kernel/vdso/hwprobe.c

@@ -0,0 +1,52 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2023 Rivos, Inc
 */

#include <linux/types.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>

extern int riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
                         size_t cpu_count, unsigned long *cpus,
                         unsigned int flags);

/* Add a prototype to avoid -Wmissing-prototypes warning. */
int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
                         size_t cpu_count, unsigned long *cpus,
                         unsigned int flags);

int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
                         size_t cpu_count, unsigned long *cpus,
                         unsigned int flags)
{
        const struct vdso_data *vd = __arch_get_vdso_data();
        const struct arch_vdso_data *avd = &vd->arch_data;
        bool all_cpus = !cpu_count && !cpus;
        struct riscv_hwprobe *p = pairs;
        struct riscv_hwprobe *end = pairs + pair_count;

        /*
         * Defer to the syscall for exotic requests. The vdso has answers
         * stashed away only for the "all cpus" case. If all CPUs are
         * homogeneous, then this function can handle requests for arbitrary
         * masks.
         */
        if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus))
                return riscv_hwprobe(pairs, pair_count, cpu_count, cpus, flags);

        /* This is something we can handle, fill out the pairs. */
        while (p < end) {
                if (p->key <= RISCV_HWPROBE_MAX_KEY) {
                        p->value = avd->all_cpu_hwprobe_values[p->key];
                } else {
                        p->key = -1;
                        p->value = 0;
                }

                p++;
        }

        return 0;
}
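
Restated outside the vDSO plumbing, the fast-path condition above is: flags must be 0, and either the request uses the "all CPUs" form (cpu_count == 0 and cpus == NULL) or homogeneous_cpus was set at boot. The standalone sketch below simply re-expresses that predicate for illustration; the helper name and the sample mask are invented and not part of the patch.

/* Illustrative restatement of the vDSO fast-path check; not part of the patch. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool vdso_can_answer(unsigned int flags, size_t cpu_count,
                            const unsigned long *cpus, bool homogeneous_cpus)
{
        bool all_cpus = !cpu_count && !cpus;

        return flags == 0 && (all_cpus || homogeneous_cpus);
}

int main(void)
{
        unsigned long mask = 0x3;       /* hypothetical CPU mask argument */

        printf("%d\n", vdso_can_answer(0, 0, NULL, false));             /* 1: "all CPUs" form */
        printf("%d\n", vdso_can_answer(0, sizeof(mask), &mask, true));  /* 1: homogeneous system */
        printf("%d\n", vdso_can_answer(0, sizeof(mask), &mask, false)); /* 0: defers to the syscall */
        printf("%d\n", vdso_can_answer(1, 0, NULL, true));              /* 0: nonzero flags */
        return 0;
}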

arch/riscv/kernel/vdso/sys_hwprobe.S

@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2023 Rivos, Inc */
#include <linux/linkage.h>
#include <asm/unistd.h>

.text
ENTRY(riscv_hwprobe)
        .cfi_startproc
        li a7, __NR_riscv_hwprobe
        ecall
        ret

        .cfi_endproc
ENDPROC(riscv_hwprobe)

arch/riscv/kernel/vdso/vdso.lds.S

@@ -82,6 +82,9 @@ VERSION
#endif
__vdso_getcpu;
__vdso_flush_icache;
#ifndef COMPAT_VDSO
__vdso_riscv_hwprobe;
#endif
local: *;
};
}
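
With the symbol exported here, a program can in principle call __vdso_riscv_hwprobe without a libc wrapper. The sketch below is a best-effort illustration under stated assumptions: it relies on the libc registering the vDSO in its link map under the soname linux-vdso.so.1, so that dlopen() with RTLD_NOLOAD returns a handle to it (glibc behaves this way); a more portable route is to locate the vDSO via getauxval(AT_SYSINFO_EHDR) and walk its dynamic symbol table, as the kernel's parse_vdso.c selftest does. Build with -ldl on older glibc.

/* Illustrative sketch: resolve and call __vdso_riscv_hwprobe directly.
 * Assumes a glibc-style libc that exposes the vDSO as "linux-vdso.so.1". */
#include <dlfcn.h>
#include <stddef.h>
#include <stdio.h>
#include <asm/hwprobe.h>        /* struct riscv_hwprobe, RISCV_HWPROBE_KEY_* */

typedef int (*hwprobe_fn)(struct riscv_hwprobe *pairs, size_t pair_count,
                          size_t cpu_count, unsigned long *cpus,
                          unsigned int flags);

int main(void)
{
        void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_NOLOAD);
        hwprobe_fn hwprobe = vdso ? (hwprobe_fn)dlsym(vdso, "__vdso_riscv_hwprobe") : NULL;
        struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR };

        if (!hwprobe) {
                fprintf(stderr, "__vdso_riscv_hwprobe not found\n");
                return 1;
        }

        /* The "all CPUs" form: answered from the vDSO data page when possible,
         * otherwise the stub in sys_hwprobe.S falls back to the syscall. */
        if (hwprobe(&pair, 1, 0, NULL, 0) == 0)
                printf("base behavior: 0x%llx\n", (unsigned long long)pair.value);
        return 0;
}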