
perf tools fixes for v6.12: 2nd batch

- Update more header copies with the kernel sources, including const.h,
   msr-index.h, arm64's cputype.h, kvm's, bits.h and unaligned.h
 
 - The return from 'write' isn't a pid, fix cut'n'paste error in 'perf
   trace'.
 
 - Fix up the python binding build on architectures without
   HAVE_KVM_STAT_SUPPORT.
 
 - Add some more bounds checks to augmented_raw_syscalls.bpf.c (used to
   collect syscall pointer arguments in 'perf trace') so that the
   resulting bytecode passes the kernel BPF verifier, allowing us to go
   back to accepting clang 12.0.1 as the minimum version required for
   compiling BPF sources.
 
 - Add __NR_capget for x86 to fix a regression when running perf + Intel
   PT (hw tracing) as non-root with the capabilities set up as described
   in https://www.kernel.org/doc/html/latest/admin-guide/perf-security.html.
 
 - Fix the missing syscalltbl in architectures not explicitly listed,
   noticed on 32-bit ARM, which still needs a .tbl generator for the
   syscall id<->name tables; that should be added for v6.13.
 
 - Handle a 'perf test' failure when dealing with broken DWARF for ASM
   files.
 
 Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQR2GiIUctdOfX2qHhGyPKLppCJ+JwUCZyKQVQAKCRCyPKLppCJ+
 JxZKAQCOU0YgvvQ0LH6PfB9uGqRC/zOEHp9CnXxTK17rpKD/iAD/YYvH97Rrfx2V
 H5FdoyK7OtFrkV8WhNcKMKHFfBMl8Ac=
 =XDkJ
 -----END PGP SIGNATURE-----

Merge tag 'perf-tools-fixes-for-v6.12-2-2024-10-30' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools

Pull perf tools fixes from Arnaldo Carvalho de Melo:

 - Update more header copies with the kernel sources, including const.h,
   msr-index.h, arm64's cputype.h, kvm's, bits.h and unaligned.h

 - The return from 'write' isn't a pid, fix cut'n'paste error in 'perf
   trace'

 - Fix up the python binding build on architectures without
   HAVE_KVM_STAT_SUPPORT

 - Add some more bounds checks to augmented_raw_syscalls.bpf.c (used to
   collect syscall pointer arguments in 'perf trace') so that the
   resulting bytecode passes the kernel BPF verifier, allowing us to go
   back to accepting clang 12.0.1 as the minimum version required for
   compiling BPF sources

 - Add __NR_capget for x86 to fix a regression when running perf + Intel
   PT (hw tracing) as non-root with the capabilities set up as described
   in https://www.kernel.org/doc/html/latest/admin-guide/perf-security.html

 - Fix the missing syscalltbl in architectures not explicitly listed,
   noticed on 32-bit ARM, which still needs a .tbl generator for the
   syscall id<->name tables; that should be added for v6.13

 - Handle a 'perf test' failure when dealing with broken DWARF for ASM
   files

* tag 'perf-tools-fixes-for-v6.12-2-2024-10-30' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools:
  perf cap: Add __NR_capget to arch/x86 unistd
  tools headers: Update the linux/unaligned.h copy with the kernel sources
  tools headers arm64: Sync arm64's cputype.h with the kernel sources
  tools headers: Synchronize {uapi/}linux/bits.h with the kernel sources
  tools arch x86: Sync the msr-index.h copy with the kernel sources
  perf python: Fix up the build on architectures without HAVE_KVM_STAT_SUPPORT
  perf test: Handle perftool-testsuite_probe failure due to broken DWARF
  tools headers UAPI: Sync kvm headers with the kernel sources
  perf trace: Fix non-listed archs in the syscalltbl routines
  perf build: Change the clang check back to 12.0.1
  perf trace augmented_raw_syscalls: Add more checks to pass the verifier
  perf trace augmented_raw_syscalls: Add extra array index bounds checking to satisfy some BPF verifiers
  perf trace: The return from 'write' isn't a pid
  tools headers UAPI: Sync linux/const.h with the kernel headers
commit 14b7d43c5c
Author: Linus Torvalds
Date:   2024-10-30 11:17:47 -10:00

18 changed files with 172 additions and 49 deletions

View File

@ -94,6 +94,7 @@
#define ARM_CPU_PART_NEOVERSE_V3 0xD84
#define ARM_CPU_PART_CORTEX_X925 0xD85
#define ARM_CPU_PART_CORTEX_A725 0xD87
#define ARM_CPU_PART_NEOVERSE_N3 0xD8E
#define APM_CPU_PART_XGENE 0x000
#define APM_CPU_VAR_POTENZA 0x00
@ -176,6 +177,7 @@
#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
#define MIDR_NEOVERSE_N3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N3)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
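
For reference, a small standalone sketch (not part of this commit) of what a MIDR value such as the new MIDR_NEOVERSE_N3 expands to, assuming the usual arm64 MIDR_EL1 layout behind MIDR_CPU_MODEL(): implementer in bits [31:24], the architecture nibble 0xf in bits [19:16], and the part number in bits [15:4]:

  /* Sketch only: mirrors the MIDR_CPU_MODEL() packing assumed above. */
  #include <stdio.h>
  #include <stdint.h>

  #define MIDR_CPU_MODEL(imp, partnum) \
          (((uint32_t)(imp) << 24) | (0xfu << 16) | ((uint32_t)(partnum) << 4))

  int main(void)
  {
          /* ARM Ltd. implementer (0x41) + the Neoverse N3 part (0xD8E) */
          printf("MIDR_NEOVERSE_N3 = 0x%08x\n", MIDR_CPU_MODEL(0x41, 0xD8E));
          return 0;
  }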

View File

@ -36,6 +36,20 @@
#define EFER_FFXSR (1<<_EFER_FFXSR)
#define EFER_AUTOIBRS (1<<_EFER_AUTOIBRS)
/*
* Architectural memory types that are common to MTRRs, PAT, VMX MSRs, etc.
* Most MSRs support/allow only a subset of memory types, but the values
* themselves are common across all relevant MSRs.
*/
#define X86_MEMTYPE_UC 0ull /* Uncacheable, a.k.a. Strong Uncacheable */
#define X86_MEMTYPE_WC 1ull /* Write Combining */
/* RESERVED 2 */
/* RESERVED 3 */
#define X86_MEMTYPE_WT 4ull /* Write Through */
#define X86_MEMTYPE_WP 5ull /* Write Protected */
#define X86_MEMTYPE_WB 6ull /* Write Back */
#define X86_MEMTYPE_UC_MINUS 7ull /* Weak Uncacheabled (PAT only) */
/* FRED MSRs */
#define MSR_IA32_FRED_RSP0 0x1cc /* Level 0 stack pointer */
#define MSR_IA32_FRED_RSP1 0x1cd /* Level 1 stack pointer */
@ -365,6 +379,12 @@
#define MSR_IA32_CR_PAT 0x00000277
#define PAT_VALUE(p0, p1, p2, p3, p4, p5, p6, p7) \
((X86_MEMTYPE_ ## p0) | (X86_MEMTYPE_ ## p1 << 8) | \
(X86_MEMTYPE_ ## p2 << 16) | (X86_MEMTYPE_ ## p3 << 24) | \
(X86_MEMTYPE_ ## p4 << 32) | (X86_MEMTYPE_ ## p5 << 40) | \
(X86_MEMTYPE_ ## p6 << 48) | (X86_MEMTYPE_ ## p7 << 56))
#define MSR_IA32_DEBUGCTLMSR 0x000001d9
#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
@ -1159,15 +1179,6 @@
#define MSR_IA32_VMX_VMFUNC 0x00000491
#define MSR_IA32_VMX_PROCBASED_CTLS3 0x00000492
/* VMX_BASIC bits and bitmasks */
#define VMX_BASIC_VMCS_SIZE_SHIFT 32
#define VMX_BASIC_TRUE_CTLS (1ULL << 55)
#define VMX_BASIC_64 0x0001000000000000LLU
#define VMX_BASIC_MEM_TYPE_SHIFT 50
#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU
#define VMX_BASIC_MEM_TYPE_WB 6LLU
#define VMX_BASIC_INOUT 0x0040000000000000LLU
/* Resctrl MSRs: */
/* - Intel: */
#define MSR_IA32_L3_QOS_CFG 0xc81
@ -1185,11 +1196,6 @@
#define MSR_IA32_SMBA_BW_BASE 0xc0000280
#define MSR_IA32_EVT_CFG_BASE 0xc0000400
/* MSR_IA32_VMX_MISC bits */
#define MSR_IA32_VMX_MISC_INTEL_PT (1ULL << 14)
#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F
/* AMD-V MSRs */
#define MSR_VM_CR 0xc0010114
#define MSR_VM_IGNNE 0xc0010115
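
As a worked example (not from the commit) of what the new PAT_VALUE() macro computes: each of the eight PAT entries gets one X86_MEMTYPE_* value, one byte apart, so the classic reset layout WB, WT, UC-, UC repeated twice packs into 0x0007040600070406. A self-contained sketch reproducing the macros from the hunk above:

  /* Sketch: pack eight memory types into a single MSR_IA32_CR_PAT value. */
  #include <stdio.h>

  #define X86_MEMTYPE_UC       0ull
  #define X86_MEMTYPE_WT       4ull
  #define X86_MEMTYPE_WB       6ull
  #define X86_MEMTYPE_UC_MINUS 7ull

  #define PAT_VALUE(p0, p1, p2, p3, p4, p5, p6, p7)                 \
          ((X86_MEMTYPE_ ## p0)       | (X86_MEMTYPE_ ## p1 << 8)  | \
           (X86_MEMTYPE_ ## p2 << 16) | (X86_MEMTYPE_ ## p3 << 24) | \
           (X86_MEMTYPE_ ## p4 << 32) | (X86_MEMTYPE_ ## p5 << 40) | \
           (X86_MEMTYPE_ ## p6 << 48) | (X86_MEMTYPE_ ## p7 << 56))

  int main(void)
  {
          /* Power-on/reset PAT programming: WB, WT, UC-, UC, repeated. */
          printf("0x%016llx\n",
                 PAT_VALUE(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC));
          return 0;
  }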

View File

@ -439,6 +439,7 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN (1 << 5)
#define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6)
#define KVM_X86_QUIRK_SLOT_ZAP_ALL (1 << 7)
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1

View File

@ -11,6 +11,9 @@
#ifndef __NR_getpgid
#define __NR_getpgid 132
#endif
#ifndef __NR_capget
#define __NR_capget 184
#endif
#ifndef __NR_gettid
#define __NR_gettid 224
#endif

View File

@ -11,6 +11,9 @@
#ifndef __NR_getpgid
#define __NR_getpgid 121
#endif
#ifndef __NR_capget
#define __NR_capget 125
#endif
#ifndef __NR_gettid
#define __NR_gettid 186
#endif

View File

@ -36,4 +36,19 @@
#define GENMASK_ULL(h, l) \
(GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
#if !defined(__ASSEMBLY__)
/*
* Missing asm support
*
* __GENMASK_U128() depends on _BIT128() which would not work
* in the asm code, as it shifts an 'unsigned __init128' data
* type instead of direct representation of 128 bit constants
* such as long and unsigned long. The fundamental problem is
* that a 128 bit constant will get silently truncated by the
* gcc compiler.
*/
#define GENMASK_U128(h, l) \
(GENMASK_INPUT_CHECK(h, l) + __GENMASK_U128(h, l))
#endif
#endif /* __LINUX_BITS_H */

View File

@ -9,16 +9,7 @@
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
#pragma GCC diagnostic ignored "-Wattributes"
#define __get_unaligned_t(type, ptr) ({ \
const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
__pptr->x; \
})
#define __put_unaligned_t(type, val, ptr) do { \
struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
__pptr->x = (val); \
} while (0)
#include <vdso/unaligned.h>
#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))

View File

@ -12,4 +12,7 @@
(((~_ULL(0)) - (_ULL(1) << (l)) + 1) & \
(~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h))))
#define __GENMASK_U128(h, l) \
((_BIT128((h)) << 1) - (_BIT128(l)))
#endif /* _UAPI_LINUX_BITS_H */

View File

@ -28,6 +28,23 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
#if !defined(__ASSEMBLY__)
/*
* Missing asm support
*
* __BIT128() would not work in the asm code, as it shifts an
* 'unsigned __init128' data type as direct representation of
* 128 bit constants is not supported in the gcc compiler, as
* they get silently truncated.
*
* TODO: Please revisit this implementation when gcc compiler
* starts representing 128 bit constants directly like long
* and unsigned long etc. Subsequently drop the comment for
* GENMASK_U128() which would then start supporting asm code.
*/
#define _BIT128(x) ((unsigned __int128)(1) << (x))
#endif
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
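
A standalone sketch (assuming a 64-bit gcc or clang with __int128 support, and skipping the GENMASK_INPUT_CHECK() wrapper) of why _BIT128() shifts an unsigned __int128 instead of using a 128-bit literal, and what a mask crossing the 64-bit boundary looks like:

  /* Sketch reproducing _BIT128()/__GENMASK_U128() from the hunks above. */
  #include <stdio.h>

  #define _BIT128(x)           ((unsigned __int128)(1) << (x))
  #define __GENMASK_U128(h, l) ((_BIT128((h)) << 1) - (_BIT128(l)))

  int main(void)
  {
          /* Bits 71..64 set: no plain unsigned long long literal can hold this. */
          unsigned __int128 m = __GENMASK_U128(71, 64);

          printf("high = 0x%016llx  low = 0x%016llx\n",
                 (unsigned long long)(m >> 64), (unsigned long long)m);
          return 0;
  }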

View File

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __VDSO_UNALIGNED_H
#define __VDSO_UNALIGNED_H
#define __get_unaligned_t(type, ptr) ({ \
const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
__pptr->x; \
})
#define __put_unaligned_t(type, val, ptr) do { \
struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
__pptr->x = (val); \
} while (0)
#endif /* __VDSO_UNALIGNED_H */
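
A short usage sketch (not part of the commit; __packed spelled out here since this builds outside the kernel) of the helpers that now live in vdso/unaligned.h — the __packed single-member struct makes the compiler emit loads/stores that are safe at any alignment:

  #include <stdio.h>
  #include <stdint.h>

  #define __packed __attribute__((packed))

  #define __get_unaligned_t(type, ptr) ({                                     \
          const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr);  \
          __pptr->x;                                                          \
  })

  #define __put_unaligned_t(type, val, ptr) do {                              \
          struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr);        \
          __pptr->x = (val);                                                  \
  } while (0)

  int main(void)
  {
          unsigned char buf[8] = { 0 };

          __put_unaligned_t(uint32_t, 0xdeadbeef, buf + 1); /* odd offset: misaligned */
          printf("0x%x\n", __get_unaligned_t(uint32_t, buf + 1));
          return 0;
  }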

View File

@ -704,8 +704,8 @@ ifeq ($(BUILD_BPF_SKEL),1)
BUILD_BPF_SKEL := 0
else
CLANG_VERSION := $(shell $(CLANG) --version | head -1 | sed 's/.*clang version \([[:digit:]]\+.[[:digit:]]\+.[[:digit:]]\+\).*/\1/g')
ifeq ($(call version-lt3,$(CLANG_VERSION),16.0.6),1)
$(warning Warning: Disabled BPF skeletons as at least $(CLANG) version 16.0.6 is reported to be a working setup with the current of BPF based perf features)
ifeq ($(call version-lt3,$(CLANG_VERSION),12.0.1),1)
$(warning Warning: Disabled BPF skeletons as reliable BTF generation needs at least $(CLANG) version 12.0.1)
BUILD_BPF_SKEL := 0
endif
endif

View File

@ -1399,7 +1399,7 @@ static const struct syscall_fmt syscall_fmts[] = {
.arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
{ .name = "waitid", .errpid = true,
.arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
{ .name = "write", .errpid = true,
{ .name = "write",
.arg = { [1] = { .scnprintf = SCA_BUF /* buf */, .from_user = true, }, }, },
};

View File

@ -22,6 +22,7 @@ FILES=(
"include/vdso/bits.h"
"include/linux/const.h"
"include/vdso/const.h"
"include/vdso/unaligned.h"
"include/linux/hash.h"
"include/linux/list-sort.h"
"include/uapi/linux/hw_breakpoint.h"

View File

@ -19,35 +19,74 @@
TEST_RESULT=0
# skip if not supported
BLACKFUNC=`head -n 1 /sys/kernel/debug/kprobes/blacklist 2> /dev/null | cut -f2`
if [ -z "$BLACKFUNC" ]; then
BLACKFUNC_LIST=`head -n 5 /sys/kernel/debug/kprobes/blacklist 2> /dev/null | cut -f2`
if [ -z "$BLACKFUNC_LIST" ]; then
print_overall_skipped
exit 0
fi
# try to find vmlinux with DWARF debug info
VMLINUX_FILE=$(perf probe -v random_probe |& grep "Using.*for symbols" | sed -r 's/^Using (.*) for symbols$/\1/')
# remove all previously added probes
clear_all_probes
### adding blacklisted function
REGEX_SCOPE_FAIL="Failed to find scope of probe point"
REGEX_SKIP_MESSAGE=" is blacklisted function, skip it\."
REGEX_NOT_FOUND_MESSAGE="Probe point \'$RE_EVENT\' not found."
REGEX_ERROR_MESSAGE="Error: Failed to add events."
REGEX_INVALID_ARGUMENT="Failed to write event: Invalid argument"
REGEX_SYMBOL_FAIL="Failed to find symbol at $RE_ADDRESS"
REGEX_OUT_SECTION="$RE_EVENT is out of \.\w+, skip it"
REGEX_MISSING_DECL_LINE="A function DIE doesn't have decl_line. Maybe broken DWARF?"
BLACKFUNC=""
SKIP_DWARF=0
for BLACKFUNC in $BLACKFUNC_LIST; do
echo "Probing $BLACKFUNC"
# functions from blacklist should be skipped by perf probe
! $CMD_PERF probe $BLACKFUNC > $LOGS_DIR/adding_blacklisted.log 2> $LOGS_DIR/adding_blacklisted.err
PERF_EXIT_CODE=$?
REGEX_SCOPE_FAIL="Failed to find scope of probe point"
REGEX_SKIP_MESSAGE=" is blacklisted function, skip it\."
REGEX_NOT_FOUND_MESSAGE="Probe point \'$BLACKFUNC\' not found."
REGEX_ERROR_MESSAGE="Error: Failed to add events."
REGEX_INVALID_ARGUMENT="Failed to write event: Invalid argument"
REGEX_SYMBOL_FAIL="Failed to find symbol at $RE_ADDRESS"
REGEX_OUT_SECTION="$BLACKFUNC is out of \.\w+, skip it"
# check for bad DWARF polluting the result
../common/check_all_patterns_found.pl "$REGEX_MISSING_DECL_LINE" >/dev/null < $LOGS_DIR/adding_blacklisted.err
if [ $? -eq 0 ]; then
SKIP_DWARF=1
echo "Result polluted by broken DWARF, trying another probe"
# confirm that the broken DWARF comes from assembler
if [ -n "$VMLINUX_FILE" ]; then
readelf -wi "$VMLINUX_FILE" |
awk -v probe="$BLACKFUNC" '/DW_AT_language/ { comp_lang = $0 }
$0 ~ probe { if (comp_lang) { print comp_lang }; exit }' |
grep -q "MIPS assembler"
CHECK_EXIT_CODE=$?
if [ $CHECK_EXIT_CODE -ne 0 ]; then
SKIP_DWARF=0 # broken DWARF while available
break
fi
fi
else
../common/check_all_lines_matched.pl "$REGEX_SKIP_MESSAGE" "$REGEX_NOT_FOUND_MESSAGE" "$REGEX_ERROR_MESSAGE" "$REGEX_SCOPE_FAIL" "$REGEX_INVALID_ARGUMENT" "$REGEX_SYMBOL_FAIL" "$REGEX_OUT_SECTION" < $LOGS_DIR/adding_blacklisted.err
CHECK_EXIT_CODE=$?
SKIP_DWARF=0
break
fi
done
if [ $SKIP_DWARF -eq 1 ]; then
print_testcase_skipped "adding blacklisted function $BLACKFUNC"
else
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "adding blacklisted function $BLACKFUNC"
(( TEST_RESULT += $? ))
fi
### listing not-added probe

View File

@ -288,6 +288,10 @@ int sys_enter_rename(struct syscall_enter_args *args)
augmented_args->arg.size = PERF_ALIGN(oldpath_len + 1, sizeof(u64));
len += augmented_args->arg.size;
/* Every read from userspace is limited to value size */
if (augmented_args->arg.size > sizeof(augmented_args->arg.value))
return 1; /* Failure: don't filter */
struct augmented_arg *arg2 = (void *)&augmented_args->arg.value + augmented_args->arg.size;
newpath_len = augmented_arg__read_str(arg2, newpath_arg, sizeof(augmented_args->arg.value));
@ -315,6 +319,10 @@ int sys_enter_renameat2(struct syscall_enter_args *args)
augmented_args->arg.size = PERF_ALIGN(oldpath_len + 1, sizeof(u64));
len += augmented_args->arg.size;
/* Every read from userspace is limited to value size */
if (augmented_args->arg.size > sizeof(augmented_args->arg.value))
return 1; /* Failure: don't filter */
struct augmented_arg *arg2 = (void *)&augmented_args->arg.value + augmented_args->arg.size;
newpath_len = augmented_arg__read_str(arg2, newpath_arg, sizeof(augmented_args->arg.value));
@ -423,8 +431,9 @@ static bool pid_filter__has(struct pids_filtered *pids, pid_t pid)
static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
{
bool augmented, do_output = false;
int zero = 0, size, aug_size, index, output = 0,
int zero = 0, size, aug_size, index,
value_size = sizeof(struct augmented_arg) - offsetof(struct augmented_arg, value);
u64 output = 0; /* has to be u64, otherwise it won't pass the verifier */
unsigned int nr, *beauty_map;
struct beauty_payload_enter *payload;
void *arg, *payload_offset;
@ -477,6 +486,8 @@ static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
augmented = true;
} else if (size < 0 && size >= -6) { /* buffer */
index = -(size + 1);
barrier_var(index); // Prevent clang (noticed with v18) from removing the &= 7 trick.
index &= 7; // Satisfy the bounds checking with the verifier in some kernels.
aug_size = args->args[index];
if (aug_size > TRACE_AUG_MAX_BUF)
@ -488,10 +499,17 @@ static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
}
}
/* Augmented data size is limited to sizeof(augmented_arg->unnamed union with value field) */
if (aug_size > value_size)
aug_size = value_size;
/* write data to payload */
if (augmented) {
int written = offsetof(struct augmented_arg, value) + aug_size;
if (written < 0 || written > sizeof(struct augmented_arg))
return 1;
((struct augmented_arg *)payload_offset)->size = aug_size;
output += written;
payload_offset += written;
@ -499,7 +517,7 @@ static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
}
}
if (!do_output)
if (!do_output || (sizeof(struct syscall_enter_args) + output) > sizeof(struct beauty_payload_enter))
return 1;
return augmented__beauty_output(ctx, payload, sizeof(struct syscall_enter_args) + output);
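
The barrier_var()/index &= 7 pair above is the usual way to make an array index provably bounded for the BPF verifier. A plain-C sketch of the idiom (hypothetical values; the barrier macro mirrors libbpf's barrier_var(), an empty asm that keeps clang from folding the mask away):

  #include <stdio.h>

  /* Same idea as libbpf's barrier_var(): make 'var' opaque to the optimizer. */
  #define barrier_var(var) asm volatile("" : "+r"(var))

  int main(void)
  {
          long args[8] = { 100, 101, 102, 103, 104, 105, 106, 107 };
          int size = -3;            /* negative size: "length lives in another arg" */
          int index = -(size + 1);  /* -> args[2] holds the buffer length */

          barrier_var(index);       /* keep the mask below in the generated code */
          index &= 7;               /* now provably 0..7, within the 8-entry array */

          printf("args[%d] = %ld\n", index, args[index]);
          return 0;
  }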

View File

@ -7,13 +7,9 @@
#include "debug.h"
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <linux/capability.h>
#include <sys/syscall.h>
#ifndef SYS_capget
#define SYS_capget 90
#endif
#include <unistd.h>
#define MAX_LINUX_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3
@ -21,9 +17,9 @@ bool perf_cap__capable(int cap, bool *used_root)
{
struct __user_cap_header_struct header = {
.version = _LINUX_CAPABILITY_VERSION_3,
.pid = getpid(),
.pid = 0,
};
struct __user_cap_data_struct data[MAX_LINUX_CAPABILITY_U32S];
struct __user_cap_data_struct data[MAX_LINUX_CAPABILITY_U32S] = {};
__u32 cap_val;
*used_root = false;
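
For context, a self-contained sketch (assumptions: Linux with glibc; has_cap() is a hypothetical helper, not perf's API) of the raw capget check that perf_cap__capable() performs after this change — pid = 0 means "the calling thread":

  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/capability.h>

  static int has_cap(int cap)
  {
          struct __user_cap_header_struct header = {
                  .version = _LINUX_CAPABILITY_VERSION_3,
                  .pid = 0,                      /* 0 == the calling thread */
          };
          struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];

          memset(data, 0, sizeof(data));
          if (syscall(SYS_capget, &header, data))
                  return 0;

          return (data[CAP_TO_INDEX(cap)].effective & CAP_TO_MASK(cap)) != 0;
  }

  int main(void)
  {
          printf("CAP_SYS_ADMIN: %d\n", has_cap(CAP_SYS_ADMIN));
          return 0;
  }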

View File

@ -19,6 +19,7 @@
#include "util/bpf-filter.h"
#include "util/env.h"
#include "util/kvm-stat.h"
#include "util/stat.h"
#include "util/kwork.h"
#include "util/sample.h"
#include "util/lock-contention.h"
@ -1355,6 +1356,7 @@ error:
unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
#ifdef HAVE_KVM_STAT_SUPPORT
bool kvm_entry_event(struct evsel *evsel __maybe_unused)
{
return false;
@ -1384,6 +1386,7 @@ void exit_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
char *decode __maybe_unused)
{
}
#endif // HAVE_KVM_STAT_SUPPORT
int find_scripts(char **scripts_array __maybe_unused, char **scripts_path_array __maybe_unused,
int num __maybe_unused, int pathlen __maybe_unused)

View File

@ -46,6 +46,11 @@ static const char *const *syscalltbl_native = syscalltbl_mips_n64;
#include <asm/syscalls.c>
const int syscalltbl_native_max_id = SYSCALLTBL_LOONGARCH_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_loongarch;
#else
const int syscalltbl_native_max_id = 0;
static const char *const syscalltbl_native[] = {
[0] = "unknown",
};
#endif
struct syscall {
@ -182,6 +187,11 @@ int syscalltbl__id(struct syscalltbl *tbl, const char *name)
return audit_name_to_syscall(name, tbl->audit_machine);
}
int syscalltbl__id_at_idx(struct syscalltbl *tbl __maybe_unused, int idx)
{
return idx;
}
int syscalltbl__strglobmatch_next(struct syscalltbl *tbl __maybe_unused,
const char *syscall_glob __maybe_unused, int *idx __maybe_unused)
{