Merge branch 'bpf-support-dumping-kfunc-prototypes-from-btf'
Daniel Xu says:

====================
bpf: Support dumping kfunc prototypes from BTF

This patchset enables both detecting and dumping compilable prototypes
for kfuncs.

The first commit instructs pahole to tag kfuncs with a BTF DECL_TAG
when support is available. This requires pahole v1.27, released on
6/11/24. With it, users can inspect the BTF inside vmlinux (or modules)
and check whether the kfunc they want is available.

The final commit teaches bpftool how to dump kfunc prototypes. This is
done for developer convenience.

The rest of the commits are fixups that let selftests use the newly
dumped kfunc prototypes. With these, selftests will regularly exercise
the newly added codepaths.

Tested with and without the required pahole changes:

* https://github.com/kernel-patches/bpf/pull/7186
* https://github.com/kernel-patches/bpf/pull/7187

=== Changelog ===

From v4:
* Change bpf_session_cookie() return type
* Only fix up the fentry test kfunc prototypes that are actually used
* Extract projection detection into shared btf_is_projection_of()
* Fix kernel test robot build warnings about doc comments

From v3:
* Teach selftests to use dumped prototypes

From v2:
* Update Makefile.btf with pahole flag
* More error checking
* Output formatting changes
* Drop already-merged commit

From v1:
* Add __weak annotation
* Use btf_dump for kfunc prototypes
* Update kernel bpf_rdonly_cast() signature
====================

Link: https://lore.kernel.org/r/cover.1718207789.git.dxu@dxuuu.xyz
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in commit 4ff5747158.
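With this series applied, `bpftool btf dump file vmlinux format c` appends guarded extern declarations for every kfunc tagged in BTF. A sketch of the output (illustrative only; the exact set of kfuncs depends on the kernel and its config, but the shape follows dump_btf_kfuncs() in the bpftool hunk below):

/* BPF kfuncs */
#ifndef BPF_NO_KFUNC_PROTOTYPES
extern int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end) __weak __ksym;
extern bool bpf_dynptr_is_null(const struct bpf_dynptr *p) __weak __ksym;
#endif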
@@ -111,14 +111,15 @@ __bpf_kfunc_start_defs();
 /**
  * bpf_get_fsverity_digest: read fsverity digest of file
  * @file: file to get digest from
- * @digest_ptr: (out) dynptr for struct fsverity_digest
+ * @digest_p: (out) dynptr for struct fsverity_digest
  *
  * Read fsverity_digest of *file* into *digest_ptr*.
  *
  * Return: 0 on success, a negative value on error.
  */
-__bpf_kfunc int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr_kern *digest_ptr)
+__bpf_kfunc int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr *digest_p)
 {
+	struct bpf_dynptr_kern *digest_ptr = (struct bpf_dynptr_kern *)digest_p;
 	const struct inode *inode = file_inode(file);
 	u32 dynptr_sz = __bpf_dynptr_size(digest_ptr);
 	struct fsverity_digest *arg;
@@ -3265,8 +3265,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
 				    struct bpf_insn *insn_buf,
 				    struct bpf_prog *prog,
 				    u32 *target_size);
-int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
-			       struct bpf_dynptr_kern *ptr);
+int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
+			       struct bpf_dynptr *ptr);
 #else
 static inline bool bpf_sock_common_is_valid_access(int off, int size,
 						   enum bpf_access_type type,
@@ -3288,8 +3288,8 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
 {
 	return 0;
 }
-static inline int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
-					     struct bpf_dynptr_kern *ptr)
+static inline int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
+					     struct bpf_dynptr *ptr)
 {
 	return -EOPNOTSUPP;
 }
@@ -531,6 +531,7 @@ s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id);
 int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
 				struct module *owner);
 struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id);
+bool btf_is_projection_of(const char *pname, const char *tname);
 bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
 			  const struct btf_type *t, enum bpf_prog_type prog_type,
 			  int arg);
@@ -5820,6 +5820,15 @@ static int find_kern_ctx_type_id(enum bpf_prog_type prog_type)
 	return ctx_type->type;
 }
 
+bool btf_is_projection_of(const char *pname, const char *tname)
+{
+	if (strcmp(pname, "__sk_buff") == 0 && strcmp(tname, "sk_buff") == 0)
+		return true;
+	if (strcmp(pname, "xdp_md") == 0 && strcmp(tname, "xdp_buff") == 0)
+		return true;
+	return false;
+}
+
 bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
 			  const struct btf_type *t, enum bpf_prog_type prog_type,
 			  int arg)
@@ -5882,9 +5891,7 @@ again:
 	 * int socket_filter_bpf_prog(struct __sk_buff *skb)
 	 * { // no fields of skb are ever used }
	 */
-	if (strcmp(ctx_tname, "__sk_buff") == 0 && strcmp(tname, "sk_buff") == 0)
-		return true;
-	if (strcmp(ctx_tname, "xdp_md") == 0 && strcmp(tname, "xdp_buff") == 0)
+	if (btf_is_projection_of(ctx_tname, tname))
 		return true;
 	if (strcmp(ctx_tname, tname)) {
 		/* bpf_user_pt_regs_t is a typedef, so resolve it to
@@ -311,11 +311,15 @@ static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx,
 * Decrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
 */
 __bpf_kfunc int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx,
-				   const struct bpf_dynptr_kern *src,
-				   const struct bpf_dynptr_kern *dst,
-				   const struct bpf_dynptr_kern *siv)
+				   const struct bpf_dynptr *src,
+				   const struct bpf_dynptr *dst,
+				   const struct bpf_dynptr *siv)
 {
-	return bpf_crypto_crypt(ctx, src, dst, siv, true);
+	const struct bpf_dynptr_kern *src_kern = (struct bpf_dynptr_kern *)src;
+	const struct bpf_dynptr_kern *dst_kern = (struct bpf_dynptr_kern *)dst;
+	const struct bpf_dynptr_kern *siv_kern = (struct bpf_dynptr_kern *)siv;
+
+	return bpf_crypto_crypt(ctx, src_kern, dst_kern, siv_kern, true);
 }
 
 /**
@@ -328,11 +332,15 @@ __bpf_kfunc int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx,
 * Encrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
 */
 __bpf_kfunc int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx,
-				   const struct bpf_dynptr_kern *src,
-				   const struct bpf_dynptr_kern *dst,
-				   const struct bpf_dynptr_kern *siv)
+				   const struct bpf_dynptr *src,
+				   const struct bpf_dynptr *dst,
+				   const struct bpf_dynptr *siv)
 {
-	return bpf_crypto_crypt(ctx, src, dst, siv, false);
+	const struct bpf_dynptr_kern *src_kern = (struct bpf_dynptr_kern *)src;
+	const struct bpf_dynptr_kern *dst_kern = (struct bpf_dynptr_kern *)dst;
+	const struct bpf_dynptr_kern *siv_kern = (struct bpf_dynptr_kern *)siv;
+
+	return bpf_crypto_crypt(ctx, src_kern, dst_kern, siv_kern, false);
 }
 
 __bpf_kfunc_end_defs();
@@ -2459,9 +2459,10 @@ __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
 * provided buffer, with its contents containing the data, if unable to obtain
 * direct pointer)
 */
-__bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset,
+__bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset,
 				   void *buffer__opt, u32 buffer__szk)
 {
+	const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
 	enum bpf_dynptr_type type;
 	u32 len = buffer__szk;
 	int err;
@@ -2543,9 +2544,11 @@ __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset,
 * provided buffer, with its contents containing the data, if unable to obtain
 * direct pointer)
 */
-__bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 offset,
+__bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset,
 					void *buffer__opt, u32 buffer__szk)
 {
+	const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
+
 	if (!ptr->data || __bpf_dynptr_is_rdonly(ptr))
 		return NULL;
 
@@ -2571,11 +2574,12 @@ __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 offset,
 	 * will be copied out into the buffer and the user will need to call
 	 * bpf_dynptr_write() to commit changes.
	 */
-	return bpf_dynptr_slice(ptr, offset, buffer__opt, buffer__szk);
+	return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk);
 }
 
-__bpf_kfunc int bpf_dynptr_adjust(struct bpf_dynptr_kern *ptr, u32 start, u32 end)
+__bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end)
 {
+	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
 	u32 size;
 
 	if (!ptr->data || start > end)
@@ -2592,36 +2596,45 @@ __bpf_kfunc int bpf_dynptr_adjust(struct bpf_dynptr_kern *ptr, u32 start, u32 end)
 	return 0;
 }
 
-__bpf_kfunc bool bpf_dynptr_is_null(struct bpf_dynptr_kern *ptr)
+__bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p)
 {
+	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
+
 	return !ptr->data;
 }
 
-__bpf_kfunc bool bpf_dynptr_is_rdonly(struct bpf_dynptr_kern *ptr)
+__bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p)
 {
+	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
+
 	if (!ptr->data)
 		return false;
 
 	return __bpf_dynptr_is_rdonly(ptr);
 }
 
-__bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
+__bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p)
 {
+	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
+
 	if (!ptr->data)
 		return -EINVAL;
 
 	return __bpf_dynptr_size(ptr);
 }
 
-__bpf_kfunc int bpf_dynptr_clone(struct bpf_dynptr_kern *ptr,
-				 struct bpf_dynptr_kern *clone__uninit)
+__bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p,
+				 struct bpf_dynptr *clone__uninit)
 {
+	struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit;
+	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
+
 	if (!ptr->data) {
-		bpf_dynptr_set_null(clone__uninit);
+		bpf_dynptr_set_null(clone);
 		return -EINVAL;
 	}
 
-	*clone__uninit = *ptr;
+	*clone = *ptr;
 
 	return 0;
 }
@@ -2986,7 +2999,9 @@ late_initcall(kfunc_init);
 */
 const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len)
 {
-	return bpf_dynptr_slice(ptr, 0, NULL, len);
+	const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr;
+
+	return bpf_dynptr_slice(p, 0, NULL, len);
 }
 
 /* Get a pointer to dynptr data up to len bytes for read write access. If
@@ -10914,7 +10914,7 @@ enum {
 };
 
 BTF_ID_LIST(kf_arg_btf_ids)
-BTF_ID(struct, bpf_dynptr_kern)
+BTF_ID(struct, bpf_dynptr)
 BTF_ID(struct, bpf_list_head)
 BTF_ID(struct, bpf_list_node)
 BTF_ID(struct, bpf_rb_root)
@@ -11265,6 +11265,8 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
 	bool strict_type_match = false;
 	const struct btf *reg_btf;
 	const char *reg_ref_tname;
+	bool taking_projection;
+	bool struct_same;
 	u32 reg_ref_id;
 
 	if (base_type(reg->type) == PTR_TO_BTF_ID) {
@@ -11308,7 +11310,13 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
 
 	reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, &reg_ref_id);
 	reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off);
-	if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match)) {
+	struct_same = btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match);
+	/* If kfunc is accepting a projection type (ie. __sk_buff), it cannot
+	 * actually use it -- it must cast to the underlying type. So we allow
+	 * caller to pass in the underlying type.
+	 */
+	taking_projection = btf_is_projection_of(ref_tname, reg_ref_tname);
+	if (!taking_projection && !struct_same) {
 		verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
 			meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1,
 			btf_type_str(reg_ref_t), reg_ref_tname);
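To make the projection rule above concrete, here is a minimal sketch of the pattern it enables, mirroring the netfilter selftest changes later in this diff. It assumes a vmlinux.h generated by the patched bpftool, so bpf_dynptr_from_skb() is already declared with the __sk_buff projection. The source-level cast keeps the C compiler happy; the verifier still sees a struct sk_buff * and accepts it via btf_is_projection_of().

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

SEC("netfilter")
int accept_all(struct bpf_nf_ctx *ctx)
{
	/* register is typed struct sk_buff * in BTF; the kfunc's BTF
	 * signature takes the __sk_buff projection
	 */
	struct __sk_buff *skb = (struct __sk_buff *)ctx->skb;
	struct bpf_dynptr ptr;

	bpf_dynptr_from_skb(skb, 0, &ptr);
	return 1; /* NF_ACCEPT */
}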
@@ -1369,8 +1369,8 @@ __bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
 #ifdef CONFIG_SYSTEM_DATA_VERIFICATION
 /**
  * bpf_verify_pkcs7_signature - verify a PKCS#7 signature
- * @data_ptr: data to verify
- * @sig_ptr: signature of the data
+ * @data_p: data to verify
+ * @sig_p: signature of the data
  * @trusted_keyring: keyring with keys trusted for signature verification
  *
  * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr*
@@ -1378,10 +1378,12 @@ __bpf_kfunc void bpf_key_put(struct bpf_key *bkey)
 *
 * Return: 0 on success, a negative value on error.
 */
-__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
-					   struct bpf_dynptr_kern *sig_ptr,
+__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
+					   struct bpf_dynptr *sig_p,
 					   struct bpf_key *trusted_keyring)
 {
+	struct bpf_dynptr_kern *data_ptr = (struct bpf_dynptr_kern *)data_p;
+	struct bpf_dynptr_kern *sig_ptr = (struct bpf_dynptr_kern *)sig_p;
 	const void *data, *sig;
 	u32 data_len, sig_len;
 	int ret;
@@ -1444,7 +1446,7 @@ __bpf_kfunc_start_defs();
 * bpf_get_file_xattr - get xattr of a file
 * @file: file to get xattr from
 * @name__str: name of the xattr
-* @value_ptr: output buffer of the xattr value
+* @value_p: output buffer of the xattr value
 *
 * Get xattr *name__str* of *file* and store the output in *value_ptr*.
 *
@@ -1453,8 +1455,9 @@ __bpf_kfunc_start_defs();
 * Return: 0 on success, a negative value on error.
 */
 __bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
-				   struct bpf_dynptr_kern *value_ptr)
+				   struct bpf_dynptr *value_p)
 {
+	struct bpf_dynptr_kern *value_ptr = (struct bpf_dynptr_kern *)value_p;
 	struct dentry *dentry;
 	u32 value_len;
 	void *value;
@@ -3527,7 +3530,7 @@ __bpf_kfunc bool bpf_session_is_return(void)
 	return session_ctx->is_return;
 }
 
-__bpf_kfunc __u64 *bpf_session_cookie(void)
+__bpf_kfunc long *bpf_session_cookie(void)
 {
 	struct bpf_session_run_ctx *session_ctx;
 
@@ -11859,28 +11859,34 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 }
 
 __bpf_kfunc_start_defs();
-__bpf_kfunc int bpf_dynptr_from_skb(struct sk_buff *skb, u64 flags,
-				    struct bpf_dynptr_kern *ptr__uninit)
+__bpf_kfunc int bpf_dynptr_from_skb(struct __sk_buff *s, u64 flags,
+				    struct bpf_dynptr *ptr__uninit)
 {
+	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)ptr__uninit;
+	struct sk_buff *skb = (struct sk_buff *)s;
+
 	if (flags) {
-		bpf_dynptr_set_null(ptr__uninit);
+		bpf_dynptr_set_null(ptr);
 		return -EINVAL;
 	}
 
-	bpf_dynptr_init(ptr__uninit, skb, BPF_DYNPTR_TYPE_SKB, 0, skb->len);
+	bpf_dynptr_init(ptr, skb, BPF_DYNPTR_TYPE_SKB, 0, skb->len);
 
 	return 0;
 }
 
-__bpf_kfunc int bpf_dynptr_from_xdp(struct xdp_buff *xdp, u64 flags,
-				    struct bpf_dynptr_kern *ptr__uninit)
+__bpf_kfunc int bpf_dynptr_from_xdp(struct xdp_md *x, u64 flags,
+				    struct bpf_dynptr *ptr__uninit)
 {
+	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)ptr__uninit;
+	struct xdp_buff *xdp = (struct xdp_buff *)x;
+
 	if (flags) {
-		bpf_dynptr_set_null(ptr__uninit);
+		bpf_dynptr_set_null(ptr);
 		return -EINVAL;
 	}
 
-	bpf_dynptr_init(ptr__uninit, xdp, BPF_DYNPTR_TYPE_XDP, 0, xdp_get_buff_len(xdp));
+	bpf_dynptr_init(ptr, xdp, BPF_DYNPTR_TYPE_XDP, 0, xdp_get_buff_len(xdp));
 
 	return 0;
 }
@@ -11906,10 +11912,11 @@ __bpf_kfunc int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern,
 	return 0;
 }
 
-__bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct sk_buff *skb, struct sock *sk,
+__bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct __sk_buff *s, struct sock *sk,
 					struct bpf_tcp_req_attrs *attrs, int attrs__sz)
 {
 #if IS_ENABLED(CONFIG_SYN_COOKIES)
+	struct sk_buff *skb = (struct sk_buff *)s;
 	const struct request_sock_ops *ops;
 	struct inet_request_sock *ireq;
 	struct tcp_request_sock *treq;
@@ -12004,16 +12011,17 @@ __bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct sk_buff *skb, struct sock *sk,
 
 __bpf_kfunc_end_defs();
 
-int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
-			       struct bpf_dynptr_kern *ptr__uninit)
+int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
+			       struct bpf_dynptr *ptr__uninit)
 {
+	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)ptr__uninit;
 	int err;
 
 	err = bpf_dynptr_from_skb(skb, flags, ptr__uninit);
 	if (err)
 		return err;
 
-	bpf_dynptr_set_rdonly(ptr__uninit);
+	bpf_dynptr_set_rdonly(ptr);
 
 	return 0;
 }
@@ -19,7 +19,7 @@ pahole-flags-$(call test-ge, $(pahole-ver), 125) += --skip_encoding_btf_inconsistent_proto
 else
 
 # Switch to using --btf_features for v1.26 and later.
-pahole-flags-$(call test-ge, $(pahole-ver), 126)  = -j --btf_features=encode_force,var,float,enum64,decl_tag,type_tag,optimized_func,consistent_func
+pahole-flags-$(call test-ge, $(pahole-ver), 126)  = -j --btf_features=encode_force,var,float,enum64,decl_tag,type_tag,optimized_func,consistent_func,decl_tag_kfuncs
 
 endif
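With the decl_tag_kfuncs feature enabled, pahole attaches a BTF decl tag named "bpf_kfunc" to each kfunc's FUNC entry. Roughly what that looks like in a raw BTF dump (a sketch; the type IDs here are invented):

[27706] FUNC 'bpf_dynptr_adjust' type_id=27705 linkage=static
[58231] DECL_TAG 'bpf_kfunc' type_id=27706 component_idx=-1

The dump_btf_kfuncs() function below keys off exactly this encoding: a decl tag named bpf_kfunc with component_idx == -1 whose target is a FUNC.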
@@ -20,6 +20,8 @@
 #include "json_writer.h"
 #include "main.h"
 
+#define KFUNC_DECL_TAG		"bpf_kfunc"
+
 static const char * const btf_kind_str[NR_BTF_KINDS] = {
 	[BTF_KIND_UNKN]		= "UNKNOWN",
 	[BTF_KIND_INT]		= "INT",
@@ -461,6 +463,49 @@ static int dump_btf_raw(const struct btf *btf,
 	return 0;
 }
 
+static int dump_btf_kfuncs(struct btf_dump *d, const struct btf *btf)
+{
+	LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts);
+	int cnt = btf__type_cnt(btf);
+	int i;
+
+	printf("\n/* BPF kfuncs */\n");
+	printf("#ifndef BPF_NO_KFUNC_PROTOTYPES\n");
+
+	for (i = 1; i < cnt; i++) {
+		const struct btf_type *t = btf__type_by_id(btf, i);
+		const char *name;
+		int err;
+
+		if (!btf_is_decl_tag(t))
+			continue;
+
+		if (btf_decl_tag(t)->component_idx != -1)
+			continue;
+
+		name = btf__name_by_offset(btf, t->name_off);
+		if (strncmp(name, KFUNC_DECL_TAG, sizeof(KFUNC_DECL_TAG)))
+			continue;
+
+		t = btf__type_by_id(btf, t->type);
+		if (!btf_is_func(t))
+			continue;
+
+		printf("extern ");
+
+		opts.field_name = btf__name_by_offset(btf, t->name_off);
+		err = btf_dump__emit_type_decl(d, t->type, &opts);
+		if (err)
+			return err;
+
+		printf(" __weak __ksym;\n");
+	}
+
+	printf("#endif\n\n");
+
+	return 0;
+}
+
 static void __printf(2, 0) btf_dump_printf(void *ctx,
 					   const char *fmt, va_list args)
 {
@@ -596,6 +641,12 @@ static int dump_btf_c(const struct btf *btf,
 	printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n");
 	printf("#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)\n");
 	printf("#endif\n\n");
+	printf("#ifndef __ksym\n");
+	printf("#define __ksym __attribute__((section(\".ksyms\")))\n");
+	printf("#endif\n\n");
+	printf("#ifndef __weak\n");
+	printf("#define __weak __attribute__((weak))\n");
+	printf("#endif\n\n");
 
 	if (root_type_cnt) {
 		for (i = 0; i < root_type_cnt; i++) {
@@ -615,6 +666,10 @@ static int dump_btf_c(const struct btf *btf,
 			if (err)
 				goto done;
 		}
+
+		err = dump_btf_kfuncs(d, btf);
+		if (err)
+			goto done;
 	}
 
 	printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n");
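For context, a minimal consumer of the dumped prototypes might look like the following. This is a sketch, not part of the series: it assumes vmlinux.h was regenerated with the patched bpftool (so bpf_session_cookie() is declared __weak __ksym there) and that libbpf's bpf_ksym_exists() helper is available.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

SEC("kprobe.session/do_nanosleep")
int handle_session(struct pt_regs *ctx)
{
	long *cookie;

	/* Prototypes are emitted __weak, so a missing kfunc resolves to
	 * zero and availability can be probed before calling.
	 */
	if (!bpf_ksym_exists(bpf_session_cookie))
		return 0;

	cookie = bpf_session_cookie();
	if (cookie)
		*cookie = 1;

	return 0;
}

This is also why dump_btf_c() above now emits fallback definitions for __ksym and __weak: the generated header has to stand alone even without bpf_helpers.h.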
@@ -163,7 +163,7 @@ struct bpf_iter_task_vma;
 
 extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
 				 struct task_struct *task,
-				 unsigned long addr) __ksym;
+				 __u64 addr) __ksym;
 extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym;
 extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym;
@@ -65,7 +65,7 @@ static void dctcp_reset(const struct tcp_sock *tp, struct bpf_dctcp *ca)
 }
 
 SEC("struct_ops")
-void BPF_PROG(dctcp_init, struct sock *sk)
+void BPF_PROG(bpf_dctcp_init, struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct bpf_dctcp *ca = inet_csk_ca(sk);
@@ -77,7 +77,7 @@ void BPF_PROG(dctcp_init, struct sock *sk)
 		    (void *)fallback, sizeof(fallback)) == -EBUSY)
 			ebusy_cnt++;
 
-		/* Switch back to myself and the recurred dctcp_init()
+		/* Switch back to myself and the recurred bpf_dctcp_init()
 		 * will get -EBUSY for all bpf_setsockopt(TCP_CONGESTION),
 		 * except the last "cdg" one.
		 */
@@ -112,7 +112,7 @@ void BPF_PROG(dctcp_init, struct sock *sk)
 }
 
 SEC("struct_ops")
-__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
+__u32 BPF_PROG(bpf_dctcp_ssthresh, struct sock *sk)
 {
 	struct bpf_dctcp *ca = inet_csk_ca(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -122,7 +122,7 @@ __u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
 }
 
 SEC("struct_ops")
-void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
+void BPF_PROG(bpf_dctcp_update_alpha, struct sock *sk, __u32 flags)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct bpf_dctcp *ca = inet_csk_ca(sk);
@@ -161,12 +161,12 @@ static void dctcp_react_to_loss(struct sock *sk)
 }
 
 SEC("struct_ops")
-void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state)
+void BPF_PROG(bpf_dctcp_state, struct sock *sk, __u8 new_state)
 {
 	if (new_state == TCP_CA_Recovery &&
 	    new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state))
 		dctcp_react_to_loss(sk);
-	/* We handle RTO in dctcp_cwnd_event to ensure that we perform only
+	/* We handle RTO in bpf_dctcp_cwnd_event to ensure that we perform only
 	 * one loss-adjustment per RTT.
	 */
 }
@@ -208,7 +208,7 @@ static void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
 }
 
 SEC("struct_ops")
-void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
+void BPF_PROG(bpf_dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
 {
 	struct bpf_dctcp *ca = inet_csk_ca(sk);
 
@@ -227,7 +227,7 @@ void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
 }
 
 SEC("struct_ops")
-__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
+__u32 BPF_PROG(bpf_dctcp_cwnd_undo, struct sock *sk)
 {
 	const struct bpf_dctcp *ca = inet_csk_ca(sk);
 
@@ -237,28 +237,28 @@ __u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
 extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;
 
 SEC("struct_ops")
-void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
+void BPF_PROG(bpf_dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
 {
 	tcp_reno_cong_avoid(sk, ack, acked);
 }
 
 SEC(".struct_ops")
 struct tcp_congestion_ops dctcp_nouse = {
-	.init		= (void *)dctcp_init,
-	.set_state	= (void *)dctcp_state,
+	.init		= (void *)bpf_dctcp_init,
+	.set_state	= (void *)bpf_dctcp_state,
 	.flags		= TCP_CONG_NEEDS_ECN,
 	.name		= "bpf_dctcp_nouse",
 };
 
 SEC(".struct_ops")
 struct tcp_congestion_ops dctcp = {
-	.init		= (void *)dctcp_init,
-	.in_ack_event	= (void *)dctcp_update_alpha,
-	.cwnd_event	= (void *)dctcp_cwnd_event,
-	.ssthresh	= (void *)dctcp_ssthresh,
-	.cong_avoid	= (void *)dctcp_cong_avoid,
-	.undo_cwnd	= (void *)dctcp_cwnd_undo,
-	.set_state	= (void *)dctcp_state,
+	.init		= (void *)bpf_dctcp_init,
+	.in_ack_event	= (void *)bpf_dctcp_update_alpha,
+	.cwnd_event	= (void *)bpf_dctcp_cwnd_event,
+	.ssthresh	= (void *)bpf_dctcp_ssthresh,
+	.cong_avoid	= (void *)bpf_dctcp_cong_avoid,
+	.undo_cwnd	= (void *)bpf_dctcp_cwnd_undo,
+	.set_state	= (void *)bpf_dctcp_state,
 	.flags		= TCP_CONG_NEEDS_ECN,
 	.name		= "bpf_dctcp",
 };
@@ -5,13 +5,12 @@
 
 char _license[] SEC("license") = "GPL";
 
-extern const void bpf_fentry_test1 __ksym;
-
+extern int bpf_fentry_test1(int a) __ksym;
+extern int bpf_modify_return_test(int a, int *b) __ksym;
 extern const void bpf_fentry_test2 __ksym;
 extern const void bpf_fentry_test3 __ksym;
 extern const void bpf_fentry_test4 __ksym;
-extern const void bpf_modify_return_test __ksym;
 extern const void bpf_fentry_test6 __ksym;
 extern const void bpf_fentry_test7 __ksym;
 
 extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
@@ -12,7 +12,7 @@
 #define IP_OFFSET		0x1FFF
 #define NEXTHDR_FRAGMENT	44
 
-extern int bpf_dynptr_from_skb(struct sk_buff *skb, __u64 flags,
+extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
 			       struct bpf_dynptr *ptr__uninit) __ksym;
 extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset,
 			      void *buffer, uint32_t buffer__sz) __ksym;
@@ -42,7 +42,7 @@ static bool is_frag_v6(struct ipv6hdr *ip6h)
 	return ip6h->nexthdr == NEXTHDR_FRAGMENT;
 }
 
-static int handle_v4(struct sk_buff *skb)
+static int handle_v4(struct __sk_buff *skb)
 {
 	struct bpf_dynptr ptr;
 	u8 iph_buf[20] = {};
@@ -64,7 +64,7 @@ static int handle_v4(struct sk_buff *skb)
 	return NF_ACCEPT;
 }
 
-static int handle_v6(struct sk_buff *skb)
+static int handle_v6(struct __sk_buff *skb)
 {
 	struct bpf_dynptr ptr;
 	struct ipv6hdr *ip6h;
@@ -89,9 +89,9 @@ static int handle_v6(struct sk_buff *skb)
 SEC("netfilter")
 int defrag(struct bpf_nf_ctx *ctx)
 {
-	struct sk_buff *skb = ctx->skb;
+	struct __sk_buff *skb = (struct __sk_buff *)ctx->skb;
 
-	switch (bpf_ntohs(skb->protocol)) {
+	switch (bpf_ntohs(ctx->skb->protocol)) {
 	case ETH_P_IP:
 		return handle_v4(skb);
 	case ETH_P_IPV6:
@@ -7,7 +7,7 @@
 
 __u32 target_id;
 
-__s64 bpf_map_sum_elem_count(struct bpf_map *map) __ksym;
+__s64 bpf_map_sum_elem_count(const struct bpf_map *map) __ksym;
 
 SEC("iter/bpf_map")
 int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
@@ -7,6 +7,6 @@
 #include <stdbool.h>
 
 bool bpf_cpumask_test_cpu(unsigned int cpu, const struct cpumask *cpumask) __ksym;
-bool bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
+__u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
 
 #endif /* _NESTED_TRUST_COMMON_H */
@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_KFUNC_PROTOTYPES
 #include <vmlinux.h>
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_endian.h>
@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_KFUNC_PROTOTYPES
 #include <vmlinux.h>
 #include <bpf/bpf_tracing.h>
 #include <bpf/bpf_helpers.h>
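The hunks above add BPF_NO_KFUNC_PROTOTYPES to programs that keep their own kfunc declarations: defining it before including vmlinux.h suppresses the #ifndef-guarded block that bpftool emits, so local externs do not become conflicting redeclarations. A sketch of the opt-out pattern (the slice prototype below is only an example of a local declaration that could clash):

#define BPF_NO_KFUNC_PROTOTYPES	/* keep using the local declarations below */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* A pre-existing local prototype; if it drifts from the kernel's BTF
 * signature even slightly, including the dumped prototypes would cause
 * a redeclaration error, hence the opt-out above.
 */
extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset,
			      void *buffer, uint32_t buffer__sz) __ksym;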
@@ -79,7 +79,7 @@ int with_invalid_ctx_access_test5(struct bpf_nf_ctx *ctx)
 	return NF_ACCEPT;
 }
 
-extern int bpf_dynptr_from_skb(struct sk_buff *skb, __u64 flags,
+extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
 			       struct bpf_dynptr *ptr__uninit) __ksym;
 extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset,
 			      void *buffer, uint32_t buffer__sz) __ksym;
@@ -90,8 +90,8 @@ __success __failure_unpriv
 __retval(0)
 int with_valid_ctx_access_test6(struct bpf_nf_ctx *ctx)
 {
+	struct __sk_buff *skb = (struct __sk_buff *)ctx->skb;
 	const struct nf_hook_state *state = ctx->state;
-	struct sk_buff *skb = ctx->skb;
 	const struct iphdr *iph;
 	const struct tcphdr *th;
 	u8 buffer_iph[20] = {};
@@ -99,7 +99,7 @@ int with_valid_ctx_access_test6(struct bpf_nf_ctx *ctx)
 	struct bpf_dynptr ptr;
 	uint8_t ihl;
 
-	if (skb->len <= 20 || bpf_dynptr_from_skb(skb, 0, &ptr))
+	if (ctx->skb->len <= 20 || bpf_dynptr_from_skb(skb, 0, &ptr))
 		return NF_ACCEPT;
 
 	iph = bpf_dynptr_slice(&ptr, 0, buffer_iph, sizeof(buffer_iph));
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause
 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
 
+#define BPF_NO_KFUNC_PROTOTYPES
 #include "vmlinux.h"
 
 #include <bpf/bpf_helpers.h>
@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
+#define BPF_NO_KFUNC_PROTOTYPES
 #include "vmlinux.h"
 #include "bpf_tracing_net.h"
 #include <bpf/bpf_helpers.h>