bpf: add support for bpf_wq user type
Mostly a copy/paste from the bpf_timer API, without the initialization and free, as they will be done in a separate patch.

Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>
Link: https://lore.kernel.org/r/20240420-bpf_wq-v2-5-6c986a5a741f@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent
fc22d9495f
commit
d56b63cf0c
@ -185,7 +185,7 @@ struct bpf_map_ops {
|
|||||||
|
|
||||||
enum {
|
enum {
|
||||||
/* Support at most 10 fields in a BTF type */
|
/* Support at most 11 fields in a BTF type */
|
||||||
BTF_FIELDS_MAX = 10,
|
BTF_FIELDS_MAX = 11,
|
||||||
};
|
};
|
||||||
|
|
||||||
enum btf_field_type {
|
enum btf_field_type {
|
||||||
@ -202,6 +202,7 @@ enum btf_field_type {
|
|||||||
BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE,
|
BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE,
|
||||||
BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
|
BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
|
||||||
BPF_REFCOUNT = (1 << 9),
|
BPF_REFCOUNT = (1 << 9),
|
||||||
|
BPF_WORKQUEUE = (1 << 10),
|
||||||
};
|
};
|
||||||
|
|
||||||
typedef void (*btf_dtor_kfunc_t)(void *);
|
typedef void (*btf_dtor_kfunc_t)(void *);
|
||||||
@ -238,6 +239,7 @@ struct btf_record {
|
|||||||
u32 field_mask;
|
u32 field_mask;
|
||||||
int spin_lock_off;
|
int spin_lock_off;
|
||||||
int timer_off;
|
int timer_off;
|
||||||
|
int wq_off;
|
||||||
int refcount_off;
|
int refcount_off;
|
||||||
struct btf_field fields[];
|
struct btf_field fields[];
|
||||||
};
|
};
|
||||||
@ -312,6 +314,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
|
|||||||
return "bpf_spin_lock";
|
return "bpf_spin_lock";
|
||||||
case BPF_TIMER:
|
case BPF_TIMER:
|
||||||
return "bpf_timer";
|
return "bpf_timer";
|
||||||
|
case BPF_WORKQUEUE:
|
||||||
|
return "bpf_wq";
|
||||||
case BPF_KPTR_UNREF:
|
case BPF_KPTR_UNREF:
|
||||||
case BPF_KPTR_REF:
|
case BPF_KPTR_REF:
|
||||||
return "kptr";
|
return "kptr";
|
||||||
@ -340,6 +344,8 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
|
|||||||
return sizeof(struct bpf_spin_lock);
|
return sizeof(struct bpf_spin_lock);
|
||||||
case BPF_TIMER:
|
case BPF_TIMER:
|
||||||
return sizeof(struct bpf_timer);
|
return sizeof(struct bpf_timer);
|
||||||
|
case BPF_WORKQUEUE:
|
||||||
|
return sizeof(struct bpf_wq);
|
||||||
case BPF_KPTR_UNREF:
|
case BPF_KPTR_UNREF:
|
||||||
case BPF_KPTR_REF:
|
case BPF_KPTR_REF:
|
||||||
case BPF_KPTR_PERCPU:
|
case BPF_KPTR_PERCPU:
|
||||||
@ -367,6 +373,8 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
|
|||||||
return __alignof__(struct bpf_spin_lock);
|
return __alignof__(struct bpf_spin_lock);
|
||||||
case BPF_TIMER:
|
case BPF_TIMER:
|
||||||
return __alignof__(struct bpf_timer);
|
return __alignof__(struct bpf_timer);
|
||||||
|
case BPF_WORKQUEUE:
|
||||||
|
return __alignof__(struct bpf_wq);
|
||||||
case BPF_KPTR_UNREF:
|
case BPF_KPTR_UNREF:
|
||||||
case BPF_KPTR_REF:
|
case BPF_KPTR_REF:
|
||||||
case BPF_KPTR_PERCPU:
|
case BPF_KPTR_PERCPU:
|
||||||
@ -406,6 +414,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
|
|||||||
/* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
|
/* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
|
||||||
case BPF_SPIN_LOCK:
|
case BPF_SPIN_LOCK:
|
||||||
case BPF_TIMER:
|
case BPF_TIMER:
|
||||||
|
case BPF_WORKQUEUE:
|
||||||
case BPF_KPTR_UNREF:
|
case BPF_KPTR_UNREF:
|
||||||
case BPF_KPTR_REF:
|
case BPF_KPTR_REF:
|
||||||
case BPF_KPTR_PERCPU:
|
case BPF_KPTR_PERCPU:
|
||||||
|
@ -7306,6 +7306,10 @@ struct bpf_timer {
|
|||||||
__u64 __opaque[2];
|
__u64 __opaque[2];
|
||||||
} __attribute__((aligned(8)));
|
} __attribute__((aligned(8)));
|
||||||
|
|
||||||
|
struct bpf_wq {
|
||||||
|
__u64 __opaque[2];
|
||||||
|
} __attribute__((aligned(8)));
|
||||||
|
|
||||||
struct bpf_dynptr {
|
struct bpf_dynptr {
|
||||||
__u64 __opaque[2];
|
__u64 __opaque[2];
|
||||||
} __attribute__((aligned(8)));
|
} __attribute__((aligned(8)));
|
||||||
|
@ -3464,6 +3464,15 @@ static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
|
|||||||
goto end;
|
goto end;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if (field_mask & BPF_WORKQUEUE) {
|
||||||
|
if (!strcmp(name, "bpf_wq")) {
|
||||||
|
if (*seen_mask & BPF_WORKQUEUE)
|
||||||
|
return -E2BIG;
|
||||||
|
*seen_mask |= BPF_WORKQUEUE;
|
||||||
|
type = BPF_WORKQUEUE;
|
||||||
|
goto end;
|
||||||
|
}
|
||||||
|
}
|
||||||
field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head");
|
field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head");
|
||||||
field_mask_test_name(BPF_LIST_NODE, "bpf_list_node");
|
field_mask_test_name(BPF_LIST_NODE, "bpf_list_node");
|
||||||
field_mask_test_name(BPF_RB_ROOT, "bpf_rb_root");
|
field_mask_test_name(BPF_RB_ROOT, "bpf_rb_root");
|
||||||
@ -3515,6 +3524,7 @@ static int btf_find_struct_field(const struct btf *btf,
|
|||||||
switch (field_type) {
|
switch (field_type) {
|
||||||
case BPF_SPIN_LOCK:
|
case BPF_SPIN_LOCK:
|
||||||
case BPF_TIMER:
|
case BPF_TIMER:
|
||||||
|
case BPF_WORKQUEUE:
|
||||||
case BPF_LIST_NODE:
|
case BPF_LIST_NODE:
|
||||||
case BPF_RB_NODE:
|
case BPF_RB_NODE:
|
||||||
case BPF_REFCOUNT:
|
case BPF_REFCOUNT:
|
||||||
@ -3582,6 +3592,7 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
|
|||||||
switch (field_type) {
|
switch (field_type) {
|
||||||
case BPF_SPIN_LOCK:
|
case BPF_SPIN_LOCK:
|
||||||
case BPF_TIMER:
|
case BPF_TIMER:
|
||||||
|
case BPF_WORKQUEUE:
|
||||||
case BPF_LIST_NODE:
|
case BPF_LIST_NODE:
|
||||||
case BPF_RB_NODE:
|
case BPF_RB_NODE:
|
||||||
case BPF_REFCOUNT:
|
case BPF_REFCOUNT:
|
||||||
@ -3816,6 +3827,7 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
|
|||||||
|
|
||||||
rec->spin_lock_off = -EINVAL;
|
rec->spin_lock_off = -EINVAL;
|
||||||
rec->timer_off = -EINVAL;
|
rec->timer_off = -EINVAL;
|
||||||
|
rec->wq_off = -EINVAL;
|
||||||
rec->refcount_off = -EINVAL;
|
rec->refcount_off = -EINVAL;
|
||||||
for (i = 0; i < cnt; i++) {
|
for (i = 0; i < cnt; i++) {
|
||||||
field_type_size = btf_field_type_size(info_arr[i].type);
|
field_type_size = btf_field_type_size(info_arr[i].type);
|
||||||
@ -3846,6 +3858,11 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
|
|||||||
/* Cache offset for faster lookup at runtime */
|
/* Cache offset for faster lookup at runtime */
|
||||||
rec->timer_off = rec->fields[i].offset;
|
rec->timer_off = rec->fields[i].offset;
|
||||||
break;
|
break;
|
||||||
|
case BPF_WORKQUEUE:
|
||||||
|
WARN_ON_ONCE(rec->wq_off >= 0);
|
||||||
|
/* Cache offset for faster lookup at runtime */
|
||||||
|
rec->wq_off = rec->fields[i].offset;
|
||||||
|
break;
|
||||||
case BPF_REFCOUNT:
|
case BPF_REFCOUNT:
|
||||||
WARN_ON_ONCE(rec->refcount_off >= 0);
|
WARN_ON_ONCE(rec->refcount_off >= 0);
|
||||||
/* Cache offset for faster lookup at runtime */
|
/* Cache offset for faster lookup at runtime */
|
||||||
|
@ -559,6 +559,7 @@ void btf_record_free(struct btf_record *rec)
|
|||||||
case BPF_SPIN_LOCK:
|
case BPF_SPIN_LOCK:
|
||||||
case BPF_TIMER:
|
case BPF_TIMER:
|
||||||
case BPF_REFCOUNT:
|
case BPF_REFCOUNT:
|
||||||
|
case BPF_WORKQUEUE:
|
||||||
/* Nothing to release */
|
/* Nothing to release */
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
@ -608,6 +609,7 @@ struct btf_record *btf_record_dup(const struct btf_record *rec)
|
|||||||
case BPF_SPIN_LOCK:
|
case BPF_SPIN_LOCK:
|
||||||
case BPF_TIMER:
|
case BPF_TIMER:
|
||||||
case BPF_REFCOUNT:
|
case BPF_REFCOUNT:
|
||||||
|
case BPF_WORKQUEUE:
|
||||||
/* Nothing to acquire */
|
/* Nothing to acquire */
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
@ -679,6 +681,8 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
|
|||||||
case BPF_TIMER:
|
case BPF_TIMER:
|
||||||
bpf_timer_cancel_and_free(field_ptr);
|
bpf_timer_cancel_and_free(field_ptr);
|
||||||
break;
|
break;
|
||||||
|
case BPF_WORKQUEUE:
|
||||||
|
break;
|
||||||
case BPF_KPTR_UNREF:
|
case BPF_KPTR_UNREF:
|
||||||
WRITE_ONCE(*(u64 *)field_ptr, 0);
|
WRITE_ONCE(*(u64 *)field_ptr, 0);
|
||||||
break;
|
break;
|
||||||
@ -1085,7 +1089,7 @@ static int map_check_btf(struct bpf_map *map, struct bpf_token *token,
|
|||||||
|
|
||||||
map->record = btf_parse_fields(btf, value_type,
|
map->record = btf_parse_fields(btf, value_type,
|
||||||
BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
|
BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
|
||||||
BPF_RB_ROOT | BPF_REFCOUNT,
|
BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE,
|
||||||
map->value_size);
|
map->value_size);
|
||||||
if (!IS_ERR_OR_NULL(map->record)) {
|
if (!IS_ERR_OR_NULL(map->record)) {
|
||||||
int i;
|
int i;
|
||||||
|
@ -1838,6 +1838,8 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
|
|||||||
*/
|
*/
|
||||||
if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER))
|
if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER))
|
||||||
reg->map_uid = reg->id;
|
reg->map_uid = reg->id;
|
||||||
|
if (btf_record_has_field(map->inner_map_meta->record, BPF_WORKQUEUE))
|
||||||
|
reg->map_uid = reg->id;
|
||||||
} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
|
} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
|
||||||
reg->type = PTR_TO_XDP_SOCK;
|
reg->type = PTR_TO_XDP_SOCK;
|
||||||
} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
|
} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
|
||||||
@ -18141,6 +18143,13 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (btf_record_has_field(map->record, BPF_WORKQUEUE)) {
|
||||||
|
if (is_tracing_prog_type(prog_type)) {
|
||||||
|
verbose(env, "tracing progs cannot use bpf_wq yet\n");
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) &&
|
if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) &&
|
||||||
!bpf_offload_prog_map_match(prog, map)) {
|
!bpf_offload_prog_map_match(prog, map)) {
|
||||||
verbose(env, "offload device mismatch between prog and map\n");
|
verbose(env, "offload device mismatch between prog and map\n");
|
||||||
|
Loading…
Reference in New Issue
Block a user