selftests/bpf: Add more test cases for bpf memory allocator
Add the following 3 test cases for bpf memory allocator:

1) Do allocation in bpf program and free through map free
2) Do batch per-cpu allocation and per-cpu free in bpf program
3) Do per-cpu allocation in bpf program and free through map free

Because the per-cpu allocator sometimes cannot refill in time, tests 2) and 3) treat a NULL return from bpf_percpu_obj_new_impl() as acceptable and simply skip that slot instead of reporting an error.

Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20231020133202.4043247-8-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
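Note: the tolerance for per-cpu refill failure boils down to the loop body of the batch_percpu_alloc() helper added in the diff below. A trimmed excerpt (err values and the surrounding map-lookup plumbing are as in the full patch):

	/* A failed per-cpu allocation only skips this slot; any other
	 * unexpected state is reported through err and aborts the loop.
	 */
	new = bpf_percpu_obj_new_impl(data_btf_ids[idx], NULL);
	if (!new)
		continue;

	old = bpf_kptr_xchg(&value->data, new);
	if (old) {
		bpf_percpu_obj_drop(old);
		err = 2;
		return;
	}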
--- a/tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c
@@ -9,9 +9,10 @@
 
 #include "test_bpf_ma.skel.h"
 
-void test_test_bpf_ma(void)
+static void do_bpf_ma_test(const char *name)
 {
 	struct test_bpf_ma *skel;
+	struct bpf_program *prog;
 	struct btf *btf;
 	int i, err;
 
@@ -34,6 +35,11 @@ void test_test_bpf_ma(void)
 		skel->rodata->data_btf_ids[i] = id;
 	}
 
+	prog = bpf_object__find_program_by_name(skel->obj, name);
+	if (!ASSERT_OK_PTR(prog, "invalid prog name"))
+		goto out;
+	bpf_program__set_autoload(prog, true);
+
 	err = test_bpf_ma__load(skel);
 	if (!ASSERT_OK(err, "load"))
 		goto out;
@@ -48,3 +54,15 @@ void test_test_bpf_ma(void)
 out:
 	test_bpf_ma__destroy(skel);
 }
+
+void test_test_bpf_ma(void)
+{
+	if (test__start_subtest("batch_alloc_free"))
+		do_bpf_ma_test("test_batch_alloc_free");
+	if (test__start_subtest("free_through_map_free"))
+		do_bpf_ma_test("test_free_through_map_free");
+	if (test__start_subtest("batch_percpu_alloc_free"))
+		do_bpf_ma_test("test_batch_percpu_alloc_free");
+	if (test__start_subtest("percpu_free_through_map_free"))
+		do_bpf_ma_test("test_percpu_free_through_map_free");
+}
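With both files updated and the skeleton regenerated, the new subtests should be reachable through the usual test_progs filter, e.g. ./test_progs -t test_bpf_ma to run all four; selecting a single subtest with the test/subtest form (e.g. ./test_progs -t test_bpf_ma/percpu_free_through_map_free) is assumed to work as for other test_progs subtests.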
--- a/tools/testing/selftests/bpf/progs/test_bpf_ma.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
@@ -37,10 +37,20 @@ int pid = 0;
 		__type(key, int); \
 		__type(value, struct map_value_##_size); \
 		__uint(max_entries, 128); \
-	} array_##_size SEC(".maps");
+	} array_##_size SEC(".maps")
 
-static __always_inline void batch_alloc_free(struct bpf_map *map, unsigned int batch,
-					      unsigned int idx)
+#define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \
+	struct map_value_percpu_##_size { \
+		struct bin_data_##_size __percpu_kptr * data; \
+	}; \
+	struct { \
+		__uint(type, BPF_MAP_TYPE_ARRAY); \
+		__type(key, int); \
+		__type(value, struct map_value_percpu_##_size); \
+		__uint(max_entries, 128); \
+	} array_percpu_##_size SEC(".maps")
+
+static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx)
 {
 	struct generic_map_value *value;
 	unsigned int i, key;
@@ -65,6 +75,14 @@ static __always_inline void batch_alloc_free(struct bpf_map *map, unsigned int b
 			return;
 		}
 	}
+}
+
+static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx)
+{
+	struct generic_map_value *value;
+	unsigned int i, key;
+	void *old;
+
 	for (i = 0; i < batch; i++) {
 		key = i;
 		value = bpf_map_lookup_elem(map, &key);
@@ -81,8 +99,72 @@ static __always_inline void batch_alloc_free(struct bpf_map *map, unsigned int b
 	}
 }
 
+static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch,
+					       unsigned int idx)
+{
+	struct generic_map_value *value;
+	unsigned int i, key;
+	void *old, *new;
+
+	for (i = 0; i < batch; i++) {
+		key = i;
+		value = bpf_map_lookup_elem(map, &key);
+		if (!value) {
+			err = 1;
+			return;
+		}
+		/* per-cpu allocator may not be able to refill in time */
+		new = bpf_percpu_obj_new_impl(data_btf_ids[idx], NULL);
+		if (!new)
+			continue;
+
+		old = bpf_kptr_xchg(&value->data, new);
+		if (old) {
+			bpf_percpu_obj_drop(old);
+			err = 2;
+			return;
+		}
+	}
+}
+
+static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch,
+					      unsigned int idx)
+{
+	struct generic_map_value *value;
+	unsigned int i, key;
+	void *old;
+
+	for (i = 0; i < batch; i++) {
+		key = i;
+		value = bpf_map_lookup_elem(map, &key);
+		if (!value) {
+			err = 3;
+			return;
+		}
+		old = bpf_kptr_xchg(&value->data, NULL);
+		if (!old)
+			continue;
+		bpf_percpu_obj_drop(old);
+	}
+}
+
+#define CALL_BATCH_ALLOC(size, batch, idx) \
+	batch_alloc((struct bpf_map *)(&array_##size), batch, idx)
+
 #define CALL_BATCH_ALLOC_FREE(size, batch, idx) \
-	batch_alloc_free((struct bpf_map *)(&array_##size), batch, idx)
+	do { \
+		batch_alloc((struct bpf_map *)(&array_##size), batch, idx); \
+		batch_free((struct bpf_map *)(&array_##size), batch, idx); \
+	} while (0)
+
+#define CALL_BATCH_PERCPU_ALLOC(size, batch, idx) \
+	batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx)
+
+#define CALL_BATCH_PERCPU_ALLOC_FREE(size, batch, idx) \
+	do { \
+		batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx); \
+		batch_percpu_free((struct bpf_map *)(&array_percpu_##size), batch, idx); \
+	} while (0)
+
 DEFINE_ARRAY_WITH_KPTR(8);
 DEFINE_ARRAY_WITH_KPTR(16);
@@ -97,8 +179,21 @@ DEFINE_ARRAY_WITH_KPTR(1024);
 DEFINE_ARRAY_WITH_KPTR(2048);
 DEFINE_ARRAY_WITH_KPTR(4096);
 
-SEC("fentry/" SYS_PREFIX "sys_nanosleep")
-int test_bpf_mem_alloc_free(void *ctx)
+/* per-cpu kptr doesn't support bin_data_8 which is a zero-sized array */
+DEFINE_ARRAY_WITH_PERCPU_KPTR(16);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(32);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(64);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(96);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(128);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(512);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(1024);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(2048);
+DEFINE_ARRAY_WITH_PERCPU_KPTR(4096);
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_batch_alloc_free(void *ctx)
 {
 	if ((u32)bpf_get_current_pid_tgid() != pid)
 		return 0;
@@ -121,3 +216,76 @@ int test_bpf_mem_alloc_free(void *ctx)
 
 	return 0;
 }
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_free_through_map_free(void *ctx)
+{
+	if ((u32)bpf_get_current_pid_tgid() != pid)
+		return 0;
+
+	/* Alloc 128 8-bytes objects in batch to trigger refilling,
+	 * then free these objects through map free.
+	 */
+	CALL_BATCH_ALLOC(8, 128, 0);
+	CALL_BATCH_ALLOC(16, 128, 1);
+	CALL_BATCH_ALLOC(32, 128, 2);
+	CALL_BATCH_ALLOC(64, 128, 3);
+	CALL_BATCH_ALLOC(96, 128, 4);
+	CALL_BATCH_ALLOC(128, 128, 5);
+	CALL_BATCH_ALLOC(192, 128, 6);
+	CALL_BATCH_ALLOC(256, 128, 7);
+	CALL_BATCH_ALLOC(512, 64, 8);
+	CALL_BATCH_ALLOC(1024, 32, 9);
+	CALL_BATCH_ALLOC(2048, 16, 10);
+	CALL_BATCH_ALLOC(4096, 8, 11);
+
+	return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_batch_percpu_alloc_free(void *ctx)
+{
+	if ((u32)bpf_get_current_pid_tgid() != pid)
+		return 0;
+
+	/* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling,
+	 * then free 128 16-bytes per-cpu objects in batch to trigger freeing.
+	 */
+	CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1);
+	CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2);
+	CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3);
+	CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4);
+	CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5);
+	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
+	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
+	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);
+	CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 9);
+	CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 10);
+	CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 11);
+
+	return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_percpu_free_through_map_free(void *ctx)
+{
+	if ((u32)bpf_get_current_pid_tgid() != pid)
+		return 0;
+
+	/* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling,
+	 * then free these object through map free.
+	 */
+	CALL_BATCH_PERCPU_ALLOC(16, 128, 1);
+	CALL_BATCH_PERCPU_ALLOC(32, 128, 2);
+	CALL_BATCH_PERCPU_ALLOC(64, 128, 3);
+	CALL_BATCH_PERCPU_ALLOC(96, 128, 4);
+	CALL_BATCH_PERCPU_ALLOC(128, 128, 5);
+	CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
+	CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
+	CALL_BATCH_PERCPU_ALLOC(512, 64, 8);
+	CALL_BATCH_PERCPU_ALLOC(1024, 32, 9);
+	CALL_BATCH_PERCPU_ALLOC(2048, 16, 10);
+	CALL_BATCH_PERCPU_ALLOC(4096, 8, 11);
+
+	return 0;
+}