
net: Reference bpf_redirect_info via task_struct on PREEMPT_RT.

The XDP redirect process is two-staged:
- bpf_prog_run_xdp() is invoked to run an eBPF program which inspects the
  packet and makes decisions. While doing that, the per-CPU variable
  bpf_redirect_info is used.

- Afterwards xdp_do_redirect() is invoked and accesses bpf_redirect_info
  and it may also access other per-CPU variables like xskmap_flush_list.

At the very end of the NAPI callback, xdp_do_flush() is invoked, which
does not access bpf_redirect_info but does touch the individual per-CPU
lists.
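
For illustration only, this is roughly the shape of that flow from a
driver's point of view. The helper process_xdp_packet() is a made-up
name; bpf_prog_run_xdp(), xdp_do_redirect() and xdp_do_flush() are the
real entry points (error handling and other verdicts omitted):

	/* Hypothetical driver-side sketch of the two stages plus the flush. */
	static void process_xdp_packet(struct net_device *dev, struct xdp_buff *xdp,
				       struct bpf_prog *prog)
	{
		u32 act = bpf_prog_run_xdp(prog, xdp);	/* stage 1: fills bpf_redirect_info */

		if (act == XDP_REDIRECT)
			xdp_do_redirect(dev, xdp, prog);	/* stage 2: reads bpf_redirect_info */
	}

	/* ...and once, at the very end of the NAPI callback: */
	xdp_do_flush();					/* flushes the per-CPU lists */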

The per-CPU variables are only used in the NAPI callback, hence
disabling bottom halves is the only protection mechanism. Users from
preemptible context (like cpu_map_kthread_run()) explicitly disable
bottom halves for protection reasons.
Since local_bh_disable() does not act as a lock on PREEMPT_RT, this data
structure requires explicit locking.

PREEMPT_RT has forced-threaded interrupts enabled and every NAPI
callback runs in a thread. If each thread has its own data structure,
then locking can be avoided.

Create a struct bpf_net_context which contains struct bpf_redirect_info.
Define the variable on the stack, use bpf_net_ctx_set() to save a
pointer to it, and bpf_net_ctx_clear() to remove it again.
bpf_net_ctx_set() may nest. For instance, a function can be used both
from within NET_RX_SOFTIRQ/net_rx_action, which uses bpf_net_ctx_set(),
and from NET_TX_SOFTIRQ, which does not. Therefore only the first
invocation updates the pointer, as illustrated below.
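
To illustrate the nesting rule (example only, the variable names are
made up): a second bpf_net_ctx_set() returns NULL while a context is
already installed, and bpf_net_ctx_clear(NULL) is a no-op, so the inner
pair does not tear down the outer context:

	struct bpf_net_context outer_ctx, inner_ctx;
	struct bpf_net_context *outer, *inner;

	outer = bpf_net_ctx_set(&outer_ctx);	/* first invocation: installs outer_ctx */
	inner = bpf_net_ctx_set(&inner_ctx);	/* already set: returns NULL */

	bpf_net_ctx_clear(inner);		/* NULL: no-op, outer_ctx stays installed */
	bpf_net_ctx_clear(outer);		/* removes outer_ctx from current */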
Use bpf_net_ctx_get_ri() as a wrapper to retrieve the current struct
bpf_redirect_info. The returned data structure is zero-initialized to
ensure nothing is leaked from the stack. This is done on first usage of
the struct: bpf_net_ctx_set() sets bpf_redirect_info::kern_flags to 0 to
note that initialisation is required, and the first invocation of
bpf_net_ctx_get_ri() will memset() the data structure and update
bpf_redirect_info::kern_flags.
bpf_redirect_info::nh is excluded from the memset() because it is only
used if BPF_F_NEIGH is set, which also sets the nh member. The
kern_flags member is moved past nh to exclude it from the memset() as
well.

The pointer to bpf_net_context is saved in the task's task_struct.
Always using the bpf_net_context approach has the advantage that there
are almost zero differences between PREEMPT_RT and non-PREEMPT_RT
builds.
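
The call sites converted by this patch all follow the same pattern;
roughly (sketch only, details differ per call site):

	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;

	local_bh_disable();
	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);

	/* NAPI poll / XDP or TC program runs here; bpf_net_ctx_get_ri()
	 * now resolves to the on-stack bpf_redirect_info.
	 */

	bpf_net_ctx_clear(bpf_net_ctx);
	local_bh_enable();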

Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Eduard Zingerman <eddyz87@gmail.com>
Cc: Hao Luo <haoluo@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: KP Singh <kpsingh@kernel.org>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: Song Liu <song@kernel.org>
Cc: Stanislav Fomichev <sdf@google.com>
Cc: Yonghong Song <yonghong.song@linux.dev>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20240620132727.660738-15-bigeasy@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Sebastian Andrzej Siewior 2024-06-20 15:22:04 +02:00 committed by Jakub Kicinski
parent 78f520b7bb
commit 401cb7dae8
9 changed files with 114 additions and 45 deletions


@@ -733,21 +733,59 @@ struct bpf_nh_params {
 	};
 };
 
+/* flags for bpf_redirect_info kern_flags */
+#define BPF_RI_F_RF_NO_DIRECT	BIT(0)	/* no napi_direct on return_frame */
+#define BPF_RI_F_RI_INIT	BIT(1)
+
 struct bpf_redirect_info {
 	u64 tgt_index;
 	void *tgt_value;
 	struct bpf_map *map;
 	u32 flags;
-	u32 kern_flags;
 	u32 map_id;
 	enum bpf_map_type map_type;
 	struct bpf_nh_params nh;
+	u32 kern_flags;
 };
 
-DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
-
-/* flags for bpf_redirect_info kern_flags */
-#define BPF_RI_F_RF_NO_DIRECT	BIT(0)	/* no napi_direct on return_frame */
+struct bpf_net_context {
+	struct bpf_redirect_info ri;
+};
+
+static inline struct bpf_net_context *bpf_net_ctx_set(struct bpf_net_context *bpf_net_ctx)
+{
+	struct task_struct *tsk = current;
+
+	if (tsk->bpf_net_context != NULL)
+		return NULL;
+	bpf_net_ctx->ri.kern_flags = 0;
+
+	tsk->bpf_net_context = bpf_net_ctx;
+	return bpf_net_ctx;
+}
+
+static inline void bpf_net_ctx_clear(struct bpf_net_context *bpf_net_ctx)
+{
+	if (bpf_net_ctx)
+		current->bpf_net_context = NULL;
+}
+
+static inline struct bpf_net_context *bpf_net_ctx_get(void)
+{
+	return current->bpf_net_context;
+}
+
+static inline struct bpf_redirect_info *bpf_net_ctx_get_ri(void)
+{
+	struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+	if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_RI_INIT)) {
+		memset(&bpf_net_ctx->ri, 0, offsetof(struct bpf_net_context, ri.nh));
+		bpf_net_ctx->ri.kern_flags |= BPF_RI_F_RI_INIT;
+	}
+
+	return &bpf_net_ctx->ri;
+}
 
 /* Compute the linear packet data range [data, data_end) which
  * will be accessed by various program types (cls_bpf, act_bpf,
@@ -1018,25 +1056,23 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 				       const struct bpf_insn *patch, u32 len);
 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
 
-void bpf_clear_redirect_map(struct bpf_map *map);
-
 static inline bool xdp_return_frame_no_direct(void)
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
 
 	return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
 }
 
 static inline void xdp_set_return_frame_no_direct(void)
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
 
 	ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
 }
 
 static inline void xdp_clear_return_frame_no_direct(void)
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
 
 	ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
 }
@@ -1592,7 +1628,7 @@ static __always_inline long __bpf_xdp_redirect_map(struct bpf_map *map, u64 inde
 						    u64 flags, const u64 flag_mask,
 						    void *lookup_elem(struct bpf_map *map, u32 key))
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
 	const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;
 
 	/* Lower bits of the flags are used as return code on lookup failure */


@@ -54,6 +54,7 @@ struct bio_list;
 struct blk_plug;
 struct bpf_local_storage;
 struct bpf_run_ctx;
+struct bpf_net_context;
 struct capture_control;
 struct cfs_rq;
 struct fs_struct;
@@ -1509,6 +1510,8 @@ struct task_struct {
 	/* Used for BPF run context */
 	struct bpf_run_ctx		*bpf_ctx;
 #endif
+	/* Used by BPF for per-TASK xdp storage */
+	struct bpf_net_context		*bpf_net_context;
 
 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
 	unsigned long			lowest_stack;


@@ -240,12 +240,14 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
 			    int xdp_n, struct xdp_cpumap_stats *stats,
 			    struct list_head *list)
 {
+	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
 	int nframes;
 
 	if (!rcpu->prog)
 		return xdp_n;
 
 	rcu_read_lock_bh();
+	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
 
 	nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, xdp_n, stats);
@@ -255,6 +257,7 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
 	if (unlikely(!list_empty(list)))
 		cpu_map_bpf_prog_run_skb(rcpu, list, stats);
 
+	bpf_net_ctx_clear(bpf_net_ctx);
 	rcu_read_unlock_bh(); /* resched point, may call do_softirq() */
 
 	return nframes;


@@ -196,7 +196,14 @@ static void dev_map_free(struct bpf_map *map)
 	list_del_rcu(&dtab->list);
 	spin_unlock(&dev_map_lock);
 
-	bpf_clear_redirect_map(map);
+	/* bpf_redirect_info->map is assigned in __bpf_xdp_redirect_map()
+	 * during NAPI callback and cleared after the XDP redirect. There is no
+	 * explicit RCU read section which protects bpf_redirect_info->map but
+	 * local_bh_disable() also marks the beginning an RCU section. This
+	 * makes the complete softirq callback RCU protected. Thus after
+	 * following synchronize_rcu() there no bpf_redirect_info->map == map
+	 * assignment.
+	 */
 	synchronize_rcu();
 
 	/* Make sure prior __dev_map_entry_free() have completed. */


@@ -2355,6 +2355,7 @@ __latent_entropy struct task_struct *copy_process(
 	RCU_INIT_POINTER(p->bpf_storage, NULL);
 	p->bpf_ctx = NULL;
 #endif
+	p->bpf_net_context = NULL;
 
 	/* Perform scheduler related setup. Assign this task to a CPU. */
 	retval = sched_fork(clone_flags, p);


@@ -283,9 +283,10 @@ static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
 static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
 			      u32 repeat)
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
 	int err = 0, act, ret, i, nframes = 0, batch_sz;
 	struct xdp_frame **frames = xdp->frames;
+	struct bpf_redirect_info *ri;
 	struct xdp_page_head *head;
 	struct xdp_frame *frm;
 	bool redirect = false;
@@ -295,6 +296,8 @@ static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
 	batch_sz = min_t(u32, repeat, xdp->batch_size);
 
 	local_bh_disable();
+	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+	ri = bpf_net_ctx_get_ri();
 	xdp_set_return_frame_no_direct();
 
 	for (i = 0; i < batch_sz; i++) {
@@ -359,6 +362,7 @@ out:
 	}
 
 	xdp_clear_return_frame_no_direct();
+	bpf_net_ctx_clear(bpf_net_ctx);
 	local_bh_enable();
 	return err;
 }
@@ -394,6 +398,7 @@ static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
 static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
 			u32 *retval, u32 *time, bool xdp)
 {
+	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
 	struct bpf_prog_array_item item = {.prog = prog};
 	struct bpf_run_ctx *old_ctx;
 	struct bpf_cg_run_ctx run_ctx;
@@ -419,10 +424,14 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
 	do {
 		run_ctx.prog_item = &item;
 		local_bh_disable();
+		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+
 		if (xdp)
 			*retval = bpf_prog_run_xdp(prog, ctx);
 		else
 			*retval = bpf_prog_run(prog, ctx);
+
+		bpf_net_ctx_clear(bpf_net_ctx);
 		local_bh_enable();
 	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
 	bpf_reset_run_ctx(old_ctx);


@@ -4045,10 +4045,13 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
 {
 	struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress);
 	enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_INGRESS;
+	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
 	int sch_ret;
 
 	if (!entry)
 		return skb;
+
+	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
 	if (*pt_prev) {
 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
 		*pt_prev = NULL;
@@ -4077,10 +4080,12 @@ ingress_verdict:
 			break;
 		}
 		*ret = NET_RX_SUCCESS;
+		bpf_net_ctx_clear(bpf_net_ctx);
 		return NULL;
 	case TC_ACT_SHOT:
 		kfree_skb_reason(skb, drop_reason);
 		*ret = NET_RX_DROP;
+		bpf_net_ctx_clear(bpf_net_ctx);
 		return NULL;
 	/* used by tc_run */
 	case TC_ACT_STOLEN:
@@ -4090,8 +4095,10 @@ ingress_verdict:
 		fallthrough;
 	case TC_ACT_CONSUMED:
 		*ret = NET_RX_SUCCESS;
+		bpf_net_ctx_clear(bpf_net_ctx);
 		return NULL;
 	}
 
+	bpf_net_ctx_clear(bpf_net_ctx);
 	return skb;
 }
@@ -4101,11 +4108,14 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
 {
 	struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress);
 	enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_EGRESS;
+	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
 	int sch_ret;
 
 	if (!entry)
 		return skb;
 
+	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+
 	/* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was
 	 * already set by the caller.
 	 */
@@ -4121,10 +4131,12 @@ egress_verdict:
 		/* No need to push/pop skb's mac_header here on egress! */
 		skb_do_redirect(skb);
 		*ret = NET_XMIT_SUCCESS;
+		bpf_net_ctx_clear(bpf_net_ctx);
 		return NULL;
 	case TC_ACT_SHOT:
 		kfree_skb_reason(skb, drop_reason);
 		*ret = NET_XMIT_DROP;
+		bpf_net_ctx_clear(bpf_net_ctx);
 		return NULL;
 	/* used by tc_run */
 	case TC_ACT_STOLEN:
@@ -4134,8 +4146,10 @@ egress_verdict:
 		fallthrough;
 	case TC_ACT_CONSUMED:
 		*ret = NET_XMIT_SUCCESS;
+		bpf_net_ctx_clear(bpf_net_ctx);
 		return NULL;
 	}
 
+	bpf_net_ctx_clear(bpf_net_ctx);
 	return skb;
 }
@@ -6325,6 +6339,7 @@ enum {
 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
 			   unsigned flags, u16 budget)
 {
+	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
 	bool skip_schedule = false;
 	unsigned long timeout;
 	int rc;
@@ -6342,6 +6357,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
 	clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
 
 	local_bh_disable();
+	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
 	if (flags & NAPI_F_PREFER_BUSY_POLL) {
 		napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
@@ -6364,6 +6380,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
 	netpoll_poll_unlock(have_poll_lock);
 	if (rc == budget)
 		__busy_poll_stop(napi, skip_schedule);
+	bpf_net_ctx_clear(bpf_net_ctx);
 	local_bh_enable();
 }
@@ -6373,6 +6390,7 @@ static void __napi_busy_loop(unsigned int napi_id,
 {
 	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
 	int (*napi_poll)(struct napi_struct *napi, int budget);
+	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
 	void *have_poll_lock = NULL;
 	struct napi_struct *napi;
@@ -6391,6 +6409,7 @@ restart:
 		int work = 0;
 
 		local_bh_disable();
+		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
 		if (!napi_poll) {
 			unsigned long val = READ_ONCE(napi->state);
@@ -6421,6 +6440,7 @@ count:
 			__NET_ADD_STATS(dev_net(napi->dev),
 					LINUX_MIB_BUSYPOLLRXPACKETS, work);
 		skb_defer_free_flush(this_cpu_ptr(&softnet_data));
+		bpf_net_ctx_clear(bpf_net_ctx);
 		local_bh_enable();
 
 		if (!loop_end || loop_end(loop_end_arg, start_time))
@@ -6848,6 +6868,7 @@ static int napi_thread_wait(struct napi_struct *napi)
 
 static void napi_threaded_poll_loop(struct napi_struct *napi)
 {
+	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
 	struct softnet_data *sd;
 	unsigned long last_qs = jiffies;
@@ -6856,6 +6877,8 @@ static void napi_threaded_poll_loop(struct napi_struct *napi)
 		void *have;
 
 		local_bh_disable();
+		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
+
 		sd = this_cpu_ptr(&softnet_data);
 		sd->in_napi_threaded_poll = true;
@@ -6871,6 +6894,7 @@ static void napi_threaded_poll_loop(struct napi_struct *napi)
 			net_rps_action_and_irq_enable(sd);
 		}
 		skb_defer_free_flush(sd);
+		bpf_net_ctx_clear(bpf_net_ctx);
 		local_bh_enable();
 
 		if (!repoll)
@@ -6896,10 +6920,12 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 	unsigned long time_limit = jiffies +
 		usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs));
+	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
 	int budget = READ_ONCE(net_hotdata.netdev_budget);
 	LIST_HEAD(list);
 	LIST_HEAD(repoll);
 
+	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
 start:
 	sd->in_net_rx_action = true;
 	local_irq_disable();
@@ -6952,7 +6978,8 @@ start:
 	sd->in_net_rx_action = false;
 
 	net_rps_action_and_irq_enable(sd);
-end:;
+end:
+	bpf_net_ctx_clear(bpf_net_ctx);
 }
 
 struct netdev_adjacent {


@@ -2483,9 +2483,6 @@ static const struct bpf_func_proto bpf_clone_redirect_proto = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
-DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
-EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);
-
 static struct net_device *skb_get_peer_dev(struct net_device *dev)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
@@ -2498,7 +2495,7 @@ static struct net_device *skb_get_peer_dev(struct net_device *dev)
 
 int skb_do_redirect(struct sk_buff *skb)
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
 	struct net *net = dev_net(skb->dev);
 	struct net_device *dev;
 	u32 flags = ri->flags;
@@ -2531,7 +2528,7 @@ out_drop:
 
 BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
 
 	if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
 		return TC_ACT_SHOT;
@@ -2552,7 +2549,7 @@ static const struct bpf_func_proto bpf_redirect_proto = {
 
 BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags)
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
 
 	if (unlikely(flags))
 		return TC_ACT_SHOT;
@@ -2574,7 +2571,7 @@ static const struct bpf_func_proto bpf_redirect_peer_proto = {
 BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params,
 	   int, plen, u64, flags)
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
 
 	if (unlikely((plen && plen < sizeof(*params)) || flags))
 		return TC_ACT_SHOT;
@@ -4300,30 +4297,13 @@ void xdp_do_check_flushed(struct napi_struct *napi)
 }
 #endif
 
-void bpf_clear_redirect_map(struct bpf_map *map)
-{
-	struct bpf_redirect_info *ri;
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		ri = per_cpu_ptr(&bpf_redirect_info, cpu);
-		/* Avoid polluting remote cacheline due to writes if
-		 * not needed. Once we pass this test, we need the
-		 * cmpxchg() to make sure it hasn't been changed in
-		 * the meantime by remote CPU.
-		 */
-		if (unlikely(READ_ONCE(ri->map) == map))
-			cmpxchg(&ri->map, map, NULL);
-	}
-}
-
 DEFINE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
 EXPORT_SYMBOL_GPL(bpf_master_redirect_enabled_key);
 
 u32 xdp_master_redirect(struct xdp_buff *xdp)
 {
+	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
 	struct net_device *master, *slave;
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
 
 	master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev);
 	slave = master->netdev_ops->ndo_xdp_get_xmit_slave(master, xdp);
@@ -4395,7 +4375,7 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
 	map = READ_ONCE(ri->map);
 
 	/* The map pointer is cleared when the map is being torn
-	 * down by bpf_clear_redirect_map()
+	 * down by dev_map_free()
 	 */
 	if (unlikely(!map)) {
 		err = -ENOENT;
@@ -4440,7 +4420,7 @@ err:
 int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
 		    struct bpf_prog *xdp_prog)
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
 	enum bpf_map_type map_type = ri->map_type;
 
 	if (map_type == BPF_MAP_TYPE_XSKMAP)
@@ -4454,7 +4434,7 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect);
 int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp,
 			  struct xdp_frame *xdpf, struct bpf_prog *xdp_prog)
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
 	enum bpf_map_type map_type = ri->map_type;
 
 	if (map_type == BPF_MAP_TYPE_XSKMAP)
@@ -4471,7 +4451,7 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
 				       enum bpf_map_type map_type, u32 map_id,
 				       u32 flags)
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
 	struct bpf_map *map;
 	int err;
@@ -4483,7 +4463,7 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
 	map = READ_ONCE(ri->map);
 
 	/* The map pointer is cleared when the map is being torn
-	 * down by bpf_clear_redirect_map()
+	 * down by dev_map_free()
 	 */
 	if (unlikely(!map)) {
 		err = -ENOENT;
@@ -4525,7 +4505,7 @@ err:
 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
 			    struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
 	enum bpf_map_type map_type = ri->map_type;
 	void *fwd = ri->tgt_value;
 	u32 map_id = ri->map_id;
@@ -4561,7 +4541,7 @@ err:
 
 BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
 
 	if (unlikely(flags))
 		return XDP_ABORTED;


@@ -38,12 +38,14 @@ static inline struct bpf_lwt *bpf_lwt_lwtunnel(struct lwtunnel_state *lwt)
 static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
 		       struct dst_entry *dst, bool can_redirect)
 {
+	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
 	int ret;
 
 	/* Disabling BH is needed to protect per-CPU bpf_redirect_info between
 	 * BPF prog and skb_do_redirect().
 	 */
 	local_bh_disable();
+	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
 	bpf_compute_data_pointers(skb);
 	ret = bpf_prog_run_save_cb(lwt->prog, skb);
@@ -76,6 +78,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
 		break;
 	}
 
+	bpf_net_ctx_clear(bpf_net_ctx);
 	local_bh_enable();
 
 	return ret;