// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters. Initially, we
 * don't try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine; when we go to shutdown,
 * the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative the sum of all the
 * percpu_count vars will be equal to what it would have been if all the gets
 * and puts were done to a single integer, even if some of the percpu integers
 * overflow or underflow).
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non percpu mode before the initial ref is dropped everything
 * works.
 *
 * Converting to non percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */
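
/*
 * Lifecycle at a glance (an informal summary of the scheme described above,
 * not additional semantics):
 *
 *	percpu_ref_init()			percpu mode, refcount starts at 1
 *	percpu_ref_get()/percpu_ref_put()	cheap, CPU-local adjustments
 *	percpu_ref_kill()			drop the initial ref and switch
 *						to atomic mode
 *	->release()				invoked once the count hits 0
 *	percpu_ref_exit()			free the percpu counter
 */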

#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))
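
/*
 * Worked example of the bias (illustrative numbers only): on a 64-bit machine
 * PERCPU_COUNT_BIAS is 2^63, so a ref that starts in percpu mode has its
 * atomic count initialized to 2^63 + 1 (the bias plus the initial ref).
 * Suppose CPU0 performed 5 gets and CPU1 performed 5 puts while in percpu
 * mode: the percpu counters hold 5 and (unsigned long)-5 respectively.  While
 * the ref is being switched to atomic mode, puts that already land on the
 * atomic counter cannot drag it to 0 thanks to the bias.  Once the percpu
 * counters have been summed (0 modulo 2^64 in this example) and the bias
 * subtracted, the atomic count is back to exactly 1 - the initial ref - as
 * the modular-arithmetic argument above promises.
 */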

static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned long __percpu *)
		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref. @ref starts out in percpu mode with a refcount of 1 unless
 * @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD. These flags
 * change the start state to atomic with the latter setting the initial refcount
 * to 0. See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		    unsigned int flags, gfp_t gfp)
{
	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
			     __alignof__(unsigned long));
	unsigned long start_count = 0;
	struct percpu_ref_data *data;

	ref->percpu_count_ptr = (unsigned long)
		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
	if (!ref->percpu_count_ptr)
		return -ENOMEM;

	data = kzalloc(sizeof(*ref->data), gfp);
	if (!data) {
		free_percpu((void __percpu *)ref->percpu_count_ptr);
		ref->percpu_count_ptr = 0;
		return -ENOMEM;
	}

	data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
	data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;

	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
		data->allow_reinit = true;
	} else {
		start_count += PERCPU_COUNT_BIAS;
	}

	if (flags & PERCPU_REF_INIT_DEAD)
		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	else
		start_count++;

	atomic_long_set(&data->count, start_count);

	data->release = release;
	data->confirm_switch = NULL;
	data->ref = ref;
	ref->data = data;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
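
/*
 * Example usage - an illustrative sketch only; the structure, the release
 * callback and the error handling below are made-up names, not taken from
 * any particular in-tree caller:
 *
 *	struct my_object {
 *		struct percpu_ref ref;
 *		...
 *	};
 *
 *	static void my_object_release(struct percpu_ref *ref)
 *	{
 *		struct my_object *obj = container_of(ref, struct my_object, ref);
 *
 *		percpu_ref_exit(ref);
 *		kfree(obj);
 *	}
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	if (!obj)
 *		return -ENOMEM;
 *	if (percpu_ref_init(&obj->ref, my_object_release, 0, GFP_KERNEL)) {
 *		kfree(obj);
 *		return -ENOMEM;
 *	}
 *
 * Each active user then pairs percpu_ref_get() with percpu_ref_put(); the
 * initial reference taken by percpu_ref_init() is dropped with
 * percpu_ref_kill(), after which my_object_release() runs once the last
 * reference is gone.
 */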

static void __percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

	if (percpu_count) {
		/* non-NULL confirm_switch indicates switching in progress */
		WARN_ON_ONCE(ref->data && ref->data->confirm_switch);
		free_percpu(percpu_count);
		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
	}
}

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref. The caller is responsible for ensuring that
 * @ref is no longer in active use. The usual places to invoke this
 * function from are the @ref->release() callback or in the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	struct percpu_ref_data *data = ref->data;
	unsigned long flags;

	__percpu_ref_exit(ref);

	if (!data)
		return;

	/*
	 * Stash the final count in the flag-free bits of ->percpu_count_ptr
	 * so that percpu_ref_is_zero() keeps working after ->data is freed.
	 */
	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
	ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) <<
		__PERCPU_REF_FLAG_BITS;
	ref->data = NULL;
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

	kfree(data);
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);

static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
	struct percpu_ref_data *data = container_of(rcu,
			struct percpu_ref_data, rcu);
	struct percpu_ref *ref = data->ref;

	data->confirm_switch(ref);
	data->confirm_switch = NULL;
	wake_up_all(&percpu_ref_switch_waitq);

	if (!data->allow_reinit)
		__percpu_ref_exit(ref);

	/* drop ref from percpu_ref_switch_to_atomic() */
	percpu_ref_put(ref);
}

static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
	struct percpu_ref_data *data = container_of(rcu,
			struct percpu_ref_data, rcu);
	struct percpu_ref *ref = data->ref;
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	static atomic_t underflows;
	unsigned long count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(percpu_count, cpu);

	pr_debug("global %lu percpu %lu\n",
		 atomic_long_read(&data->count), count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);

	if (WARN_ONCE(atomic_long_read(&data->count) <= 0,
		      "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
		      data->release, atomic_long_read(&data->count)) &&
	    atomic_inc_return(&underflows) < 4) {
		pr_err("%s(): percpu_ref underflow", __func__);
		mem_dump_obj(data);
	}

	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
	percpu_ref_call_confirm_rcu(rcu);
}

static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
					  percpu_ref_func_t *confirm_switch)
{
	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
		if (confirm_switch)
			confirm_switch(ref);
		return;
	}

	/* switching from percpu to atomic */
	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

	/*
	 * Non-NULL ->confirm_switch is used to indicate that switching is
	 * in progress. Use noop one if unspecified.
	 */
	ref->data->confirm_switch = confirm_switch ?:
		percpu_ref_noop_confirm_switch;

	percpu_ref_get(ref);	/* put after confirmation */
	call_rcu_hurry(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu);
}

static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	int cpu;

	BUG_ON(!percpu_count);

	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
		return;

	if (WARN_ON_ONCE(!ref->data->allow_reinit))
		return;

	atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count);

	/*
	 * Restore per-cpu operation. smp_store_release() is paired
	 * with READ_ONCE() in __ref_is_percpu() and guarantees that the
	 * zeroing is visible to all percpu accesses which can see the
	 * following __PERCPU_REF_ATOMIC clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(percpu_count, cpu) = 0;

	smp_store_release(&ref->percpu_count_ptr,
			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}

static void __percpu_ref_switch_mode(struct percpu_ref *ref,
				     percpu_ref_func_t *confirm_switch)
{
	struct percpu_ref_data *data = ref->data;

	lockdep_assert_held(&percpu_ref_switch_lock);

	/*
	 * If the previous ATOMIC switching hasn't finished yet, wait for
	 * its completion. If the caller ensures that ATOMIC switching
	 * isn't in progress, this function can be called from any context.
	 */
	wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
			    percpu_ref_switch_lock);

	if (data->force_atomic || percpu_ref_is_dying(ref))
		__percpu_ref_switch_to_atomic(ref, confirm_switch);
	else
		__percpu_ref_switch_to_percpu(ref);
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode. All its percpu counts will
 * be collected to the main atomic counter. On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked. This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations. Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode. If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->data->force_atomic = true;
	__percpu_ref_switch_mode(ref, confirm_switch);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);

/**
 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 *
 * Schedule switching the ref to atomic mode, and wait for the
 * switch to complete. Caller must ensure that no other thread
 * will switch back to percpu mode.
 */
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
	percpu_ref_switch_to_atomic(ref, NULL);
	wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
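
/*
 * Example of the atomic/percpu toggle - an illustrative sketch only, with a
 * made-up q->refs member rather than code from any real caller:
 *
 *	// Enter a phase (e.g. suspend) where an exact, centralized count is
 *	// needed; wait until the switch has completed on all CPUs.
 *	percpu_ref_switch_to_atomic_sync(&q->refs);
 *
 *	// ... operate while every get/put hits the single atomic counter ...
 *
 *	// Resume cheap percpu counting afterwards.
 *	percpu_ref_switch_to_percpu(&q->refs);
 */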

/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode. This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations. This function reverses the sticky atomic state set
 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
 * dying or dead, the actual switching takes place on the following
 * percpu_ref_reinit().
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode. If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->data->force_atomic = false;
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs at which point all
 * further invocations of percpu_ref_tryget_live() will fail. See
 * percpu_ref_tryget_live() for details.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 *
 * There are no implied RCU grace periods between kill and release.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ONCE(percpu_ref_is_dying(ref),
		  "%s called more than once on %ps!", __func__,
		  ref->data->release);

	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	__percpu_ref_switch_mode(ref, confirm_kill);
	percpu_ref_put(ref);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
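
/*
 * Example of a confirmed kill - an illustrative sketch only; my_object,
 * ->users_dead and my_object_users_dead() are made-up names:
 *
 *	static void my_object_users_dead(struct percpu_ref *ref)
 *	{
 *		struct my_object *obj = container_of(ref, struct my_object, ref);
 *
 *		// Runs from RCU callback context once no CPU can complete a
 *		// new percpu_ref_tryget_live() on obj->ref; must not block.
 *		complete(&obj->users_dead);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&obj->ref, my_object_users_dead);
 *	wait_for_completion(&obj->users_dead);
 *	// From here on, no new users can appear; existing users still hold
 *	// references and ->release() runs only after the last put.
 */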

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long count, flags;

	if (__ref_is_percpu(ref, &percpu_count))
		return false;

	/* protect us from being destroyed */
	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
	if (ref->data)
		count = atomic_long_read(&ref->data->count);
	else
		count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

	return count == 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_is_zero);

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	WARN_ON_ONCE(!percpu_ref_is_zero(ref));

	percpu_ref_resurrect(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
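
/*
 * Example of a kill/reinit cycle - an illustrative sketch only; obj, its
 * zero_waitq and the wake-up in the release callback are made-up names, and
 * callers must provide their own exclusion against concurrent kill/reinit:
 *
 *	// ->release() is assumed to do: wake_up(&obj->zero_waitq);
 *	percpu_ref_kill(&obj->ref);			// stop new users
 *	wait_event(obj->zero_waitq,
 *		   percpu_ref_is_zero(&obj->ref));	// wait for the last put
 *	// ... @obj is quiescent, do the maintenance work ...
 *	percpu_ref_reinit(&obj->ref);			// percpu mode again,
 *							// refcount back to 1
 */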

/**
 * percpu_ref_resurrect - modify a percpu refcount from dead to live
 * @ref: percpu_ref to resurrect
 *
 * Modify @ref so that it's in the same state as before percpu_ref_kill() was
 * called. @ref must be dead but must not yet have exited.
 *
 * If @ref->release() frees @ref then the caller is responsible for
 * guaranteeing that @ref->release() does not get called while this
 * function is in progress.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_resurrect(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ON_ONCE(!percpu_ref_is_dying(ref));
	WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));

	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
	percpu_ref_get(ref);
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_resurrect);