e2852ae825
All percpu counters are linked to a global list on initialization and
removed from it on destruction. The list is walked during CPU up/down.
If a percpu counter is freed without being properly destroyed, the system
will oops only on the next CPU up/down making it pretty nasty to track
down. This patch adds debugobj support for percpu counters so that such
problems can be found easily.
As percpu counters don't make sense on stack and can't be statically
initialized, debugobj support is pretty simple. It's initialized and
activated on counter initialization, and deactivated and destroyed on
counter destruction. With this patch applied, the bug fixed by commit
602586a83b
(shmem: put_super must
percpu_counter_destroy) triggers the following warning on tmpfs unmount
and the system won't oops on the next cpu up/down operation.
------------[ cut here ]------------
WARNING: at lib/debugobjects.c:259 debug_print_object+0x5c/0x70()
Hardware name: Bochs
ODEBUG: free active (active state 0) object type: percpu_counter
Modules linked in:
Pid: 3999, comm: umount Not tainted 2.6.36-rc2-work+ #5
Call Trace:
[<ffffffff81083f7f>] warn_slowpath_common+0x7f/0xc0
[<ffffffff81084076>] warn_slowpath_fmt+0x46/0x50
[<ffffffff813b45cc>] debug_print_object+0x5c/0x70
[<ffffffff813b50e5>] debug_check_no_obj_freed+0x125/0x210
[<ffffffff811577d3>] kfree+0xb3/0x2f0
[<ffffffff81132edd>] shmem_put_super+0x1d/0x30
[<ffffffff81162e96>] generic_shutdown_super+0x56/0xe0
[<ffffffff81162f86>] kill_anon_super+0x16/0x60
[<ffffffff81162ff7>] kill_litter_super+0x27/0x30
[<ffffffff81163295>] deactivate_locked_super+0x45/0x60
[<ffffffff81163cfa>] deactivate_super+0x4a/0x70
[<ffffffff8117d446>] mntput_no_expire+0x86/0xe0
[<ffffffff8117df7f>] sys_umount+0x6f/0x360
[<ffffffff8103f01b>] system_call_fastpath+0x16/0x1b
---[ end trace cce2a341ba3611a7 ]---
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
223 lines
5.0 KiB
C
223 lines
5.0 KiB
C
/*
|
|
* Fast batching percpu counters.
|
|
*/
|
|
|
|
#include <linux/percpu_counter.h>
|
|
#include <linux/notifier.h>
|
|
#include <linux/mutex.h>
|
|
#include <linux/init.h>
|
|
#include <linux/cpu.h>
|
|
#include <linux/module.h>
|
|
#include <linux/debugobjects.h>
|
|
|
|
/* All live percpu counters; walked by the CPU hotplug callback below. */
static LIST_HEAD(percpu_counters);
/* Serializes additions to / removals from percpu_counters. */
static DEFINE_MUTEX(percpu_counters_lock);
|
|
|
|
#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

/*
 * debugobjects fixup callback, invoked when memory holding a still-active
 * percpu_counter is about to be freed.  Destroy the counter so it is
 * unlinked from the hotplug list before the memory goes away; otherwise
 * the stale list entry would only oops on the next CPU up/down.
 */
static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return 1;	/* fixup was performed */
	default:
		return 0;	/* nothing to fix up */
	}
}

static struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

/* Register @fbc with debugobjects and mark it active (counter init). */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

/* Mark @fbc inactive and released (counter destruction); a later free of
 * an object that skipped this path triggers the fixup above. */
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
/* Tracking disabled: the hooks compile away to nothing. */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
|
|
|
|
/*
 * Set the counter to @amount: zero every CPU's local delta and store the
 * value in the central count.  Done under fbc->lock so a concurrent
 * folder in __percpu_counter_add() cannot interleave with the reset.
 */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int i;

	spin_lock(&fbc->lock);
	for_each_possible_cpu(i)
		*per_cpu_ptr(fbc->counters, i) = 0;
	fbc->count = amount;
	spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);
|
|
|
|
/*
 * Add @amount to this CPU's local delta, folding it into the central
 * count once its magnitude reaches @batch.  get_cpu() disables
 * preemption so the per-cpu slot cannot change under us.
 */
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s32 *pcount = per_cpu_ptr(fbc->counters, get_cpu());
	s64 cnt = *pcount + amount;

	if (cnt > -batch && cnt < batch) {
		/* Still within the batch window: stay local, lock-free. */
		*pcount = cnt;
	} else {
		spin_lock(&fbc->lock);
		fbc->count += cnt;
		*pcount = 0;
		spin_unlock(&fbc->lock);
	}
	put_cpu();
}
EXPORT_SYMBOL(__percpu_counter_add);
|
|
|
|
/*
|
|
* Add up all the per-cpu counts, return the result. This is a more accurate
|
|
* but much slower version of percpu_counter_read_positive()
|
|
*/
|
|
s64 __percpu_counter_sum(struct percpu_counter *fbc)
|
|
{
|
|
s64 ret;
|
|
int cpu;
|
|
|
|
spin_lock(&fbc->lock);
|
|
ret = fbc->count;
|
|
for_each_online_cpu(cpu) {
|
|
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
|
|
ret += *pcount;
|
|
}
|
|
spin_unlock(&fbc->lock);
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL(__percpu_counter_sum);
|
|
|
|
/*
 * Initialize @fbc with the starting value @amount.
 *
 * Allocates the per-cpu delta array, registers the counter with
 * debugobjects (when enabled), and links it on the global list so the
 * hotplug callback can fold in counts of dying CPUs.  @key sets the
 * lockdep class of fbc->lock for callers that need a distinct class.
 *
 * Returns 0 on success, -ENOMEM if the per-cpu allocation fails.
 */
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key)
{
	spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
	if (!fbc->counters)
		return -ENOMEM;

	/* Must be fully initialized before becoming visible to debugobjects
	 * and the hotplug walker below. */
	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	mutex_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	mutex_unlock(&percpu_counters_lock);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
|
|
|
|
/*
 * Tear down @fbc: deregister it from debugobjects, unlink it from the
 * hotplug list, and free the per-cpu delta array.  Safe to call on a
 * counter that was never initialized (or already destroyed) — detected
 * via the NULL counters pointer.
 */
void percpu_counter_destroy(struct percpu_counter *fbc)
{
	if (!fbc->counters)
		return;

	/* Deactivate before unlinking/freeing so debugobjects sees the
	 * object leave the active state while it is still valid. */
	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_del(&fbc->list);
	mutex_unlock(&percpu_counters_lock);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;	/* make double-destroy a no-op */
}
EXPORT_SYMBOL(percpu_counter_destroy);
|
|
|
|
/* Per-cpu delta threshold before folding into the central count;
 * rescaled with the online CPU count by compute_batch_value(). */
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);
|
|
|
|
static void compute_batch_value(void)
|
|
{
|
|
int nr = num_online_cpus();
|
|
|
|
percpu_counter_batch = max(32, nr*2);
|
|
}
|
|
|
|
/*
 * CPU hotplug notifier: recompute the batch size for the new online CPU
 * count and, when a CPU dies, fold that CPU's pending delta of every
 * registered counter into its central count so nothing is lost.
 */
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cpu;
	struct percpu_counter *fbc;

	compute_batch_value();
	if (action != CPU_DEAD)
		return NOTIFY_OK;

	cpu = (unsigned long)hcpu;
	/* percpu_counters_lock nests outside each counter's spinlock. */
	mutex_lock(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
		unsigned long flags;

		/* irqsave form — presumably fbc->lock can be taken from
		 * irq context by some users; matches existing usage. */
		spin_lock_irqsave(&fbc->lock, flags);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;	/* dead CPU's delta now folded in */
		spin_unlock_irqrestore(&fbc->lock, flags);
	}
	mutex_unlock(&percpu_counters_lock);
#endif
	return NOTIFY_OK;
}
|
|
|
|
/*
|
|
* Compare counter against given value.
|
|
* Return 1 if greater, 0 if equal and -1 if less
|
|
*/
|
|
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
|
|
{
|
|
s64 count;
|
|
|
|
count = percpu_counter_read(fbc);
|
|
/* Check to see if rough count will be sufficient for comparison */
|
|
if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
|
|
if (count > rhs)
|
|
return 1;
|
|
else
|
|
return -1;
|
|
}
|
|
/* Need to use precise count */
|
|
count = percpu_counter_sum(fbc);
|
|
if (count > rhs)
|
|
return 1;
|
|
else if (count < rhs)
|
|
return -1;
|
|
else
|
|
return 0;
|
|
}
|
|
EXPORT_SYMBOL(percpu_counter_compare);
|
|
|
|
/*
 * Boot-time setup: compute the initial batch value for the boot CPUs and
 * register the hotplug notifier that rescales it and folds the counts of
 * dying CPUs.
 */
static int __init percpu_counter_startup(void)
{
	compute_batch_value();
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);
|