From 9821d8d4628e630ab56f47a8e6b878a2576e069b Mon Sep 17 00:00:00 2001
From: Eli Cohen
Date: Tue, 14 Feb 2023 09:29:46 +0200
Subject: [PATCH] lib: cpu_rmap: Use allocator for rmap entries

Use a proper allocator for rmap entries, implemented as a naive for
loop. The allocator considers an entry free when it is NULL. Remove the
used field of struct cpu_rmap, which is no longer needed.

Also, avoid crashing the kernel if no free entry is available.

Cc: Thomas Gleixner
Signed-off-by: Eli Cohen
Signed-off-by: Saeed Mahameed
Reviewed-by: Jacob Keller
---
 include/linux/cpu_rmap.h |  3 +--
 lib/cpu_rmap.c           | 43 ++++++++++++++++++++++++++++++----------
 2 files changed, 33 insertions(+), 13 deletions(-)

diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
index be8aea04d023..0ec745e6cd36 100644
--- a/include/linux/cpu_rmap.h
+++ b/include/linux/cpu_rmap.h
@@ -16,14 +16,13 @@
  * struct cpu_rmap - CPU affinity reverse-map
  * @refcount: kref for object
  * @size: Number of objects to be reverse-mapped
- * @used: Number of objects added
  * @obj: Pointer to array of object pointers
  * @near: For each CPU, the index and distance to the nearest object,
  *	based on affinity masks
  */
 struct cpu_rmap {
 	struct kref	refcount;
-	u16		size, used;
+	u16		size;
 	void		**obj;
 	struct {
 		u16	index;
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index e77f12bb3c77..5d4bf7a8b926 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -128,19 +128,31 @@ debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
 }
 #endif
 
+static int get_free_index(struct cpu_rmap *rmap)
+{
+	int i;
+
+	for (i = 0; i < rmap->size; i++)
+		if (!rmap->obj[i])
+			return i;
+
+	return -ENOSPC;
+}
+
 /**
  * cpu_rmap_add - add object to a rmap
  * @rmap: CPU rmap allocated with alloc_cpu_rmap()
  * @obj: Object to add to rmap
  *
- * Return index of object.
+ * Return index of object or -ENOSPC if no free entry was found
  */
 int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
 {
-	u16 index;
+	int index = get_free_index(rmap);
+
+	if (index < 0)
+		return index;
 
-	BUG_ON(rmap->used >= rmap->size);
-	index = rmap->used++;
 	rmap->obj[index] = obj;
 	return index;
 }
@@ -230,7 +242,7 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
 	if (!rmap)
 		return;
 
-	for (index = 0; index < rmap->used; index++) {
+	for (index = 0; index < rmap->size; index++) {
 		glue = rmap->obj[index];
 		if (glue)
 			irq_set_affinity_notifier(glue->notify.irq, NULL);
@@ -295,13 +307,22 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
 	glue->notify.release = irq_cpu_rmap_release;
 	glue->rmap = rmap;
 	cpu_rmap_get(rmap);
-	glue->index = cpu_rmap_add(rmap, glue);
+	rc = cpu_rmap_add(rmap, glue);
+	if (rc < 0)
+		goto err_add;
+
+	glue->index = rc;
 	rc = irq_set_affinity_notifier(irq, &glue->notify);
-	if (rc) {
-		cpu_rmap_put(glue->rmap);
-		rmap->obj[glue->index] = NULL;
-		kfree(glue);
-	}
+	if (rc)
+		goto err_set;
+
+	return rc;
+
+err_set:
+	rmap->obj[glue->index] = NULL;
+err_add:
+	cpu_rmap_put(glue->rmap);
+	kfree(glue);
 	return rc;
 }
 EXPORT_SYMBOL(irq_cpu_rmap_add);
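
The sketch below is not part of the patch; it is a minimal, hypothetical caller
showing the visible effect of this change: cpu_rmap_add() and irq_cpu_rmap_add()
can now return -ENOSPC when every rmap slot is taken, instead of hitting BUG_ON(),
so callers should check the return value and unwind. The names my_dev, my_setup_irq_rmap
and the dev->irqs array are made up for illustration; only alloc_irq_cpu_rmap(),
irq_cpu_rmap_add() and free_irq_cpu_rmap() are the real kernel API.

#include <linux/cpu_rmap.h>
#include <linux/errno.h>
#include <linux/interrupt.h>

/* Hypothetical per-device state, for illustration only. */
struct my_dev {
	struct cpu_rmap *rmap;
	int *irqs;	/* IRQ numbers, one per vector */
	int nvecs;
};

static int my_setup_irq_rmap(struct my_dev *dev)
{
	int i, err;

	dev->rmap = alloc_irq_cpu_rmap(dev->nvecs);
	if (!dev->rmap)
		return -ENOMEM;

	for (i = 0; i < dev->nvecs; i++) {
		/*
		 * With this patch, a full rmap makes irq_cpu_rmap_add()
		 * fail with -ENOSPC (propagated from cpu_rmap_add())
		 * rather than crashing, so the error path below runs.
		 */
		err = irq_cpu_rmap_add(dev->rmap, dev->irqs[i]);
		if (err) {
			free_irq_cpu_rmap(dev->rmap);
			dev->rmap = NULL;
			return err;
		}
	}
	return 0;
}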