drivers: cacheinfo: use __free attribute instead of of_node_put()

Introduce the __free() cleanup attribute for scope-based resource
management. A device_node pointer declared with __free(device_node) is
automatically released (via of_node_put()) when it goes out of scope,
which removes the reference-count leaks that occur when an explicit
of_node_put() call is forgotten.
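
For reference, a minimal sketch of the mechanism (not part of this patch;
the device_node cleanup class is the one already provided by <linux/of.h>
on top of <linux/cleanup.h>, and example_get_cache_node() is a made-up
helper used only for illustration):
```
/* <linux/of.h> wires of_node_put() up as the cleanup action, roughly: */
/*   DEFINE_FREE(device_node, struct device_node *, if (_T) of_node_put(_T)) */

static int example_get_cache_node(unsigned int cpu)
{
	/* np is dropped via of_node_put() on every return path below. */
	struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);

	if (!np)
		return -ENOENT;	/* nothing to put */

	if (!of_property_present(np, "cache-unified"))
		return -ENOENT;	/* no explicit of_node_put() needed */

	return 0;		/* nor here */
}
```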

To introduce this feature, some modifications to the code structure were
necessary. The original pattern:
```
prev = np;
while (...) {
  [...]
  np = of_find_next_cache_node(np);
  of_node_put(prev);
  prev = np;
  [...]
}
```
has been updated to:
```
while (...) {
  [...]
  struct device_node __free(device_node) *prev = np;

  np = of_find_next_cache_node(np);
  [...]
}
```
With this change, the previous node is automatically cleaned up at the end
of each iteration, allowing the elimination of all of_node_put() calls and
some goto statements.
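
Semantically (an illustrative hand-written equivalent, not code from this
patch), the scoped prev behaves as if of_node_put() were called at the
closing brace of the block it is declared in, on every path out of that
block:
```
while (index < cache_leaves(cpu)) {
	this_leaf = per_cpu_cacheinfo_idx(cpu, index);
	if (this_leaf->level != 1) {
		struct device_node *prev = np;	/* keeps the old reference */

		np = of_find_next_cache_node(np);
		if (!np) {
			of_node_put(prev);	/* scope exit via break */
			break;
		}
		of_node_put(prev);		/* normal scope exit */
	}
	/* ... set up this leaf from np ... */
	index++;
}
```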

Suggested-by: Julia Lawall <julia.lawall@inria.fr>
Signed-off-by: Vincenzo Mezzela <vincenzo.mezzela@gmail.com>
Link: https://lore.kernel.org/r/20240719151335.869145-1-vincenzo.mezzela@gmail.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 1b48fbbc03 (parent 30b968b002)
Author:    Vincenzo Mezzela <vincenzo.mezzela@gmail.com>, 2024-07-19 17:13:35 +02:00
Committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

@@ -202,29 +202,24 @@ static void cache_of_set_props(struct cacheinfo *this_leaf,
 static int cache_setup_of_node(unsigned int cpu)
 {
-	struct device_node *np, *prev;
 	struct cacheinfo *this_leaf;
 	unsigned int index = 0;
 
-	np = of_cpu_device_node_get(cpu);
+	struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
 	if (!np) {
 		pr_err("Failed to find cpu%d device node\n", cpu);
 		return -ENOENT;
 	}
 
 	if (!of_check_cache_nodes(np)) {
-		of_node_put(np);
 		return -ENOENT;
 	}
 
-	prev = np;
-
 	while (index < cache_leaves(cpu)) {
 		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
 		if (this_leaf->level != 1) {
+			struct device_node *prev __free(device_node) = np;
+
 			np = of_find_next_cache_node(np);
-			of_node_put(prev);
-			prev = np;
 			if (!np)
 				break;
 		}
@@ -233,8 +228,6 @@ static int cache_setup_of_node(unsigned int cpu)
 		index++;
 	}
 
-	of_node_put(np);
-
 	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
 		return -ENOENT;
 
@@ -243,17 +236,14 @@ static int cache_setup_of_node(unsigned int cpu)
 static bool of_check_cache_nodes(struct device_node *np)
 {
-	struct device_node *next;
-
 	if (of_property_present(np, "cache-size")   ||
 	    of_property_present(np, "i-cache-size") ||
 	    of_property_present(np, "d-cache-size") ||
 	    of_property_present(np, "cache-unified"))
 		return true;
 
-	next = of_find_next_cache_node(np);
+	struct device_node *next __free(device_node) = of_find_next_cache_node(np);
 	if (next) {
-		of_node_put(next);
 		return true;
 	}
 
@@ -287,12 +277,10 @@ static int of_count_cache_leaves(struct device_node *np)
 int init_of_cache_level(unsigned int cpu)
 {
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
-	struct device_node *np = of_cpu_device_node_get(cpu);
-	struct device_node *prev = NULL;
+	struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
 	unsigned int levels = 0, leaves, level;
 
 	if (!of_check_cache_nodes(np)) {
-		of_node_put(np);
 		return -ENOENT;
 	}
 
@@ -300,30 +288,27 @@ int init_of_cache_level(unsigned int cpu)
 	if (leaves > 0)
 		levels = 1;
 
-	prev = np;
-	while ((np = of_find_next_cache_node(np))) {
-		of_node_put(prev);
-		prev = np;
+	while (1) {
+		struct device_node *prev __free(device_node) = np;
+
+		np = of_find_next_cache_node(np);
+		if (!np)
+			break;
+
 		if (!of_device_is_compatible(np, "cache"))
-			goto err_out;
+			return -EINVAL;
 		if (of_property_read_u32(np, "cache-level", &level))
-			goto err_out;
+			return -EINVAL;
 		if (level <= levels)
-			goto err_out;
+			return -EINVAL;
+
 		leaves += of_count_cache_leaves(np);
 		levels = level;
 	}
 
-	of_node_put(np);
 	this_cpu_ci->num_levels = levels;
 	this_cpu_ci->num_leaves = leaves;
 
 	return 0;
-
-err_out:
-	of_node_put(np);
-	return -EINVAL;
 }
 #else