ac3b432839
module_layout manages different types of memory (text, data, rodata, etc.) in one allocation, which is problematic for a few reasons:

1. It is hard to enable CONFIG_STRICT_MODULE_RWX.
2. It is hard to use huge pages in modules (and not break strict rwx).
3. Many archs use module_layout for arch-specific data, but it is not obvious how these data are used (are they RO, RX, or RW?).

Improve the situation by replacing the 2 (or 3) module_layout per module with up to 7 module_memory per module: MOD_TEXT, MOD_DATA, MOD_RODATA, MOD_RO_AFTER_INIT, MOD_INIT_TEXT, MOD_INIT_DATA, MOD_INIT_RODATA, and allocating them separately.

This adds slightly more entries to mod_tree (from up to 3 entries per module to up to 7 entries per module). However, this at most adds a small constant overhead to __module_address(), which is expected to be fast.

Various archs use module_layout for different data. These data are put into different module_memory based on their location in module_layout. IOW, data that used to go with text is allocated with MOD_TEXT; data that used to go with data is allocated with MOD_DATA, etc.

module_memory simplifies quite a bit of the module code. For example, ARCH_WANTS_MODULES_DATA_IN_VMALLOC is a lot cleaner, as it just uses a different allocator for the data. kernel/module/strict_rwx.c is also much cleaner with module_memory.

Signed-off-by: Song Liu <song@kernel.org>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Guenter Roeck <linux@roeck-us.net>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
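For reference, a minimal sketch of the supporting definitions the commit message describes. The enum names and iteration macros come from the message itself; the struct field layout is inferred from how tree_lookup.c below dereferences mod_mem->base, mod_mem->size, and ->mtn, so treat the details as assumptions rather than the exact upstream header:

enum mod_mem_type {
	MOD_TEXT = 0,
	MOD_DATA,
	MOD_RODATA,
	MOD_RO_AFTER_INIT,
	MOD_INIT_TEXT,
	MOD_INIT_DATA,
	MOD_INIT_RODATA,

	MOD_MEM_NUM_TYPES,
};

/* One separately allocated region per memory type. */
struct module_memory {
	void *base;
	unsigned int size;
#ifdef CONFIG_MODULES_TREE_LOOKUP
	struct mod_tree_node mtn;	/* latch-tree node for __module_address() */
#endif
};

/* Iterate over all memory types, or only over one class (e.g. init). */
#define for_each_mod_mem_type(type)			\
	for (enum mod_mem_type (type) = 0;		\
	     (type) < MOD_MEM_NUM_TYPES; (type)++)

#define mod_mem_type_is_init(type)	\
	((type) == MOD_INIT_TEXT ||	\
	 (type) == MOD_INIT_DATA ||	\
	 (type) == MOD_INIT_RODATA)

#define for_class_mod_mem_type(type, class)	\
	for_each_mod_mem_type(type)		\
		if (mod_mem_type_is_##class(type))

With these definitions, mod_tree_insert() below adds one tree entry per non-empty region, which is where the "up to 7 entries per module" figure comes from.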
113 lines
2.6 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Modules tree lookup
 *
 * Copyright (C) 2015 Peter Zijlstra
 * Copyright (C) 2015 Rusty Russell
 */

#include <linux/module.h>
#include <linux/rbtree_latch.h>

#include "internal.h"

/*
 * Use a latched RB-tree for __module_address(); this allows us to use
 * RCU-sched lookups of the address from any context.
 *
 * This is conditional on PERF_EVENTS || TRACING because those can really hit
 * __module_address() hard by doing a lot of stack unwinding; potentially from
 * NMI context.
 */

static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
{
	struct module_memory *mod_mem = container_of(n, struct module_memory, mtn.node);

	return (unsigned long)mod_mem->base;
}

static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
{
	struct module_memory *mod_mem = container_of(n, struct module_memory, mtn.node);

	return (unsigned long)mod_mem->size;
}

static __always_inline bool
mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
	return __mod_tree_val(a) < __mod_tree_val(b);
}

static __always_inline int
mod_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long start, end;

	start = __mod_tree_val(n);
	if (val < start)
		return -1;

	end = start + __mod_tree_size(n);
	if (val >= end)
		return 1;

	return 0;
}

static const struct latch_tree_ops mod_tree_ops = {
	.less = mod_tree_less,
	.comp = mod_tree_comp,
};

static noinline void __mod_tree_insert(struct mod_tree_node *node, struct mod_tree_root *tree)
{
	latch_tree_insert(&node->node, &tree->root, &mod_tree_ops);
}

static void __mod_tree_remove(struct mod_tree_node *node, struct mod_tree_root *tree)
{
	latch_tree_erase(&node->node, &tree->root, &mod_tree_ops);
}

/*
 * These modifications: insert, remove_init and remove; are serialized by the
 * module_mutex.
 */
void mod_tree_insert(struct module *mod)
{
	for_each_mod_mem_type(type) {
		mod->mem[type].mtn.mod = mod;
		if (mod->mem[type].size)
			__mod_tree_insert(&mod->mem[type].mtn, &mod_tree);
	}
}

void mod_tree_remove_init(struct module *mod)
{
	for_class_mod_mem_type(type, init) {
		if (mod->mem[type].size)
			__mod_tree_remove(&mod->mem[type].mtn, &mod_tree);
	}
}

void mod_tree_remove(struct module *mod)
{
	for_each_mod_mem_type(type) {
		if (mod->mem[type].size)
			__mod_tree_remove(&mod->mem[type].mtn, &mod_tree);
	}
}

struct module *mod_find(unsigned long addr, struct mod_tree_root *tree)
{
	struct latch_tree_node *ltn;

	ltn = latch_tree_find((void *)addr, &tree->root, &mod_tree_ops);
	if (!ltn)
		return NULL;

	return container_of(ltn, struct mod_tree_node, node)->mod;
}
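As a usage note, mod_find() is the lookup half of this file: __module_address() in kernel/module/main.c calls it to map an arbitrary address back to its owning module. A rough sketch of such a caller, under the assumption that mod_tree_root carries addr_min/addr_max bounds and that unformed modules must not be returned (simplified; not the verbatim upstream function):

struct module *__module_address(unsigned long addr)
{
	struct module *mod;

	/* Cheap range check before walking the latch tree. */
	if (addr < mod_tree.addr_min || addr > mod_tree.addr_max)
		return NULL;

	mod = mod_find(addr, &mod_tree);

	/* A module mid-load is not yet visible to the rest of the kernel. */
	if (mod && mod->state == MODULE_STATE_UNFORMED)
		mod = NULL;

	return mod;
}

Because the tree is latched, this lookup is safe from any context that holds off RCU-sched, including the NMI-time stack unwinding mentioned in the comment at the top of the file.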