mm: refactor map_deny_write_exec()
Refactor the map_deny_write_exec() to not unnecessarily require a VMA
parameter but rather to accept VMA flags parameters, which allows us to
use this function early in mmap_region() in a subsequent commit.
While we're here, we refactor the function to be more readable and add
some additional documentation.
Link: https://lkml.kernel.org/r/6be8bb59cd7c68006ebb006eb9d8dc27104b1f70.1730224667.git.lorenzo.stoakes@oracle.com
Fixes: deb0f65628 ("mm/mmap: undo ->mmap() when arch_validate_flags() fails")
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reported-by: Jann Horn <jannh@google.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Jann Horn <jannh@google.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Helge Deller <deller@gmx.de>
Cc: James E.J. Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Will Deacon <will@kernel.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -188,16 +188,31 @@ static inline bool arch_memory_deny_write_exec_supported(void)
  *
  *	d)	mmap(PROT_READ | PROT_EXEC)
  *		mmap(PROT_READ | PROT_EXEC | PROT_BTI)
+ *
+ * This is only applicable if the user has set the Memory-Deny-Write-Execute
+ * (MDWE) protection mask for the current process.
+ *
+ * @old specifies the VMA flags the VMA originally possessed, and @new the ones
+ * we propose to set.
+ *
+ * Return: false if proposed change is OK, true if not ok and should be denied.
  */
-static inline bool map_deny_write_exec(struct vm_area_struct *vma, unsigned long vm_flags)
+static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
 {
+	/* If MDWE is disabled, we have nothing to deny. */
 	if (!test_bit(MMF_HAS_MDWE, &current->mm->flags))
 		return false;
 
-	if ((vm_flags & VM_EXEC) && (vm_flags & VM_WRITE))
+	/* If the new VMA is not executable, we have nothing to deny. */
+	if (!(new & VM_EXEC))
+		return false;
+
+	/* Under MDWE we do not accept newly writably executable VMAs... */
+	if (new & VM_WRITE)
 		return true;
 
-	if (!(vma->vm_flags & VM_EXEC) && (vm_flags & VM_EXEC))
+	/* ...nor previously non-executable VMAs becoming executable. */
+	if (!(old & VM_EXEC))
 		return true;
 
 	return false;
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1505,7 +1505,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		vma_set_anonymous(vma);
 	}
 
-	if (map_deny_write_exec(vma, vma->vm_flags)) {
+	if (map_deny_write_exec(vma->vm_flags, vma->vm_flags)) {
 		error = -EACCES;
 		goto close_and_free_vma;
 	}
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -810,7 +810,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
 			break;
 		}
 
-		if (map_deny_write_exec(vma, newflags)) {
+		if (map_deny_write_exec(vma->vm_flags, newflags)) {
 			error = -EACCES;
 			break;
 		}
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -42,7 +42,7 @@ struct vma_munmap_struct {
 	int vma_count;			/* Number of vmas that will be removed */
 	bool unlock;			/* Unlock after the munmap */
 	bool clear_ptes;		/* If there are outstanding PTE to be cleared */
-	/* 1 byte hole */
+	/* 2 byte hole */
 	unsigned long nr_pages;		/* Number of pages being removed */
 	unsigned long locked_vm;	/* Number of locked pages */
 	unsigned long nr_accounted;	/* Number of VM_ACCOUNT pages */
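
For illustration only, not part of the patch: a minimal, standalone userspace sketch that mirrors the decision logic of the refactored map_deny_write_exec(old, new) helper and exercises the cases (a)-(d) documented in its comment above. The VM_WRITE/VM_EXEC values and the mdwe_enabled toggle are stand-ins for the kernel's vm_flags bits and the per-process MMF_HAS_MDWE flag, chosen here only so the snippet compiles on its own.

/*
 * Illustrative stand-alone sketch -- NOT kernel code.  The flag values and
 * the mdwe_enabled toggle are stand-ins for the kernel's VM_* vm_flags bits
 * and the MMF_HAS_MDWE process flag.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define VM_WRITE 0x2UL  /* stand-in for the kernel's VM_WRITE */
#define VM_EXEC  0x4UL  /* stand-in for the kernel's VM_EXEC */

static bool mdwe_enabled = true;  /* stands in for MMF_HAS_MDWE */

/* Same decision logic as the refactored helper: @old = flags the VMA already
 * has, @new = the flags we propose to set. */
static bool map_deny_write_exec(unsigned long old, unsigned long new)
{
        if (!mdwe_enabled)
                return false;   /* MDWE disabled: nothing to deny */
        if (!(new & VM_EXEC))
                return false;   /* new mapping is not executable */
        if (new & VM_WRITE)
                return true;    /* writable + executable: deny */
        if (!(old & VM_EXEC))
                return true;    /* gaining exec it never had: deny */
        return false;
}

int main(void)
{
        /* a) mmap(PROT_WRITE | PROT_EXEC): at mmap time old == new, denied. */
        assert(map_deny_write_exec(VM_WRITE | VM_EXEC, VM_WRITE | VM_EXEC));

        /* b) mmap(PROT_WRITE) then mprotect(PROT_EXEC): denied. */
        assert(map_deny_write_exec(VM_WRITE, VM_EXEC));

        /* c) mmap(PROT_WRITE), mprotect(PROT_READ), mprotect(PROT_EXEC):
         *    still denied, the VMA was never executable. */
        assert(map_deny_write_exec(0, VM_EXEC));

        /* d) mmap(PROT_READ | PROT_EXEC), and mprotect() calls that keep an
         *    already-executable VMA executable, remain allowed. */
        assert(!map_deny_write_exec(VM_EXEC, VM_EXEC));

        /* With MDWE not enabled for the process, nothing is denied. */
        mdwe_enabled = false;
        assert(!map_deny_write_exec(VM_WRITE | VM_EXEC, VM_WRITE | VM_EXEC));

        printf("MDWE cases behave as documented\n");
        return 0;
}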