From: Amit Daniel Kachhap <amitdaniel.kachhap@arm.com>
PCuABI memory reservations require adding reservation properties while creating and modifying the VMA. reserv_vma_set_reserv() and variants are used to update those reservation details. Currently, these properties are added only for mmap/mremap, and later commits will add them for other syscalls (shmat) and special VMA mappings.
PCuABI memory reservations also prevent merging or expanding VMAs that do not belong to the same reservation. Use suitable reservation interfaces to check those properties before performing such operations on the VMA.
Signed-off-by: Amit Daniel Kachhap <amitdaniel.kachhap@arm.com> Co-developed-by: Kevin Brodsky <kevin.brodsky@arm.com> Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com> --- include/linux/mm.h | 4 ++-- kernel/fork.c | 3 +++ mm/mmap.c | 32 +++++++++++++++++++++++++------- mm/mremap.c | 13 +++++++++---- 4 files changed, 39 insertions(+), 13 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h index efc17977a31e..f9b5ad66a938 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3259,7 +3259,7 @@ extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); extern void unlink_file_vma(struct vm_area_struct *); extern struct vm_area_struct *copy_vma(struct vm_area_struct **, unsigned long addr, unsigned long len, pgoff_t pgoff, - bool *need_rmap_locks); + bool *need_rmap_locks, struct reserv_struct *reserv_info); extern void exit_mmap(struct mm_struct *); struct vm_area_struct *vma_modify(struct vma_iterator *vmi, struct vm_area_struct *prev, @@ -3365,7 +3365,7 @@ extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned lo
extern unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, - struct list_head *uf); + struct list_head *uf, unsigned long prot); extern unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, diff --git a/kernel/fork.c b/kernel/fork.c index a460a65624d7..ccbfc0c520ae 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -99,6 +99,7 @@ #include <linux/stackprotector.h> #include <linux/user_events.h> #include <linux/iommu.h> +#include <linux/mm_reserv.h>
#include <asm/pgalloc.h> #include <linux/uaccess.h> @@ -678,6 +679,8 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, goto out; khugepaged_fork(mm, oldmm);
+ reserv_fork(mm, oldmm); + retval = vma_iter_bulk_alloc(&vmi, oldmm->map_count); if (retval) goto out; diff --git a/mm/mmap.c b/mm/mmap.c index 6ae675961785..40d64fa163a2 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -911,7 +911,8 @@ static struct vm_area_struct /* Can we merge the predecessor? */ if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy) && can_vma_merge_after(prev, vm_flags, anon_vma, file, - pgoff, vm_userfaultfd_ctx, anon_name)) { + pgoff, vm_userfaultfd_ctx, anon_name) + && reserv_vma_range_within_reserv(prev, addr, end - addr)) { merge_prev = true; vma_prev(vmi); } @@ -920,7 +921,8 @@ static struct vm_area_struct /* Can we merge the successor? */ if (next && mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen, - vm_userfaultfd_ctx, anon_name)) { + vm_userfaultfd_ctx, anon_name) && + reserv_vma_range_within_reserv(next, addr, end - addr)) { merge_next = true; }
@@ -1382,7 +1384,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr, vm_flags |= VM_NORESERVE; }
- addr = mmap_region(file, addr, len, vm_flags, pgoff, uf); + addr = mmap_region(file, addr, len, vm_flags, pgoff, uf, prot); if (!IS_ERR_VALUE(addr) && ((vm_flags & VM_LOCKED) || (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) @@ -2785,7 +2787,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, - struct list_head *uf) + struct list_head *uf, unsigned long prot) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma = NULL; @@ -2797,6 +2799,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr, bool writable_file_mapping = false; pgoff_t vm_pgoff; int error; + struct reserv_struct reserv_info; + bool new_reserv; VMA_ITERATOR(vmi, mm, addr);
/* Check against address space limit. */ @@ -2814,6 +2818,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr, return -ENOMEM; }
+ new_reserv = !reserv_find_reserv_info_range(addr, len, true, &reserv_info); + /* Unmap any existing mapping in the area */ if (do_vmi_munmap(&vmi, mm, addr, len, uf, false)) return -ENOMEM; @@ -2840,7 +2846,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr, /* Check next */ if (next && next->vm_start == end && !vma_policy(next) && can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen, - NULL_VM_UFFD_CTX, NULL)) { + NULL_VM_UFFD_CTX, NULL) && + reserv_vma_range_within_reserv(next, addr, len)) { merge_end = next->vm_end; vma = next; vm_pgoff = next->vm_pgoff - pglen; @@ -2851,7 +2858,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr, (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file, pgoff, vma->vm_userfaultfd_ctx, NULL) : can_vma_merge_after(prev, vm_flags, NULL, file, pgoff, - NULL_VM_UFFD_CTX, NULL))) { + NULL_VM_UFFD_CTX, NULL)) && + reserv_vma_range_within_reserv(prev, addr, len)) { merge_start = prev->vm_start; vma = prev; vm_pgoff = prev->vm_pgoff; @@ -2959,6 +2967,12 @@ unsigned long mmap_region(struct file *file, unsigned long addr, if (vma_iter_prealloc(&vmi, vma)) goto close_and_free_vma;
+ if (new_reserv) { + reserv_vma_set_reserv(vma, addr, len, prot); + } else { + reserv_vma_set_reserv_data(vma, &reserv_info); + } + /* Lock the VMA since it is modified after insertion into VMA tree */ vma_start_write(vma); vma_iter_store(&vmi, vma); @@ -3432,7 +3446,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) */ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, unsigned long addr, unsigned long len, pgoff_t pgoff, - bool *need_rmap_locks) + bool *need_rmap_locks, struct reserv_struct *reserv_info) { struct vm_area_struct *vma = *vmap; unsigned long vma_start = vma->vm_start; @@ -3484,6 +3498,10 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, new_vma->vm_start = addr; new_vma->vm_end = addr + len; new_vma->vm_pgoff = pgoff; + if (reserv_info) + reserv_vma_set_reserv_data(new_vma, reserv_info); + else + reserv_vma_set_reserv_start_len(new_vma, addr, len); if (vma_dup_policy(vma, new_vma)) goto out_free_vma; if (anon_vma_clone(new_vma, vma)) diff --git a/mm/mremap.c b/mm/mremap.c index 515217a95293..6a9fb59df4a6 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -651,7 +651,8 @@ static unsigned long move_vma(struct vm_area_struct *vma, unsigned long old_addr, unsigned long old_len, unsigned long new_len, unsigned long new_addr, bool *locked, unsigned long flags, - struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap) + struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap, + struct reserv_struct *reserv_info) { long to_account = new_len - old_len; struct mm_struct *mm = vma->vm_mm; @@ -705,7 +706,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, vma_start_write(vma); new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, - &need_rmap_locks); + &need_rmap_locks, reserv_info); if (!new_vma) { if (vm_flags & VM_ACCOUNT) vm_unacct_memory(to_account >> PAGE_SHIFT); @@ -874,6 +875,7 @@ static unsigned long mremap_to(unsigned 
long addr, unsigned long old_len, struct vm_area_struct *vma; unsigned long ret = -EINVAL; unsigned long map_flags = 0; + struct reserv_struct reserv_info, *reserv_ptr = NULL;
if (offset_in_page(new_addr)) goto out; @@ -902,6 +904,9 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, if ((mm->map_count + 2) >= sysctl_max_map_count - 3) return -ENOMEM;
+ if (reserv_find_reserv_info_range(new_addr, new_len, true, &reserv_info)) + reserv_ptr = &reserv_info; + if (flags & MREMAP_FIXED) { ret = do_munmap(mm, new_addr, new_len, uf_unmap_early); if (ret) @@ -945,7 +950,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, new_addr = ret;
ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf, - uf_unmap); + uf_unmap, reserv_ptr);
out: return ret; @@ -1160,7 +1165,7 @@ SYSCALL_DEFINE5(__retptr__(mremap), user_uintptr_t, addr, unsigned long, old_len }
ret = move_vma(vma, addr, old_len, new_len, new_addr, - &locked, flags, &uf, &uf_unmap); + &locked, flags, &uf, &uf_unmap, NULL); } out: if (offset_in_page(ret))