On 25/10/2023 07:36, Amit Daniel Kachhap wrote:
[...]
> @@ -1225,7 +1226,7 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode,
>  /*
>   * The caller must write-lock current->mm->mmap_lock.
>   */
> -unsigned long do_mmap(struct file *file, unsigned long addr,
> +user_uintptr_t do_mmap(struct file *file, user_uintptr_t user_addr,
If we're going to rename parameters, let's fix the address/pointer terminology at the same time, maybe user_ptr?
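i.e. something like (naming only, no functional change):

        user_uintptr_t do_mmap(struct file *file, user_uintptr_t user_ptr,
                               unsigned long len, unsigned long prot,
                               unsigned long flags, unsigned long pgoff,
                               unsigned long *populate, struct list_head *uf)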
>  			unsigned long len, unsigned long prot,
>  			unsigned long flags, unsigned long pgoff,
>  			unsigned long *populate, struct list_head *uf)
> @@ -1233,6 +1234,14 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
>  	struct mm_struct *mm = current->mm;
>  	vm_flags_t vm_flags;
>  	int pkey = 0;
> +	unsigned long addr = (ptraddr_t)user_addr;
> +	bool new_caps = true;
> +#ifdef CONFIG_CHERI_PURECAP_UABI
> +	bool ignore_reserv = false;
> +	VMA_ITERATOR(vmi, mm, addr);
> +#else
> +	bool ignore_reserv = true;
> +#endif
> 
>  	validate_mm(mm);
>  	*populate = 0;
> 
> @@ -1240,6 +1249,23 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
>  	if (!len)
>  		return -EINVAL;
> 
> +#ifdef CONFIG_CHERI_PURECAP_UABI
> +	if (is_compat_task()) {
> +		ignore_reserv = true;
> +		goto skip_pcuabi_checks;
> +	}
> +	if (cheri_tag_get(user_addr)) {
> +		if (!capability_owns_range(user_addr, addr, len) || !(flags & MAP_FIXED))
> +			return -EINVAL;
> +		if (!reserv_vmi_range_fully_mapped(&vmi, addr, len))
> +			return -ERESERVATION;
The specified error is ENOMEM in this case:

    If any part of AlignedRange(addr.address, length) is not currently
    mapped, then the call fails with -ENOMEM.
> +		new_caps = false;
> +	} else {
> +		if (user_addr && !cheri_is_null_derived(user_addr))
If the tag is not set, then the capability must always be null-derived, whether its address is 0 or not (sketch below the quoted block).
> +			return -EINVAL;
> +	}
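In other words, the address check should simply be dropped, i.e. something like:

        } else {
                if (!cheri_is_null_derived(user_addr))
                        return -EINVAL;
        }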
> +skip_pcuabi_checks:
> +#endif
> 
>  	/*
>  	 * Does the application expect PROT_READ to imply PROT_EXEC?
>  	 *
> @@ -1397,11 +1423,26 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
>  		vm_flags |= VM_NORESERVE;
>  	}
> 
> -	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
> +	if (!ignore_reserv)
> +		vm_flags |= VM_PCUABI_RESERVE;
> +	if (new_caps)
> +		user_addr = addr;
> +	addr = mmap_region(file, user_addr, len, vm_flags, pgoff, uf, prot);
>  	if (!IS_ERR_VALUE(addr) &&
>  	    ((vm_flags & VM_LOCKED) ||
>  	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
>  		*populate = len;
> +	if (!IS_ERR_VALUE(addr)) {
> +		if (!ignore_reserv) {
> +			if (new_caps)
> +				user_addr = build_owning_capability(addr, len, prot);
> +		} else {
> +			user_addr = (user_uintptr_t)uaddr_to_user_ptr_safe(addr);
If ignore_reserv is true, then we are not in PCuABI, so we can simply return the address.
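Something along these lines (untested):

        if (!IS_ERR_VALUE(addr)) {
                if (ignore_reserv)
                        return addr;
                if (new_caps)
                        user_addr = build_owning_capability(addr, len, prot);
                return user_addr;
        }
        return addr;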
> +		}
> +		return user_addr;
> +	}
>  	return addr;
>  }
> 
> @@ -2559,11 +2600,13 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
>   * Returns: -EINVAL on failure, 1 on success and unlock, 0 otherwise.
>   */
>  int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
> -		  unsigned long start, size_t len, struct list_head *uf,
> +		  user_uintptr_t user_start, size_t len, struct list_head *uf,
>  		  bool downgrade)
>  {
>  	unsigned long end;
>  	struct vm_area_struct *vma;
> +	int ret;
> +	unsigned long start = (ptraddr_t)user_start;
> 
>  	if ((offset_in_page(start)) || start > TASK_SIZE ||
>  	    len > TASK_SIZE-start)
>  		return -EINVAL;
> 
> @@ -2580,7 +2623,16 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
>  	if (!vma)
>  		return 0;
> 
> -	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, downgrade);
> +	if (vma->vm_flags & VM_PCUABI_RESERVE) {
> +		if (!capability_owns_range(user_start, start, len))
> +			return -EINVAL;
> +		if (!reserv_vma_match_capability(vma, user_start))
IIUC this checks that the bounds of the capability match those of the reservation that contains the mapping. This is not required; what is required is that the capability bounds are a subset of the reservation:

    Its bounds are checked against existing reservations. If the bounds of
    the capability are not contained within the bounds of any existing
    reservation, the call fails with -ERESERVATION.

That also applies to the other syscalls.
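Roughly what I have in mind, as a sketch only (reserv_vma_cap_within_reserv and the reserv_start/reserv_len fields are invented names for wherever the reservation bounds end up being stored; the cheri_*_get() accessors are the usual ones):

        /* Sketch: the capability bounds only need to be contained
         * within the reservation, not equal to it. */
        static bool reserv_vma_cap_within_reserv(struct vm_area_struct *vma,
                                                 user_uintptr_t cap)
        {
                ptraddr_t base = cheri_base_get(cap);
                size_t len = cheri_length_get(cap);

                return base >= vma->reserv_start &&
                       base + len <= vma->reserv_start + vma->reserv_len;
        }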
> +			return -ERESERVATION;
> +	}
> +	ret = do_vmi_align_munmap(vmi, vma, mm, start, end, uf, downgrade);
> +	return ret;
>  }
> 
>  /*
>   * do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
> @@ -2589,7 +2641,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
>   * @len: The length to be munmapped.
>   * @uf: The userfaultfd list_head
>   */
> -int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
> +int do_munmap(struct mm_struct *mm, user_uintptr_t start, size_t len,
>  	      struct list_head *uf)
>  {
>  	VMA_ITERATOR(vmi, mm, start);
> 
> @@ -2597,15 +2649,16 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
>  	return do_vmi_munmap(&vmi, mm, start, len, uf, false);
>  }
> 
> -unsigned long mmap_region(struct file *file, unsigned long addr,
> +unsigned long mmap_region(struct file *file, user_uintptr_t user_addr,
>  		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
> -		struct list_head *uf)
> +		struct list_head *uf, unsigned long prot)
>  {
>  	struct mm_struct *mm = current->mm;
>  	struct vm_area_struct *vma = NULL;
>  	struct vm_area_struct *next, *prev, *merge;
>  	pgoff_t pglen = len >> PAGE_SHIFT;
>  	unsigned long charged = 0;
> +	unsigned long addr = (ptraddr_t)user_addr;
>  	unsigned long end = addr + len;
>  	unsigned long merge_start = addr, merge_end = end;
>  	pgoff_t vm_pgoff;
> @@ -2628,7 +2681,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
>  	}
> 
>  	/* Unmap any existing mapping in the area */
> -	if (do_vmi_munmap(&vmi, mm, addr, len, uf, false))
> +	if (do_vmi_munmap(&vmi, mm, user_addr, len, uf, false))
>  		return -ENOMEM;
> 
>  	/*
> @@ -2643,7 +2696,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
>  	next = vma_next(&vmi);
>  	prev = vma_prev(&vmi);
> -	if (vm_flags & VM_SPECIAL)
> +	if (vm_flags & (VM_SPECIAL | VM_PCUABI_RESERVE))
This effectively prevents merging mappings altogether in PCuABI. That is too coarse: we only want to prevent merging mappings that belong to different reservations.
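Not a worked-out fix, but the idea would be to keep checking VM_SPECIAL alone here and make the merge predicates reservation-aware instead, e.g. with a helper along these lines (the helper name and reserv_start/reserv_len fields are invented):

        /* Sketch: only vmas belonging to the same reservation may merge */
        static bool reserv_vmas_mergeable(struct vm_area_struct *a,
                                          struct vm_area_struct *b)
        {
                if ((a->vm_flags & VM_PCUABI_RESERVE) !=
                    (b->vm_flags & VM_PCUABI_RESERVE))
                        return false;
                if (!(a->vm_flags & VM_PCUABI_RESERVE))
                        return true;
                return a->reserv_start == b->reserv_start &&
                       a->reserv_len == b->reserv_len;
        }

can_vma_merge_before()/can_vma_merge_after() could then use something like this instead of unconditionally bailing out on VM_PCUABI_RESERVE.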
>  		goto cannot_expand;
> 
>  	/* Attempt to expand an old mapping */
[...]

Kevin