This series tackles the issue reported at: https://git.morello-project.org/morello/kernel/linux/-/issues/6

The series is also available at: https://git.morello-project.org/Sevenarth/linux/-/commits/morello/futex-v2
v2:
 - split code in 3 commits as suggested
 - added more details in the commit bodies
 - updated the TODO notation for futex.h
 - updated the prefix for A64/C64 definitions in futex.h
 - updated the asm constraint's name to follow naming conventions
 - updated the robust list entry fetch code to use the pre-existing
   helper USER_PTR_ALIGN_DOWN
 - reverted pointer comparisons
Luca Vizzarro (3):
  arm64: futex: Enable capability-based uaccess
  futex: Handle capability-based robust list entries
  futex: Add explicit capability checking TODOs
 arch/arm64/include/asm/futex.h | 47 ++++++++++++++++++++++++----------
 kernel/futex/core.c            | 20 +++++++--------
 2 files changed, 42 insertions(+), 25 deletions(-)
When working with PCuABI we need to ensure that we receive capabilities from user space instead of pointers and perform the appropriate memory accesses.
This commit updates the arm64-specific code for the futex module so that the atomic load and store work with capabilities, hence enabling PCuABI. This commit also reverts "arm64: futex: Access user memory via raw 64-bit pointers".
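The crux of the change, in both arch_futex_atomic_op_inuser() and
futex_atomic_cmpxchg_inatomic(), is to stop stripping the capability
before the atomic sequence (excerpted from the diff below):

/* Before: the capability is dropped and the asm operates on a raw
 * 64-bit address, so no hardware capability checks can apply. */
u32 *uaddr = (u32 *)user_ptr_addr(__uaccess_mask_ptr(_uaddr));

/* After: the masked user pointer remains a capability and reaches
 * the asm via the __ASM_RW_UPTR_CONSTR ("+C") operand constraint. */
u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);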
Whenever a load/store faults, the exception raised will be handled by ex_handler_uaccess_err_zero(), which will correctly set -EFAULT as error and return execution past the loop.
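For illustration only, the retry/fault behaviour of the asm loop is
roughly equivalent to the C sketch below; do_futex_asm_once() is a
made-up stand-in for one pass of the ldxr/stlxr sequence, and the real
fixup is the exception table entry consumed by
ex_handler_uaccess_err_zero():

#define FUTEX_MAX_LOOPS	128

/* Hypothetical stand-in: returns 0 on success, -EAGAIN if the
 * store-exclusive failed, -EFAULT if the access faulted. */
static int do_futex_asm_once(unsigned int *uaddr, int oparg);

static int futex_loop_sketch(unsigned int *uaddr, int oparg)
{
	unsigned int loops = FUTEX_MAX_LOOPS;
	int ret;

	do {
		ret = do_futex_asm_once(uaddr, oparg);
		if (ret != -EAGAIN)
			return ret;	/* 0, or -EFAULT placed in the
					 * "ret" register by the fixup */
	} while (--loops);

	return -EAGAIN;			/* retry budget exhausted */
}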
Signed-off-by: Luca Vizzarro <Luca.Vizzarro@arm.com>
---
 arch/arm64/include/asm/futex.h | 47 ++++++++++++++++++++++++----------
 1 file changed, 33 insertions(+), 14 deletions(-)
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 99d73e8a3175..5bd87392724c 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -10,6 +10,18 @@
 
 #include <asm/errno.h>
 
+#ifdef CONFIG_CHERI_PURECAP_UABI
+#define __ASM_SWITCH_TO_C64	"	bx	#4\n" \
+	".arch morello+c64\n"
+#define __ASM_SWITCH_TO_A64	"	bx	#4\n" \
+	".arch morello\n"
+#define __ASM_RW_UPTR_CONSTR "+C"
+#else
+#define __ASM_SWITCH_TO_C64
+#define __ASM_SWITCH_TO_A64
+#define __ASM_RW_UPTR_CONSTR "+r"
+#endif
+
 #define FUTEX_MAX_LOOPS	128 /* What's the largest number you can think of? */
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)	\
@@ -18,20 +30,24 @@ do {									\
 									\
 	uaccess_enable_privileged();					\
 	asm volatile(							\
-"	prfm	pstl1strm, %2\n"					\
-"1:	ldxr	%w1, %2\n"						\
+	__ASM_SWITCH_TO_C64						\
+"	prfm	pstl1strm, [%2]\n"					\
+"1:	ldxr	%w1, [%2]\n"						\
 	insn "\n"							\
-"2:	stlxr	%w0, %w3, %2\n"						\
+"2:	stlxr	%w0, %w3, [%2]\n"					\
 "	cbz	%w0, 3f\n"						\
 "	sub	%w4, %w4, %w0\n"					\
 "	cbnz	%w4, 1b\n"						\
 "	mov	%w0, %w6\n"						\
 "3:\n"									\
 "	dmb	ish\n"							\
+	__ASM_SWITCH_TO_A64						\
 	_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w0)				\
 	_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w0)				\
-	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp),	\
-	  "+r" (loops)							\
+	/* TODO [PCuABI]: temporary solution for uaddr. Should be reverted to +Q \
+	 * once LLVM supports it for capabilities. */			\
+	: "=&r" (ret), "=&r" (oldval), __ASM_RW_UPTR_CONSTR (uaddr),	\
+	  "=&r" (tmp), "+r" (loops)					\
 	: "r" (oparg), "Ir" (-EAGAIN)					\
 	: "memory");							\
 	uaccess_disable_privileged();					\
@@ -41,8 +57,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 					      u32 __user *_uaddr)
 {
 	int oldval = 0, ret, tmp;
-	/* TODO [PCuABI] - perform the access via the user capability */
-	u32 *uaddr = (u32 *)user_ptr_addr(__uaccess_mask_ptr(_uaddr));
+	u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
 
 	if (!access_ok(_uaddr, sizeof(u32)))
 		return -EFAULT;
@@ -85,20 +100,20 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
 	int ret = 0;
 	unsigned int loops = FUTEX_MAX_LOOPS;
 	u32 val, tmp;
-	u32 *uaddr;
+	u32 __user *uaddr;
 
 	if (!access_ok(_uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	/* TODO [PCuABI] - perform the access via the user capability */
-	uaddr = (u32 *)user_ptr_addr(__uaccess_mask_ptr(_uaddr));
+	uaddr = __uaccess_mask_ptr(_uaddr);
 	uaccess_enable_privileged();
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-"	prfm	pstl1strm, %2\n"
-"1:	ldxr	%w1, %2\n"
+	__ASM_SWITCH_TO_C64
+"	prfm	pstl1strm, [%2]\n"
+"1:	ldxr	%w1, [%2]\n"
 "	sub	%w3, %w1, %w5\n"
 "	cbnz	%w3, 4f\n"
-"2:	stlxr	%w3, %w6, %2\n"
+"2:	stlxr	%w3, %w6, [%2]\n"
 "	cbz	%w3, 3f\n"
 "	sub	%w4, %w4, %w3\n"
 "	cbnz	%w4, 1b\n"
@@ -106,9 +121,13 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
 "3:\n"
 "	dmb	ish\n"
 "4:\n"
+	__ASM_SWITCH_TO_A64
 	_ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0)
 	_ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0)
-	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
+	/* TODO [PCuABI]: temporary solution for uaddr. Should be reverted to +Q once
+	 * LLVM supports it for capabilities. */
+	: "+r" (ret), "=&r" (val), __ASM_RW_UPTR_CONSTR (uaddr), "=&r" (tmp),
+	  "+r" (loops)
 	: "r" (oldval), "r" (newval), "Ir" (-EAGAIN)
 	: "memory");
 	uaccess_disable_privileged();
On 04/04/2023 12:30, Luca Vizzarro wrote:
> When working with PCuABI we need to ensure that we receive capabilities
> from user space instead of pointers and perform the appropriate memory
> accesses.
> 
> This commit updates the arm64-specific code for the futex module so
> that the atomic load and store work with capabilities, hence enabling
> PCuABI. This commit also reverts "arm64: futex: Access user memory via raw
"also" might make it sound like this is unrelated, but in fact reverting this commit is a prerequisite to perform capability-based uaccess. "therefore" maybe?
64-bit pointers".
Whenever a load/store faults, the exception raised will be handled by ex_handler_uaccess_err_zero(), which will correctly set -EFAULT as error and return execution past the loop.
This is how it works in general, Morello or not. The tricky bit is C64. In fact it is not obvious why we need to switch to C64 at all, since we don't need it for normal uaccess routines. Would be nice to elaborate on both: why we need C64, and why exception handling keeps working as-is.
> Signed-off-by: Luca Vizzarro <Luca.Vizzarro@arm.com>
> ---
>  arch/arm64/include/asm/futex.h | 47 ++++++++++++++++++++++++----------
>  1 file changed, 33 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
> index 99d73e8a3175..5bd87392724c 100644
> --- a/arch/arm64/include/asm/futex.h
> +++ b/arch/arm64/include/asm/futex.h
> @@ -10,6 +10,18 @@
> 
>  #include <asm/errno.h>
> 
> +#ifdef CONFIG_CHERI_PURECAP_UABI
> +#define __ASM_SWITCH_TO_C64	"	bx	#4\n" \
> +	".arch morello+c64\n"
Nit: the second line is not aligned any more.
Kevin
> +#define __ASM_SWITCH_TO_A64	"	bx	#4\n" \
> +	".arch morello\n"
> +#define __ASM_RW_UPTR_CONSTR "+C"
> +#else
> +#define __ASM_SWITCH_TO_C64
> +#define __ASM_SWITCH_TO_A64
> +#define __ASM_RW_UPTR_CONSTR "+r"
> +#endif
> +
>  #define FUTEX_MAX_LOOPS	128 /* What's the largest number you can think of? */
> 
>  #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)	\
> @@ -18,20 +30,24 @@ do {									\
>  									\
>  	uaccess_enable_privileged();					\
>  	asm volatile(							\
> -"	prfm	pstl1strm, %2\n"					\
> -"1:	ldxr	%w1, %2\n"						\
> +	__ASM_SWITCH_TO_C64						\
> +"	prfm	pstl1strm, [%2]\n"					\
> +"1:	ldxr	%w1, [%2]\n"						\
>  	insn "\n"							\
> -"2:	stlxr	%w0, %w3, %2\n"						\
> +"2:	stlxr	%w0, %w3, [%2]\n"					\
>  "	cbz	%w0, 3f\n"						\
>  "	sub	%w4, %w4, %w0\n"					\
>  "	cbnz	%w4, 1b\n"						\
>  "	mov	%w0, %w6\n"						\
>  "3:\n"									\
>  "	dmb	ish\n"							\
> +	__ASM_SWITCH_TO_A64						\
>  	_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w0)				\
>  	_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w0)				\
> -	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp),	\
> -	  "+r" (loops)							\
> +	/* TODO [PCuABI]: temporary solution for uaddr. Should be reverted to +Q \
> +	 * once LLVM supports it for capabilities. */			\
> +	: "=&r" (ret), "=&r" (oldval), __ASM_RW_UPTR_CONSTR (uaddr),	\
> +	  "=&r" (tmp), "+r" (loops)					\
>  	: "r" (oparg), "Ir" (-EAGAIN)					\
>  	: "memory");							\
>  	uaccess_disable_privileged();					\
> @@ -41,8 +57,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
>  					      u32 __user *_uaddr)
>  {
>  	int oldval = 0, ret, tmp;
> -	/* TODO [PCuABI] - perform the access via the user capability */
> -	u32 *uaddr = (u32 *)user_ptr_addr(__uaccess_mask_ptr(_uaddr));
> +	u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
> 
>  	if (!access_ok(_uaddr, sizeof(u32)))
>  		return -EFAULT;
> @@ -85,20 +100,20 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
>  	int ret = 0;
>  	unsigned int loops = FUTEX_MAX_LOOPS;
>  	u32 val, tmp;
> -	u32 *uaddr;
> +	u32 __user *uaddr;
> 
>  	if (!access_ok(_uaddr, sizeof(u32)))
>  		return -EFAULT;
> 
> -	/* TODO [PCuABI] - perform the access via the user capability */
> -	uaddr = (u32 *)user_ptr_addr(__uaccess_mask_ptr(_uaddr));
> +	uaddr = __uaccess_mask_ptr(_uaddr);
>  	uaccess_enable_privileged();
>  	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
> -"	prfm	pstl1strm, %2\n"
> -"1:	ldxr	%w1, %2\n"
> +	__ASM_SWITCH_TO_C64
> +"	prfm	pstl1strm, [%2]\n"
> +"1:	ldxr	%w1, [%2]\n"
>  "	sub	%w3, %w1, %w5\n"
>  "	cbnz	%w3, 4f\n"
> -"2:	stlxr	%w3, %w6, %2\n"
> +"2:	stlxr	%w3, %w6, [%2]\n"
>  "	cbz	%w3, 3f\n"
>  "	sub	%w4, %w4, %w3\n"
>  "	cbnz	%w4, 1b\n"
> @@ -106,9 +121,13 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
>  "3:\n"
>  "	dmb	ish\n"
>  "4:\n"
> +	__ASM_SWITCH_TO_A64
>  	_ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0)
>  	_ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0)
> -	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
> +	/* TODO [PCuABI]: temporary solution for uaddr. Should be reverted to +Q once
> +	 * LLVM supports it for capabilities. */
> +	: "+r" (ret), "=&r" (val), __ASM_RW_UPTR_CONSTR (uaddr), "=&r" (tmp),
> +	  "+r" (loops)
>  	: "r" (oldval), "r" (newval), "Ir" (-EAGAIN)
>  	: "memory");
>  	uaccess_disable_privileged();
A robust list is implemented in user space, but the kernel holds the right to walk it. Given that a robust list may implement the priority inheritance method, the approach chosen to inform the kernel of this feature is to encode it in the LSB of the uaddr of a list entry to walk from. In a PCuABI environment the uaddr is not a raw pointer but a capability and requires special handling.
This commit ensures that, no matter the kind of uaddr received, the entry is decoded correctly.
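For illustration, a hypothetical user-space snippet showing the
encoding that fetch_robust_entry() decodes; the names below are
invented for the example. Note that under PCuABI uintptr_t is
capability-sized, so tagging the LSB does not destroy the capability:

#include <stdint.h>

struct robust_list {
	struct robust_list *next;
};

/* Mark a robust list entry as using priority inheritance by
 * setting the LSB of its pointer (illustrative only). */
static struct robust_list *tag_pi_entry(struct robust_list *entry)
{
	return (struct robust_list *)((uintptr_t)entry | 1);
}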
Signed-off-by: Luca Vizzarro <Luca.Vizzarro@arm.com>
---
 kernel/futex/core.c | 16 +++++-----------
 1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index 759332a26b5a..689b8be704ae 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -31,6 +31,7 @@
  *   "The futexes are also cursed."
  *   "But they come in a choice of three flavours!"
  */
+#include <linux/cheri.h>
 #include <linux/compat.h>
 #include <linux/jhash.h>
 #include <linux/pagemap.h>
@@ -750,20 +751,13 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
 #endif
 				     unsigned int *pi)
 {
-	unsigned long uentry;
+	struct robust_list __user *uentry;
 
-	if (get_user(uentry, (unsigned long __user *)head))
+	if (get_user_ptr(uentry, head))
 		return -EFAULT;
 
-	/*
-	 * TODO [PCuABI] - pointer conversion to be checked
-	 * Each entry points to either next one or head of the list
-	 * so this should probably operate on capabilities and use
-	 * get_user_ptr instead, or validate the capability prior to
-	 * get_user
-	 */
-	*entry = uaddr_to_user_ptr(uentry & ~1UL);
-	*pi = uentry & 1;
+	*entry = USER_PTR_ALIGN_DOWN(uentry, 2);
+	*pi = user_ptr_addr(uentry) & 1;
 
 	return 0;
 }
On 04/04/2023 12:30, Luca Vizzarro wrote:
> A robust list is implemented in user space, but the kernel holds the
> right to walk it. Given that a robust list may implement the priority
> inheritance method, the approach chosen to inform the kernel of this
> feature is to encode it in the LSB of the uaddr of a list entry to
> walk from.
The second sentence looks completely disconnected from the first (and the one after), so it's not clear what its point is. We could probably do without, as this is true regardless of the ABI. OTOH it can be useful to clarify that USER_PTR_ALIGN_DOWN() is used to clear the LSB because it preserves (capability) user pointers.
> In a PCuABI environment the uaddr is not a raw pointer but a
s/pointer/address/ here too (in PCuABI a pointer is a capability by definition).
Also, did you mean "uentry" instead of "uaddr"? The code uses the former, not the latter.
> capability and requires special handling.
> This commit ensures that, no matter the kind of uaddr received, the
> entry is decoded correctly.
> Signed-off-by: Luca Vizzarro <Luca.Vizzarro@arm.com>
> ---
>  kernel/futex/core.c | 16 +++++-----------
>  1 file changed, 5 insertions(+), 11 deletions(-)
> diff --git a/kernel/futex/core.c b/kernel/futex/core.c
> index 759332a26b5a..689b8be704ae 100644
> --- a/kernel/futex/core.c
> +++ b/kernel/futex/core.c
> @@ -31,6 +31,7 @@
>   *   "The futexes are also cursed."
>   *   "But they come in a choice of three flavours!"
>   */
> +#include <linux/cheri.h>
Just realised, do you actually need that?
Kevin
>  #include <linux/compat.h>
>  #include <linux/jhash.h>
>  #include <linux/pagemap.h>
> @@ -750,20 +751,13 @@ static inline int fetch_robust_entry(struct robust_list __user **entry,
>  #endif
>  				     unsigned int *pi)
>  {
> -	unsigned long uentry;
> +	struct robust_list __user *uentry;
> 
> -	if (get_user(uentry, (unsigned long __user *)head))
> +	if (get_user_ptr(uentry, head))
>  		return -EFAULT;
> 
> -	/*
> -	 * TODO [PCuABI] - pointer conversion to be checked
> -	 * Each entry points to either next one or head of the list
> -	 * so this should probably operate on capabilities and use
> -	 * get_user_ptr instead, or validate the capability prior to
> -	 * get_user
> -	 */
> -	*entry = uaddr_to_user_ptr(uentry & ~1UL);
> -	*pi = uentry & 1;
> +	*entry = USER_PTR_ALIGN_DOWN(uentry, 2);
> +	*pi = user_ptr_addr(uentry) & 1;
> 
>  	return 0;
>  }
Within the futex module, there are some cases in which a raw pointer is handled. When working with PCuABI this means that the capability is discarded, and no checks can be performed by the hardware.
This commit adds TODOs whenever explicit capability checks need to be performed, right before the raw pointer is extracted.
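As a purely illustrative sketch of what resolving these TODOs could
look like; the helper cheri_check_cap() and the permission flags are
assumptions based on <linux/cheri.h>, not something this series
introduces:

/* Hypothetical: validate the user capability explicitly before its
 * raw address is extracted. Helper and flag names are assumptions. */
static inline int futex_check_cap(u32 __user *uaddr)
{
	if (!cheri_check_cap(uaddr, sizeof(u32),
			     CHERI_PERM_LOAD | CHERI_PERM_STORE))
		return -EFAULT;
	return 0;
}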
Signed-off-by: Luca Vizzarro <Luca.Vizzarro@arm.com>
---
 kernel/futex/core.c | 4 ++++
 1 file changed, 4 insertions(+)
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index 689b8be704ae..dd864c0a2e03 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -227,6 +227,8 @@ int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
 	struct address_space *mapping;
 	int err, ro = 0;
 
+	/* TODO [PCuABI] - capability checks for uaccess */
+
 	/*
 	 * The futex address must be "naturally" aligned.
 	 */
@@ -412,6 +414,8 @@ int fault_in_user_writeable(u32 __user *uaddr)
 	struct mm_struct *mm = current->mm;
 	int ret;
 
+	/* TODO [PCuABI] - capability checks for uaccess */
+
 	mmap_read_lock(mm);
 	ret = fixup_user_fault(mm, user_ptr_addr(uaddr),
 			       FAULT_FLAG_WRITE, NULL);
On 04/04/2023 12:30, Luca Vizzarro wrote:
> Within the futex module, there are some cases in which a raw pointer is
s/pointer/address/ (below too), could also say "user address" to disambiguate (raw kernel addresses are not a concern in our hybrid kernel).
Kevin
> handled. When working with PCuABI this means that the capability is
> discarded, and no checks can be performed by the hardware.
> 
> This commit adds TODOs whenever explicit capability checks need to be
> performed, right before the raw pointer is extracted.
> 
> Signed-off-by: Luca Vizzarro <Luca.Vizzarro@arm.com>
> ---
>  kernel/futex/core.c | 4 ++++
>  1 file changed, 4 insertions(+)
> 
> diff --git a/kernel/futex/core.c b/kernel/futex/core.c
> index 689b8be704ae..dd864c0a2e03 100644
> --- a/kernel/futex/core.c
> +++ b/kernel/futex/core.c
> @@ -227,6 +227,8 @@ int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
>  	struct address_space *mapping;
>  	int err, ro = 0;
> 
> +	/* TODO [PCuABI] - capability checks for uaccess */
> +
>  	/*
>  	 * The futex address must be "naturally" aligned.
>  	 */
> @@ -412,6 +414,8 @@ int fault_in_user_writeable(u32 __user *uaddr)
>  	struct mm_struct *mm = current->mm;
>  	int ret;
> 
> +	/* TODO [PCuABI] - capability checks for uaccess */
> +
>  	mmap_read_lock(mm);
>  	ret = fixup_user_fault(mm, user_ptr_addr(uaddr),
>  			       FAULT_FLAG_WRITE, NULL);