[CHERI] Add tests for __atomic_always_lock_free(sizeof(uintptr_t))
We don't yet return true here for purecap CHERI, since clang currently
emits libcalls for atomic operations on non-capability types of
capability size, such as __int128 on 64-bit RISC-V.
arichardson committed Sep 22, 2023
1 parent fb9ffe2 commit 3f32365
Showing 2 changed files with 306 additions and 0 deletions.
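
The gist of the change, distilled (a hypothetical sketch assuming a purecap riscv64 target, not part of the commit): an atomic on a capability-size capability lowers to a native inline operation, while an atomic on a capability-size plain integer such as __int128 still lowers to an __atomic_* libcall, which is why __atomic_always_lock_free(sizeof(uintptr_t), 0) cannot yet be folded to true.

/* Sketch only: assumes purecap riscv64, where sizeof(__intcap) ==
 * sizeof(__int128) == 16. */
__intcap load_capability(__intcap *p) {
  return __atomic_load_n(p, __ATOMIC_SEQ_CST); /* inline capability load */
}
__int128 load_plain_i128(__int128 *p) {
  return __atomic_load_n(p, __ATOMIC_SEQ_CST); /* emits an __atomic_load libcall */
}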
281 changes: 281 additions & 0 deletions clang/test/CodeGen/cheri/atomic-lock-free.c
@@ -0,0 +1,281 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature
/// Check that we emit inline atomics rather than library calls for capability-size atomics
// RUN: %riscv64_cheri_purecap_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify | opt -S -mem2reg | FileCheck %s --check-prefixes=PURECAP64
// RUN: %riscv64_cheri_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify | opt -S -mem2reg | FileCheck %s --check-prefixes=HYBRID64
// RUN: %riscv32_cheri_purecap_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify | opt -S -mem2reg | FileCheck %s --check-prefixes=PURECAP32
// RUN: %riscv32_cheri_cc1 -target-feature +a %s -emit-llvm -o - -disable-O0-optnone -verify | opt -S -mem2reg | FileCheck %s --check-prefixes=HYBRID32

#if __CHERI_CAPABILITY_WIDTH__ == 64
typedef __INT64_TYPE__ cap_size_int;
#else
typedef __int128 cap_size_int;
#endif

// PURECAP64-LABEL: define {{[^@]+}}@load_long
// PURECAP64-SAME: (ptr addrspace(200) noundef [[L:%.*]]) addrspace(200) #[[ATTR0:[0-9]+]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: [[TMP0:%.*]] = load atomic i64, ptr addrspace(200) [[L]] seq_cst, align 8
// PURECAP64-NEXT: ret i64 [[TMP0]]
//
// HYBRID64-LABEL: define {{[^@]+}}@load_long
// HYBRID64-SAME: (ptr noundef [[L:%.*]]) #[[ATTR0:[0-9]+]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: [[TMP0:%.*]] = load atomic i64, ptr [[L]] seq_cst, align 8
// HYBRID64-NEXT: ret i64 [[TMP0]]
//
// PURECAP32-LABEL: define {{[^@]+}}@load_long
// PURECAP32-SAME: (ptr addrspace(200) noundef [[L:%.*]]) addrspace(200) #[[ATTR0:[0-9]+]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: [[TMP0:%.*]] = load atomic i32, ptr addrspace(200) [[L]] seq_cst, align 4
// PURECAP32-NEXT: ret i32 [[TMP0]]
//
// HYBRID32-LABEL: define {{[^@]+}}@load_long
// HYBRID32-SAME: (ptr noundef [[L:%.*]]) #[[ATTR0:[0-9]+]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: [[TMP0:%.*]] = load atomic i32, ptr [[L]] seq_cst, align 4
// HYBRID32-NEXT: ret i32 [[TMP0]]
//
long load_long(long* l) {
  return __atomic_load_n(l, __ATOMIC_SEQ_CST);
}

// PURECAP64-LABEL: define {{[^@]+}}@load_cap
// PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: [[TMP0:%.*]] = load atomic ptr addrspace(200), ptr addrspace(200) [[I]] seq_cst, align 16
// PURECAP64-NEXT: ret ptr addrspace(200) [[TMP0]]
//
// HYBRID64-LABEL: define {{[^@]+}}@load_cap
// HYBRID64-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: [[TMP0:%.*]] = load atomic ptr addrspace(200), ptr [[I]] seq_cst, align 16
// HYBRID64-NEXT: ret ptr addrspace(200) [[TMP0]]
//
// PURECAP32-LABEL: define {{[^@]+}}@load_cap
// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: [[TMP0:%.*]] = load atomic ptr addrspace(200), ptr addrspace(200) [[I]] seq_cst, align 8
// PURECAP32-NEXT: ret ptr addrspace(200) [[TMP0]]
//
// HYBRID32-LABEL: define {{[^@]+}}@load_cap
// HYBRID32-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: [[TMP0:%.*]] = load atomic ptr addrspace(200), ptr [[I]] seq_cst, align 8
// HYBRID32-NEXT: ret ptr addrspace(200) [[TMP0]]
//
__intcap load_cap(__intcap* i) {
  return __atomic_load_n(i, __ATOMIC_SEQ_CST);
}

// PURECAP64-LABEL: define {{[^@]+}}@loadi128
// PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16, addrspace(200)
// PURECAP64-NEXT: call void @__atomic_load(i64 noundef 16, ptr addrspace(200) noundef [[I]], ptr addrspace(200) noundef [[ATOMIC_TEMP]], i32 noundef signext 5)
// PURECAP64-NEXT: [[TMP0:%.*]] = load i128, ptr addrspace(200) [[ATOMIC_TEMP]], align 16
// PURECAP64-NEXT: ret i128 [[TMP0]]
//
// HYBRID64-LABEL: define {{[^@]+}}@loadi128
// HYBRID64-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16
// HYBRID64-NEXT: call void @__atomic_load(i64 noundef 16, ptr noundef [[I]], ptr noundef [[ATOMIC_TEMP]], i32 noundef signext 5)
// HYBRID64-NEXT: [[TMP0:%.*]] = load i128, ptr [[ATOMIC_TEMP]], align 16
// HYBRID64-NEXT: ret i128 [[TMP0]]
//
// PURECAP32-LABEL: define {{[^@]+}}@loadi128
// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: [[CALL:%.*]] = call i64 @__atomic_load_8(ptr addrspace(200) noundef [[I]], i32 noundef 5)
// PURECAP32-NEXT: ret i64 [[CALL]]
//
// HYBRID32-LABEL: define {{[^@]+}}@loadi128
// HYBRID32-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: [[CALL:%.*]] = call i64 @__atomic_load_8(ptr noundef [[I]], i32 noundef 5)
// HYBRID32-NEXT: ret i64 [[CALL]]
//
cap_size_int loadi128(cap_size_int* i) {
  return __atomic_load_n(i, __ATOMIC_SEQ_CST);
  // expected-warning@-1{{large atomic operation may incur significant performance penalty}}
}

// PURECAP64-LABEL: define {{[^@]+}}@xchg_long
// PURECAP64-SAME: (ptr addrspace(200) noundef [[L:%.*]], i64 noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr addrspace(200) [[L]], i64 [[VAL]] seq_cst, align 8
// PURECAP64-NEXT: ret i64 [[TMP0]]
//
// HYBRID64-LABEL: define {{[^@]+}}@xchg_long
// HYBRID64-SAME: (ptr noundef [[L:%.*]], i64 noundef [[VAL:%.*]]) #[[ATTR0]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[L]], i64 [[VAL]] seq_cst, align 8
// HYBRID64-NEXT: ret i64 [[TMP0]]
//
// PURECAP32-LABEL: define {{[^@]+}}@xchg_long
// PURECAP32-SAME: (ptr addrspace(200) noundef [[L:%.*]], i32 noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr addrspace(200) [[L]], i32 [[VAL]] seq_cst, align 4
// PURECAP32-NEXT: ret i32 [[TMP0]]
//
// HYBRID32-LABEL: define {{[^@]+}}@xchg_long
// HYBRID32-SAME: (ptr noundef [[L:%.*]], i32 noundef [[VAL:%.*]]) #[[ATTR0]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[L]], i32 [[VAL]] seq_cst, align 4
// HYBRID32-NEXT: ret i32 [[TMP0]]
//
long xchg_long(long* l, long val) {
  return __atomic_exchange_n(l, val, __ATOMIC_SEQ_CST);
}

// PURECAP64-LABEL: define {{[^@]+}}@xchg_cap
// PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]], ptr addrspace(200) noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr addrspace(200) [[I]], ptr addrspace(200) [[VAL]] seq_cst, align 16
// PURECAP64-NEXT: ret ptr addrspace(200) [[TMP0]]
//
// HYBRID64-LABEL: define {{[^@]+}}@xchg_cap
// HYBRID64-SAME: (ptr noundef [[I:%.*]], ptr addrspace(200) noundef [[VAL:%.*]]) #[[ATTR0]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[I]], ptr addrspace(200) [[VAL]] seq_cst, align 16
// HYBRID64-NEXT: ret ptr addrspace(200) [[TMP0]]
//
// PURECAP32-LABEL: define {{[^@]+}}@xchg_cap
// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]], ptr addrspace(200) noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr addrspace(200) [[I]], ptr addrspace(200) [[VAL]] seq_cst, align 8
// PURECAP32-NEXT: ret ptr addrspace(200) [[TMP0]]
//
// HYBRID32-LABEL: define {{[^@]+}}@xchg_cap
// HYBRID32-SAME: (ptr noundef [[I:%.*]], ptr addrspace(200) noundef [[VAL:%.*]]) #[[ATTR0]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[I]], ptr addrspace(200) [[VAL]] seq_cst, align 8
// HYBRID32-NEXT: ret ptr addrspace(200) [[TMP0]]
//
__intcap xchg_cap(__intcap* i, __intcap val) {
  return __atomic_exchange_n(i, val, __ATOMIC_SEQ_CST);
}

// PURECAP64-LABEL: define {{[^@]+}}@xchg_i128
// PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]], i128 noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: [[DOTATOMICTMP:%.*]] = alloca i128, align 16, addrspace(200)
// PURECAP64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16, addrspace(200)
// PURECAP64-NEXT: store i128 [[VAL]], ptr addrspace(200) [[DOTATOMICTMP]], align 16
// PURECAP64-NEXT: call void @__atomic_exchange(i64 noundef 16, ptr addrspace(200) noundef [[I]], ptr addrspace(200) noundef [[DOTATOMICTMP]], ptr addrspace(200) noundef [[ATOMIC_TEMP]], i32 noundef signext 5)
// PURECAP64-NEXT: [[TMP0:%.*]] = load i128, ptr addrspace(200) [[ATOMIC_TEMP]], align 16
// PURECAP64-NEXT: ret i128 [[TMP0]]
//
// HYBRID64-LABEL: define {{[^@]+}}@xchg_i128
// HYBRID64-SAME: (ptr noundef [[I:%.*]], i128 noundef [[VAL:%.*]]) #[[ATTR0]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: [[DOTATOMICTMP:%.*]] = alloca i128, align 16
// HYBRID64-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16
// HYBRID64-NEXT: store i128 [[VAL]], ptr [[DOTATOMICTMP]], align 16
// HYBRID64-NEXT: call void @__atomic_exchange(i64 noundef 16, ptr noundef [[I]], ptr noundef [[DOTATOMICTMP]], ptr noundef [[ATOMIC_TEMP]], i32 noundef signext 5)
// HYBRID64-NEXT: [[TMP0:%.*]] = load i128, ptr [[ATOMIC_TEMP]], align 16
// HYBRID64-NEXT: ret i128 [[TMP0]]
//
// PURECAP32-LABEL: define {{[^@]+}}@xchg_i128
// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]], i64 noundef [[VAL:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: [[CALL:%.*]] = call i64 @__atomic_exchange_8(ptr addrspace(200) noundef [[I]], i64 noundef [[VAL]], i32 noundef 5)
// PURECAP32-NEXT: ret i64 [[CALL]]
//
// HYBRID32-LABEL: define {{[^@]+}}@xchg_i128
// HYBRID32-SAME: (ptr noundef [[I:%.*]], i64 noundef [[VAL:%.*]]) #[[ATTR0]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: [[CALL:%.*]] = call i64 @__atomic_exchange_8(ptr noundef [[I]], i64 noundef [[VAL]], i32 noundef 5)
// HYBRID32-NEXT: ret i64 [[CALL]]
//
cap_size_int xchg_i128(cap_size_int* i, cap_size_int val) {
  return __atomic_exchange_n(i, val, __ATOMIC_SEQ_CST);
  // expected-warning@-1{{large atomic operation may incur significant performance penalty}}
}

// PURECAP64-LABEL: define {{[^@]+}}@lock_free_long
// PURECAP64-SAME: (ptr addrspace(200) noundef [[L:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: ret i1 true
//
// HYBRID64-LABEL: define {{[^@]+}}@lock_free_long
// HYBRID64-SAME: (ptr noundef [[L:%.*]]) #[[ATTR0]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: ret i1 true
//
// PURECAP32-LABEL: define {{[^@]+}}@lock_free_long
// PURECAP32-SAME: (ptr addrspace(200) noundef [[L:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: ret i1 true
//
// HYBRID32-LABEL: define {{[^@]+}}@lock_free_long
// HYBRID32-SAME: (ptr noundef [[L:%.*]]) #[[ATTR0]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: ret i1 true
//
_Bool lock_free_long(long* l) {
  _Static_assert(__atomic_always_lock_free(sizeof(*l), 0), "");
  return __atomic_is_lock_free(sizeof(*l), l);
}

//
// FIXME: should return true here
// PURECAP64-LABEL: define {{[^@]+}}@lock_free_cap
// PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr addrspace(200) noundef [[I]])
// PURECAP64-NEXT: ret i1 [[CALL]]
//
// HYBRID64-LABEL: define {{[^@]+}}@lock_free_cap
// HYBRID64-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr noundef [[I]])
// HYBRID64-NEXT: ret i1 [[CALL]]
//
// PURECAP32-LABEL: define {{[^@]+}}@lock_free_cap
// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i32 noundef 8, ptr addrspace(200) noundef [[I]])
// PURECAP32-NEXT: ret i1 [[CALL]]
//
// HYBRID32-LABEL: define {{[^@]+}}@lock_free_cap
// HYBRID32-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i32 noundef 8, ptr noundef [[I]])
// HYBRID32-NEXT: ret i1 [[CALL]]
//
_Bool lock_free_cap(__intcap* i) {
  // TODO: _Static_assert(__atomic_always_lock_free(sizeof(*i), 0), "");
  return __atomic_is_lock_free(sizeof(*i), i);
}

//
// FIXME: should return true here
// PURECAP64-LABEL: define {{[^@]+}}@lock_free_i128
// PURECAP64-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP64-NEXT: entry:
// PURECAP64-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr addrspace(200) noundef [[I]])
// PURECAP64-NEXT: ret i1 [[CALL]]
//
// HYBRID64-LABEL: define {{[^@]+}}@lock_free_i128
// HYBRID64-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] {
// HYBRID64-NEXT: entry:
// HYBRID64-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr noundef [[I]])
// HYBRID64-NEXT: ret i1 [[CALL]]
//
// PURECAP32-LABEL: define {{[^@]+}}@lock_free_i128
// PURECAP32-SAME: (ptr addrspace(200) noundef [[I:%.*]]) addrspace(200) #[[ATTR0]] {
// PURECAP32-NEXT: entry:
// PURECAP32-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i32 noundef 8, ptr addrspace(200) noundef [[I]])
// PURECAP32-NEXT: ret i1 [[CALL]]
//
// HYBRID32-LABEL: define {{[^@]+}}@lock_free_i128
// HYBRID32-SAME: (ptr noundef [[I:%.*]]) #[[ATTR0]] {
// HYBRID32-NEXT: entry:
// HYBRID32-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_is_lock_free(i32 noundef 8, ptr noundef [[I]])
// HYBRID32-NEXT: ret i1 [[CALL]]
//
_Bool lock_free_i128(cap_size_int* i) {
  // TODO: _Static_assert(__atomic_always_lock_free(sizeof(*i), 0), "");
  return __atomic_is_lock_free(sizeof(*i), i);
}
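
The lock_free_* tests above also hinge on the difference between the two builtins: __atomic_always_lock_free must fold to a compile-time constant (so it is usable in _Static_assert), while __atomic_is_lock_free may lower to a runtime call into libatomic when the answer is not known statically, which is exactly what the capability-size cases do today. A minimal sketch of that split (hypothetical, same targets as above):

/* Folds to an inline 'true' for long (as in lock_free_long above), but
 * lowers to a call to __atomic_is_lock_free for capability-size types
 * (as in lock_free_cap and lock_free_i128). */
_Bool query_lock_free(void *p, unsigned long size) {
  return __atomic_is_lock_free(size, p);
}
/* The compile-time variant must be a constant expression: */
_Static_assert(__atomic_always_lock_free(sizeof(long), 0), "");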
25 changes: 25 additions & 0 deletions clang/test/Sema/cheri/atomic-lock-free.c
@@ -0,0 +1,25 @@
/// Check that we report true for __atomic_always_lock_free(sizeof(uintptr_t)).
/// For example, libc++'s std::atomic includes an is_always_lock_free member defined as
/// static _LIBCPP_CONSTEXPR bool is_always_lock_free = __atomic_always_lock_free(sizeof(__a_), 0);
/// This was incorrectly being set to false for purecap std::atomic<uintptr_t>.
/// Ideally the builtin would take a type rather than a size, but unfortunately it's too late to change that.
/// See also CodeGen/cheri/atomic-lock-free.c to show that we generate the appropriate code.
// RUN: %riscv64_cheri_purecap_cc1 -target-feature +a %s -fsyntax-only -verify=purecap
// RUN: %riscv64_cheri_cc1 -target-feature +a %s -fsyntax-only -verify=hybrid
// RUN: %riscv32_cheri_purecap_cc1 -target-feature +a %s -fsyntax-only -verify=purecap
// RUN: %riscv32_cheri_cc1 -target-feature +a %s -fsyntax-only -verify=hybrid

_Static_assert(__atomic_always_lock_free(sizeof(char), 0), "");
_Static_assert(__atomic_always_lock_free(sizeof(short), 0), "");
_Static_assert(__atomic_always_lock_free(sizeof(int), 0), "");
_Static_assert(__atomic_always_lock_free(sizeof(__INTPTR_TYPE__), 0), "");
// FIXME: purecap-error@-1{{static assertion failed due to requirement '__atomic_always_lock_free(sizeof(__intcap), 0)'}}
_Static_assert(__atomic_always_lock_free(sizeof(__UINTPTR_TYPE__), 0), "");
// FIXME: purecap-error@-1{{static assertion failed due to requirement '__atomic_always_lock_free(sizeof(unsigned __intcap), 0)'}}
_Static_assert(__atomic_always_lock_free(sizeof(void *), 0), "");
// FIXME: purecap-error@-1{{static assertion failed due to requirement '__atomic_always_lock_free(sizeof(void *), 0)'}}
/// TODO: it would be nice if hybrid mode also allowed lock-free sizeof(void * __capability)
/// but this is not currently true since atomic RMW/CMPXCHG with capability
/// pointers are not supported.
_Static_assert(__atomic_always_lock_free(sizeof(void * __capability), 0), ""); // hybrid-error{{static assertion failed due to requirement '__atomic_always_lock_free(sizeof(void * __capability), 0)'}}
// FIXME: purecap-error@-1{{static assertion failed due to requirement '__atomic_always_lock_free(sizeof(void *), 0)'}}
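
As the header comment explains, the user-visible impact is on code like libc++'s std::atomic, whose is_always_lock_free member is computed from this builtin. A C analogue of that pattern (a hypothetical sketch):

#include <stdint.h>
/* Mirrors the libc++ definition quoted above; once the FIXMEs in this
 * test are resolved, this must fold to 1 for purecap uintptr_t. */
static const _Bool uintptr_is_always_lock_free =
    __atomic_always_lock_free(sizeof(uintptr_t), 0);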
