From b12151c40d8ad3f1d4c45bd8bd2da9cc94076d5a Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Sat, 10 Feb 2024 15:26:55 +0100
Subject: [PATCH 1/4] interpret/write_discriminant: when encoding niched variant, ensure the stored value matches

---
 ...um-set-discriminant-niche-variant-wrong.rs | 31 +++++++++++++++++++
 ...et-discriminant-niche-variant-wrong.stderr | 20 ++++++++++++
 2 files changed, 51 insertions(+)
 create mode 100644 tests/fail/enum-set-discriminant-niche-variant-wrong.rs
 create mode 100644 tests/fail/enum-set-discriminant-niche-variant-wrong.stderr

diff --git a/tests/fail/enum-set-discriminant-niche-variant-wrong.rs b/tests/fail/enum-set-discriminant-niche-variant-wrong.rs
new file mode 100644
index 0000000000..7097aa0c43
--- /dev/null
+++ b/tests/fail/enum-set-discriminant-niche-variant-wrong.rs
@@ -0,0 +1,31 @@
+#![feature(core_intrinsics)]
+#![feature(custom_mir)]
+
+use std::intrinsics::mir::*;
+use std::num::NonZeroI32;
+
+// We define our own option type so that we can control the varian indices.
+#[allow(unused)]
+enum Option<T> {
+    None,
+    Some(T),
+}
+use Option::*;
+
+#[custom_mir(dialect = "runtime", phase = "optimized")]
+fn set_discriminant(ptr: &mut Option<NonZeroI32>) {
+    mir! {
+        {
+            // We set the discriminant to `Some`, which is a NOP since this is the niched variant.
+            // However, the enum is actually encoding `None` currently! That's not good...
+            SetDiscriminant(*ptr, 1);
+            //~^ ERROR: trying to set discriminant of a Option<std::num::NonZero<i32>> to the niched variant, but the value does not match
+            Return()
+        }
+    }
+}
+
+pub fn main() {
+    let mut v = None;
+    set_discriminant(&mut v);
+}
diff --git a/tests/fail/enum-set-discriminant-niche-variant-wrong.stderr b/tests/fail/enum-set-discriminant-niche-variant-wrong.stderr
new file mode 100644
index 0000000000..a48a0a993d
--- /dev/null
+++ b/tests/fail/enum-set-discriminant-niche-variant-wrong.stderr
@@ -0,0 +1,20 @@
+error: Undefined Behavior: trying to set discriminant of a Option<std::num::NonZero<i32>> to the niched variant, but the value does not match
+  --> $DIR/enum-set-discriminant-niche-variant-wrong.rs:LL:CC
+   |
+LL |             SetDiscriminant(*ptr, 1);
+   |             ^^^^^^^^^^^^^^^^^^^^^^^^ trying to set discriminant of a Option<std::num::NonZero<i32>> to the niched variant, but the value does not match
+   |
+   = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+   = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+   = note: BACKTRACE:
+   = note: inside `set_discriminant` at $DIR/enum-set-discriminant-niche-variant-wrong.rs:LL:CC
+note: inside `main`
+  --> $DIR/enum-set-discriminant-niche-variant-wrong.rs:LL:CC
+   |
+LL |     set_discriminant(&mut v);
+   |     ^^^^^^^^^^^^^^^^^^^^^^^^
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+

From bcd59fb8cc3486a39ab0a5d0ea9df9821e70c676 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Mon, 12 Feb 2024 13:15:40 +0100
Subject: [PATCH 2/4] Preparing for merge from rustc

---
 rust-version | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/rust-version b/rust-version
index 11d4766b93..2c98082bc1 100644
--- a/rust-version
+++ b/rust-version
@@ -1 +1 @@
-0cbef48150e1fab161b5fd147b57ceb3f9272a52
+b17491c8f6d555386104dfd82004c01bfef09c95

From e95bcfc288a389adabc32588f01dd6ac9a046598 Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Mon, 12 Feb 2024 13:19:49 +0100
Subject: [PATCH 3/4] comment tweaks

---
 src/concurrency/sync.rs | 6 +++---
 tests/fail/enum-set-discriminant-niche-variant-wrong.rs | 6 +++---
 tests/pass-dep/concurrency/libc_pthread_cond.rs | 2 +-
 tests/pass-dep/concurrency/libc_pthread_cond_isolated.rs | 2 +-
 tests/pass-dep/shims/pthread-sync.rs | 5 ++---
 5 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/src/concurrency/sync.rs b/src/concurrency/sync.rs
index 42b9161afd..5d79b9fab0 100644
--- a/src/concurrency/sync.rs
+++ b/src/concurrency/sync.rs
@@ -70,7 +70,7 @@ struct Mutex {
     lock_count: usize,
     /// The queue of threads waiting for this mutex.
     queue: VecDeque<ThreadId>,
-    /// Data race handle, this tracks the happens-before
+    /// Data race handle. This tracks the happens-before
     /// relationship between each mutex access. It is
     /// released to during unlock and acquired from during
     /// locking, and therefore stores the clock of the last
@@ -92,7 +92,7 @@ struct RwLock {
     writer_queue: VecDeque<ThreadId>,
     /// The queue of reader threads waiting for this lock.
     reader_queue: VecDeque<ThreadId>,
-    /// Data race handle for writers, tracks the happens-before
+    /// Data race handle for writers. Tracks the happens-before
     /// ordering between each write access to a rwlock and is updated
     /// after a sequence of concurrent readers to track the happens-
     /// before ordering between the set of previous readers and
@@ -101,7 +101,7 @@ struct RwLock {
     /// lock or the joined clock of the set of last threads to release
     /// shared reader locks.
     data_race: VClock,
-    /// Data race handle for readers, this is temporary storage
+    /// Data race handle for readers. This is temporary storage
     /// for the combined happens-before ordering for between all
     /// concurrent readers and the next writer, and the value
     /// is stored to the main data_race variable once all
diff --git a/tests/fail/enum-set-discriminant-niche-variant-wrong.rs b/tests/fail/enum-set-discriminant-niche-variant-wrong.rs
index 7097aa0c43..428f371ca5 100644
--- a/tests/fail/enum-set-discriminant-niche-variant-wrong.rs
+++ b/tests/fail/enum-set-discriminant-niche-variant-wrong.rs
@@ -4,11 +4,11 @@
 use std::intrinsics::mir::*;
 use std::num::NonZeroI32;
 
-// We define our own option type so that we can control the varian indices.
+// We define our own option type so that we can control the variant indices.
 #[allow(unused)]
 enum Option<T> {
-    None,
-    Some(T),
+    None, // variant 0
+    Some(T), // variant 1
 }
 use Option::*;
 
diff --git a/tests/pass-dep/concurrency/libc_pthread_cond.rs b/tests/pass-dep/concurrency/libc_pthread_cond.rs
index b0325f7d78..f362caa11d 100644
--- a/tests/pass-dep/concurrency/libc_pthread_cond.rs
+++ b/tests/pass-dep/concurrency/libc_pthread_cond.rs
@@ -22,7 +22,7 @@ fn test_timed_wait_timeout(clock_id: i32) {
         let mut now_mu: MaybeUninit<libc::timespec> = MaybeUninit::uninit();
         assert_eq!(libc::clock_gettime(clock_id, now_mu.as_mut_ptr()), 0);
         let now = now_mu.assume_init();
-        // Waiting for a second... mostly because waiting less requires mich more tricky arithmetic.
+        // Waiting for a second... mostly because waiting less requires much more tricky arithmetic.
         // FIXME: wait less.
         let timeout = libc::timespec { tv_sec: now.tv_sec + 1, tv_nsec: now.tv_nsec };
 
diff --git a/tests/pass-dep/concurrency/libc_pthread_cond_isolated.rs b/tests/pass-dep/concurrency/libc_pthread_cond_isolated.rs
index 103ce44006..66c0895a5d 100644
--- a/tests/pass-dep/concurrency/libc_pthread_cond_isolated.rs
+++ b/tests/pass-dep/concurrency/libc_pthread_cond_isolated.rs
@@ -21,7 +21,7 @@ fn test_timed_wait_timeout(clock_id: i32) {
         let mut now_mu: MaybeUninit<libc::timespec> = MaybeUninit::uninit();
         assert_eq!(libc::clock_gettime(clock_id, now_mu.as_mut_ptr()), 0);
         let now = now_mu.assume_init();
-        // Waiting for a second... mostly because waiting less requires mich more tricky arithmetic.
+        // Waiting for a second... mostly because waiting less requires much more tricky arithmetic.
         // FIXME: wait less.
         let timeout = libc::timespec { tv_sec: now.tv_sec + 1, tv_nsec: now.tv_nsec };
 
diff --git a/tests/pass-dep/shims/pthread-sync.rs b/tests/pass-dep/shims/pthread-sync.rs
index 4cc5b7d68a..e812760f79 100644
--- a/tests/pass-dep/shims/pthread-sync.rs
+++ b/tests/pass-dep/shims/pthread-sync.rs
@@ -98,9 +98,8 @@ fn test_mutex_libc_static_initializer_recursive() {
     }
 }
 
-// Testing the behavior of std::sync::RwLock does not fully exercise the pthread rwlock shims, we
-// need to go a layer deeper and test the behavior of the libc functions, because
-// std::sys::unix::rwlock::RWLock itself keeps track of write_locked and num_readers.
+// std::sync::RwLock does not even use pthread_rwlock any more.
+// Do some smoke testing of the API surface.
 fn test_rwlock_libc_static_initializer() {
     let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
     unsafe {

From a0c04859348047a202fac6bda26b235c34d3118f Mon Sep 17 00:00:00 2001
From: Ralf Jung
Date: Mon, 12 Feb 2024 14:05:35 +0100
Subject: [PATCH 4/4] also test pthread_mutex/rwlock directly

turns out one of the synchronizations in rwlock_writer_unlock is unnecessary
---
 src/concurrency/sync.rs | 11 +--
 tests/pass-dep/shims/pthread-sync.rs | 129 +++++++++++++++++++++++++--
 tests/pass/concurrency/sync.rs | 27 ++++--
 3 files changed, 146 insertions(+), 21 deletions(-)

diff --git a/src/concurrency/sync.rs b/src/concurrency/sync.rs
index 5d79b9fab0..6863119032 100644
--- a/src/concurrency/sync.rs
+++ b/src/concurrency/sync.rs
@@ -110,6 +110,7 @@ struct RwLock {
     /// must load the clock of the last write and must not
     /// add happens-before orderings between shared reader
     /// locks.
+    /// This is only relevant when there is an active reader.
     data_race_reader: VClock,
 }
 
@@ -485,6 +486,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
             Entry::Vacant(_) => return false, // we did not even own this lock
         }
         if let Some(data_race) = &this.machine.data_race {
+            // Add this to the shared-release clock of all concurrent readers.
            data_race.validate_lock_release_shared(
                 &mut rwlock.data_race_reader,
                 reader,
@@ -539,20 +541,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
         }
         rwlock.writer = None;
         trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, expected_writer);
-        // Release memory to both reader and writer vector clocks
-        // since this writer happens-before both the union of readers once they are finished
-        // and the next writer
+        // Release memory to next lock holder.
         if let Some(data_race) = &this.machine.data_race {
             data_race.validate_lock_release(
                 &mut rwlock.data_race,
                 current_writer,
                 current_span,
             );
-            data_race.validate_lock_release(
-                &mut rwlock.data_race_reader,
-                current_writer,
-                current_span,
-            );
         }
         // The thread was a writer.
         //
diff --git a/tests/pass-dep/shims/pthread-sync.rs b/tests/pass-dep/shims/pthread-sync.rs
index e812760f79..077bbfff16 100644
--- a/tests/pass-dep/shims/pthread-sync.rs
+++ b/tests/pass-dep/shims/pthread-sync.rs
@@ -1,24 +1,34 @@
 //@ignore-target-windows: No libc on Windows
+// We use `yield` to test specific interleavings, so disable automatic preemption.
+//@compile-flags: -Zmiri-preemption-rate=0
+#![feature(sync_unsafe_cell)]
+
+use std::cell::SyncUnsafeCell;
+use std::thread;
+use std::{mem, ptr};
 
 fn main() {
     test_mutex_libc_init_recursive();
     test_mutex_libc_init_normal();
     test_mutex_libc_init_errorcheck();
     test_rwlock_libc_static_initializer();
-
     #[cfg(target_os = "linux")]
     test_mutex_libc_static_initializer_recursive();
+
+    test_mutex();
+    check_rwlock_write();
+    check_rwlock_read_no_deadlock();
 }
 
 fn test_mutex_libc_init_recursive() {
     unsafe {
-        let mut attr: libc::pthread_mutexattr_t = std::mem::zeroed();
+        let mut attr: libc::pthread_mutexattr_t = mem::zeroed();
         assert_eq!(libc::pthread_mutexattr_init(&mut attr as *mut _), 0);
         assert_eq!(
             libc::pthread_mutexattr_settype(&mut attr as *mut _, libc::PTHREAD_MUTEX_RECURSIVE),
             0,
         );
-        let mut mutex: libc::pthread_mutex_t = std::mem::zeroed();
+        let mut mutex: libc::pthread_mutex_t = mem::zeroed();
         assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mut attr as *mut _), 0);
         assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0);
         assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0);
@@ -36,7 +46,7 @@ fn test_mutex_libc_init_recursive() {
 fn test_mutex_libc_init_normal() {
     unsafe {
-        let mut mutexattr: libc::pthread_mutexattr_t = std::mem::zeroed();
+        let mut mutexattr: libc::pthread_mutexattr_t = mem::zeroed();
         assert_eq!(
             libc::pthread_mutexattr_settype(&mut mutexattr as *mut _, 0x12345678),
             libc::EINVAL,
         );
@@ -45,7 +55,7 @@ fn test_mutex_libc_init_normal() {
             libc::pthread_mutexattr_settype(&mut mutexattr as *mut _, libc::PTHREAD_MUTEX_NORMAL),
             0,
         );
-        let mut mutex: libc::pthread_mutex_t = std::mem::zeroed();
+        let mut mutex: libc::pthread_mutex_t = mem::zeroed();
         assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0);
         assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0);
         assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), libc::EBUSY);
@@ -58,7 +68,7 @@ fn test_mutex_libc_init_normal() {
 
 fn test_mutex_libc_init_errorcheck() {
     unsafe {
-        let mut mutexattr: libc::pthread_mutexattr_t = std::mem::zeroed();
+        let mut mutexattr: libc::pthread_mutexattr_t = mem::zeroed();
         assert_eq!(
             libc::pthread_mutexattr_settype(
                 &mut mutexattr as *mut _,
@@ -66,7 +76,7 @@ fn test_mutex_libc_init_errorcheck() {
             ),
             0,
         );
-        let mut mutex: libc::pthread_mutex_t = std::mem::zeroed();
+        let mut mutex: libc::pthread_mutex_t = mem::zeroed();
         assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0);
         assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0);
         assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), libc::EBUSY);
@@ -98,6 +108,111 @@ fn test_mutex_libc_static_initializer_recursive() {
     }
 }
 
+struct SendPtr<T> {
+    ptr: *mut T,
+}
+unsafe impl<T> Send for SendPtr<T> {}
+impl<T> Copy for SendPtr<T> {}
+impl<T> Clone for SendPtr<T> {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
+fn test_mutex() {
+    // Specifically *not* using `Arc` to make sure there is no synchronization apart from the mutex.
+    unsafe {
+        let data = SyncUnsafeCell::new((libc::PTHREAD_MUTEX_INITIALIZER, 0));
+        let ptr = SendPtr { ptr: data.get() };
+        let mut threads = Vec::new();
+
+        for _ in 0..3 {
+            let thread = thread::spawn(move || {
+                let ptr = ptr; // circumvent per-field closure capture
+                let mutexptr = ptr::addr_of_mut!((*ptr.ptr).0);
+                assert_eq!(libc::pthread_mutex_lock(mutexptr), 0);
+                thread::yield_now();
+                (*ptr.ptr).1 += 1;
+                assert_eq!(libc::pthread_mutex_unlock(mutexptr), 0);
+            });
+            threads.push(thread);
+        }
+
+        for thread in threads {
+            thread.join().unwrap();
+        }
+
+        let mutexptr = ptr::addr_of_mut!((*ptr.ptr).0);
+        assert_eq!(libc::pthread_mutex_trylock(mutexptr), 0);
+        assert_eq!((*ptr.ptr).1, 3);
+    }
+}
+
+fn check_rwlock_write() {
+    unsafe {
+        let data = SyncUnsafeCell::new((libc::PTHREAD_RWLOCK_INITIALIZER, 0));
+        let ptr = SendPtr { ptr: data.get() };
+        let mut threads = Vec::new();
+
+        for _ in 0..3 {
+            let thread = thread::spawn(move || {
+                let ptr = ptr; // circumvent per-field closure capture
+                let rwlockptr = ptr::addr_of_mut!((*ptr.ptr).0);
+                assert_eq!(libc::pthread_rwlock_wrlock(rwlockptr), 0);
+                thread::yield_now();
+                (*ptr.ptr).1 += 1;
+                assert_eq!(libc::pthread_rwlock_unlock(rwlockptr), 0);
+            });
+            threads.push(thread);
+
+            let readthread = thread::spawn(move || {
+                let ptr = ptr; // circumvent per-field closure capture
+                let rwlockptr = ptr::addr_of_mut!((*ptr.ptr).0);
+                assert_eq!(libc::pthread_rwlock_rdlock(rwlockptr), 0);
+                thread::yield_now();
+                let val = (*ptr.ptr).1;
+                assert!(val >= 0 && val <= 3);
+                assert_eq!(libc::pthread_rwlock_unlock(rwlockptr), 0);
+            });
+            threads.push(readthread);
+        }
+
+        for thread in threads {
+            thread.join().unwrap();
+        }
+
+        let rwlockptr = ptr::addr_of_mut!((*ptr.ptr).0);
+        assert_eq!(libc::pthread_rwlock_tryrdlock(rwlockptr), 0);
+        assert_eq!((*ptr.ptr).1, 3);
+    }
+}
+
+fn check_rwlock_read_no_deadlock() {
+    unsafe {
+        let l1 = SyncUnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
+        let l1 = SendPtr { ptr: l1.get() };
+        let l2 = SyncUnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
+        let l2 = SendPtr { ptr: l2.get() };
+
+        // acquire l1 and hold it until after the other thread is done
+        assert_eq!(libc::pthread_rwlock_rdlock(l1.ptr), 0);
+        let handle = thread::spawn(move || {
+            let l1 = l1; // circumvent per-field closure capture
+            let l2 = l2; // circumvent per-field closure capture
+            // acquire l2 before the other thread
+            assert_eq!(libc::pthread_rwlock_rdlock(l2.ptr), 0);
+            thread::yield_now();
+            assert_eq!(libc::pthread_rwlock_rdlock(l1.ptr), 0);
+            thread::yield_now();
+            assert_eq!(libc::pthread_rwlock_unlock(l1.ptr), 0);
+            assert_eq!(libc::pthread_rwlock_unlock(l2.ptr), 0);
+        });
+        thread::yield_now();
+        assert_eq!(libc::pthread_rwlock_rdlock(l2.ptr), 0);
+        handle.join().unwrap();
+    }
+}
+
 // std::sync::RwLock does not even use pthread_rwlock any more.
 // Do some smoke testing of the API surface.
 fn test_rwlock_libc_static_initializer() {
diff --git a/tests/pass/concurrency/sync.rs b/tests/pass/concurrency/sync.rs
index e93e617fd2..1d48e5312d 100644
--- a/tests/pass/concurrency/sync.rs
+++ b/tests/pass/concurrency/sync.rs
@@ -1,6 +1,7 @@
 //@revisions: stack tree
 //@[tree]compile-flags: -Zmiri-tree-borrows
-//@compile-flags: -Zmiri-disable-isolation -Zmiri-strict-provenance
+// We use `yield` to test specific interleavings, so disable automatic preemption.
+//@compile-flags: -Zmiri-disable-isolation -Zmiri-strict-provenance -Zmiri-preemption-rate=0
 
 use std::sync::{Arc, Barrier, Condvar, Mutex, Once, RwLock};
 use std::thread;
@@ -119,13 +120,25 @@ fn check_rwlock_write() {
     let mut threads = Vec::new();
 
     for _ in 0..3 {
-        let data = Arc::clone(&data);
-        let thread = thread::spawn(move || {
-            let mut data = data.write().unwrap();
-            thread::yield_now();
-            *data += 1;
+        let thread = thread::spawn({
+            let data = Arc::clone(&data);
+            move || {
+                let mut data = data.write().unwrap();
+                thread::yield_now();
+                *data += 1;
+            }
         });
         threads.push(thread);
+
+        let readthread = thread::spawn({
+            let data = Arc::clone(&data);
+            move || {
+                let data = data.read().unwrap();
+                thread::yield_now();
+                assert!(*data >= 0 && *data <= 3);
+            }
+        });
+        threads.push(readthread);
     }
 
     for thread in threads {
@@ -144,8 +157,10 @@ fn check_rwlock_read_no_deadlock() {
 
     let l1_copy = Arc::clone(&l1);
     let l2_copy = Arc::clone(&l2);
+    // acquire l1 and hold it until after the other thread is done
     let _guard1 = l1.read().unwrap();
     let handle = thread::spawn(move || {
+        // acquire l2 before the other thread
         let _guard2 = l2_copy.read().unwrap();
         thread::yield_now();
         let _guard1 = l1_copy.read().unwrap();