Auto merge of #3296 - RalfJung:rustup, r=RalfJung
Rustup, more pthread tests
bors committed Feb 12, 2024
2 parents 877d78e + a0c0485 commit e8123e5
Showing 8 changed files with 205 additions and 30 deletions.
2 changes: 1 addition & 1 deletion rust-version
@@ -1 +1 @@
0cbef48150e1fab161b5fd147b57ceb3f9272a52
b17491c8f6d555386104dfd82004c01bfef09c95
17 changes: 6 additions & 11 deletions src/concurrency/sync.rs
@@ -70,7 +70,7 @@ struct Mutex {
lock_count: usize,
/// The queue of threads waiting for this mutex.
queue: VecDeque<ThreadId>,
/// Data race handle, this tracks the happens-before
/// Data race handle. This tracks the happens-before
/// relationship between each mutex access. It is
/// released to during unlock and acquired from during
/// locking, and therefore stores the clock of the last
@@ -92,7 +92,7 @@ struct RwLock {
writer_queue: VecDeque<ThreadId>,
/// The queue of reader threads waiting for this lock.
reader_queue: VecDeque<ThreadId>,
/// Data race handle for writers, tracks the happens-before
/// Data race handle for writers. Tracks the happens-before
/// ordering between each write access to a rwlock and is updated
/// after a sequence of concurrent readers to track the happens-
/// before ordering between the set of previous readers and
@@ -101,7 +101,7 @@ struct RwLock {
/// lock or the joined clock of the set of last threads to release
/// shared reader locks.
data_race: VClock,
/// Data race handle for readers, this is temporary storage
/// Data race handle for readers. This is temporary storage
/// for the combined happens-before ordering between all
/// concurrent readers and the next writer, and the value
/// is stored to the main data_race variable once all
@@ -110,6 +110,7 @@ struct RwLock {
/// must load the clock of the last write and must not
/// add happens-before orderings between shared reader
/// locks.
/// This is only relevant when there is an active reader.
data_race_reader: VClock,
}
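
As background for these doc comments: Miri tracks happens-before edges with vector clocks. The sketch below is illustrative only (a toy clock, not Miri's actual `VClock` API). It shows the scheme the comments describe: on unlock the lock stores the releasing thread's clock, and on lock the acquiring thread joins the stored clock into its own, creating a happens-before edge from the unlock to the subsequent lock.

// Toy vector clock; hypothetical, not Miri's `VClock` API.
#[derive(Clone, Debug, Default, PartialEq)]
struct ToyClock(Vec<u64>);

impl ToyClock {
    // Pointwise maximum, used on acquire.
    fn join(&mut self, other: &ToyClock) {
        if self.0.len() < other.0.len() {
            self.0.resize(other.0.len(), 0);
        }
        for (mine, theirs) in self.0.iter_mut().zip(&other.0) {
            *mine = (*mine).max(*theirs);
        }
    }
}

fn main() {
    let thread_a = ToyClock(vec![3, 0]); // clock of the releasing thread
    let mut lock_clock = ToyClock::default();
    lock_clock.join(&thread_a); // "release": the lock remembers A's clock

    let mut thread_b = ToyClock(vec![0, 5]);
    thread_b.join(&lock_clock); // "acquire": B now happens-after A's events
    assert_eq!(thread_b, ToyClock(vec![3, 5]));
}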

@@ -485,6 +486,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
Entry::Vacant(_) => return false, // we did not even own this lock
}
if let Some(data_race) = &this.machine.data_race {
// Add this to the shared-release clock of all concurrent readers.
data_race.validate_lock_release_shared(
&mut rwlock.data_race_reader,
reader,
@@ -539,20 +541,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
rwlock.writer = None;
trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, expected_writer);
// Release memory to both reader and writer vector clocks
// since this writer happens-before both the union of readers once they are finished
// and the next writer
// Release memory to next lock holder.
if let Some(data_race) = &this.machine.data_race {
data_race.validate_lock_release(
&mut rwlock.data_race,
current_writer,
current_span,
);
data_race.validate_lock_release(
&mut rwlock.data_race_reader,
current_writer,
current_span,
);
}
// The thread was a writer.
//
31 changes: 31 additions & 0 deletions tests/fail/enum-set-discriminant-niche-variant-wrong.rs
@@ -0,0 +1,31 @@
#![feature(core_intrinsics)]
#![feature(custom_mir)]

use std::intrinsics::mir::*;
use std::num::NonZeroI32;

// We define our own option type so that we can control the variant indices.
#[allow(unused)]
enum Option<T> {
None, // variant 0
Some(T), // variant 1
}
use Option::*;

#[custom_mir(dialect = "runtime", phase = "optimized")]
fn set_discriminant(ptr: &mut Option<NonZeroI32>) {
mir! {
{
// We set the discriminant to `Some`, which is a NOP since this is the niched variant.
// However, the enum is actually encoding `None` currently! That's not good...
SetDiscriminant(*ptr, 1);
//~^ ERROR: trying to set discriminant of a Option<std::num::NonZero<i32>> to the niched variant, but the value does not match
Return()
}
}
}

pub fn main() {
let mut v = None;
set_discriminant(&mut v);
}
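
For context on why this test triggers UB: `Option<NonZeroI32>` uses a niche encoding, so there is no separate discriminant field. `None` is represented by the all-zero bit pattern, which `NonZeroI32` itself can never hold, so setting the discriminant to the niched `Some` variant is only valid if the payload already holds a legal (non-zero) value. A standalone sketch of the layout, using the real `std` `Option` rather than the test's local one:

use std::mem::{size_of, transmute};
use std::num::NonZeroI32;

fn main() {
    // The niche optimization: the enum adds no space over the payload.
    assert_eq!(size_of::<Option<NonZeroI32>>(), size_of::<i32>());
    // `None` occupies the niche, i.e. the bit pattern 0 that NonZeroI32 forbids.
    let none: Option<NonZeroI32> = None;
    assert_eq!(unsafe { transmute::<Option<NonZeroI32>, i32>(none) }, 0);
}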
20 changes: 20 additions & 0 deletions tests/fail/enum-set-discriminant-niche-variant-wrong.stderr
@@ -0,0 +1,20 @@
error: Undefined Behavior: trying to set discriminant of a Option<std::num::NonZero<i32>> to the niched variant, but the value does not match
--> $DIR/enum-set-discriminant-niche-variant-wrong.rs:LL:CC
|
LL | SetDiscriminant(*ptr, 1);
| ^^^^^^^^^^^^^^^^^^^^^^^^ trying to set discriminant of a Option<std::num::NonZero<i32>> to the niched variant, but the value does not match
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE:
= note: inside `set_discriminant` at $DIR/enum-set-discriminant-niche-variant-wrong.rs:LL:CC
note: inside `main`
--> $DIR/enum-set-discriminant-niche-variant-wrong.rs:LL:CC
|
LL | set_discriminant(&mut v);
| ^^^^^^^^^^^^^^^^^^^^^^^^

note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace

error: aborting due to 1 previous error

2 changes: 1 addition & 1 deletion tests/pass-dep/concurrency/libc_pthread_cond.rs
@@ -22,7 +22,7 @@ fn test_timed_wait_timeout(clock_id: i32) {
let mut now_mu: MaybeUninit<libc::timespec> = MaybeUninit::uninit();
assert_eq!(libc::clock_gettime(clock_id, now_mu.as_mut_ptr()), 0);
let now = now_mu.assume_init();
// Waiting for a second... mostly because waiting less requires mich more tricky arithmetic.
// Waiting for a second... mostly because waiting less requires much more tricky arithmetic.
// FIXME: wait less.
let timeout = libc::timespec { tv_sec: now.tv_sec + 1, tv_nsec: now.tv_nsec };

2 changes: 1 addition & 1 deletion tests/pass-dep/concurrency/libc_pthread_cond_isolated.rs
@@ -21,7 +21,7 @@ fn test_timed_wait_timeout(clock_id: i32) {
let mut now_mu: MaybeUninit<libc::timespec> = MaybeUninit::uninit();
assert_eq!(libc::clock_gettime(clock_id, now_mu.as_mut_ptr()), 0);
let now = now_mu.assume_init();
// Waiting for a second... mostly because waiting less requires mich more tricky arithmetic.
// Waiting for a second... mostly because waiting less requires much more tricky arithmetic.
// FIXME: wait less.
let timeout = libc::timespec { tv_sec: now.tv_sec + 1, tv_nsec: now.tv_nsec };

134 changes: 124 additions & 10 deletions tests/pass-dep/shims/pthread-sync.rs
@@ -1,24 +1,34 @@
//@ignore-target-windows: No libc on Windows
// We use `yield` to test specific interleavings, so disable automatic preemption.
//@compile-flags: -Zmiri-preemption-rate=0
#![feature(sync_unsafe_cell)]

use std::cell::SyncUnsafeCell;
use std::thread;
use std::{mem, ptr};

fn main() {
test_mutex_libc_init_recursive();
test_mutex_libc_init_normal();
test_mutex_libc_init_errorcheck();
test_rwlock_libc_static_initializer();

#[cfg(target_os = "linux")]
test_mutex_libc_static_initializer_recursive();

test_mutex();
check_rwlock_write();
check_rwlock_read_no_deadlock();
}

fn test_mutex_libc_init_recursive() {
unsafe {
let mut attr: libc::pthread_mutexattr_t = std::mem::zeroed();
let mut attr: libc::pthread_mutexattr_t = mem::zeroed();
assert_eq!(libc::pthread_mutexattr_init(&mut attr as *mut _), 0);
assert_eq!(
libc::pthread_mutexattr_settype(&mut attr as *mut _, libc::PTHREAD_MUTEX_RECURSIVE),
0,
);
let mut mutex: libc::pthread_mutex_t = std::mem::zeroed();
let mut mutex: libc::pthread_mutex_t = mem::zeroed();
assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mut attr as *mut _), 0);
assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0);
assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0);
@@ -36,7 +46,7 @@ fn test_mutex_libc_init_recursive() {

fn test_mutex_libc_init_normal() {
unsafe {
let mut mutexattr: libc::pthread_mutexattr_t = std::mem::zeroed();
let mut mutexattr: libc::pthread_mutexattr_t = mem::zeroed();
assert_eq!(
libc::pthread_mutexattr_settype(&mut mutexattr as *mut _, 0x12345678),
libc::EINVAL,
@@ -45,7 +55,7 @@ fn test_mutex_libc_init_normal() {
libc::pthread_mutexattr_settype(&mut mutexattr as *mut _, libc::PTHREAD_MUTEX_NORMAL),
0,
);
let mut mutex: libc::pthread_mutex_t = std::mem::zeroed();
let mut mutex: libc::pthread_mutex_t = mem::zeroed();
assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0);
assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0);
assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), libc::EBUSY);
@@ -58,15 +68,15 @@ fn test_mutex_libc_init_normal() {

fn test_mutex_libc_init_errorcheck() {
unsafe {
let mut mutexattr: libc::pthread_mutexattr_t = std::mem::zeroed();
let mut mutexattr: libc::pthread_mutexattr_t = mem::zeroed();
assert_eq!(
libc::pthread_mutexattr_settype(
&mut mutexattr as *mut _,
libc::PTHREAD_MUTEX_ERRORCHECK,
),
0,
);
let mut mutex: libc::pthread_mutex_t = std::mem::zeroed();
let mut mutex: libc::pthread_mutex_t = mem::zeroed();
assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0);
assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0);
assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), libc::EBUSY);
@@ -98,9 +108,113 @@ fn test_mutex_libc_static_initializer_recursive() {
}
}

// Testing the behavior of std::sync::RwLock does not fully exercise the pthread rwlock shims; we
// need to go a layer deeper and test the behavior of the libc functions, because
// std::sys::unix::rwlock::RWLock itself keeps track of write_locked and num_readers.
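// `SendPtr` hands each spawned thread a copy of a raw pointer; the pthread
// primitives under test provide the actual synchronization for the pointee.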
struct SendPtr<T> {
ptr: *mut T,
}
unsafe impl<T> Send for SendPtr<T> {}
impl<T> Copy for SendPtr<T> {}
impl<T> Clone for SendPtr<T> {
fn clone(&self) -> Self {
*self
}
}

fn test_mutex() {
// Specifically *not* using `Arc` to make sure there is no synchronization apart from the mutex.
unsafe {
let data = SyncUnsafeCell::new((libc::PTHREAD_MUTEX_INITIALIZER, 0));
let ptr = SendPtr { ptr: data.get() };
let mut threads = Vec::new();

for _ in 0..3 {
let thread = thread::spawn(move || {
let ptr = ptr; // circumvent per-field closure capture
let mutexptr = ptr::addr_of_mut!((*ptr.ptr).0);
assert_eq!(libc::pthread_mutex_lock(mutexptr), 0);
thread::yield_now();
(*ptr.ptr).1 += 1;
assert_eq!(libc::pthread_mutex_unlock(mutexptr), 0);
});
threads.push(thread);
}

for thread in threads {
thread.join().unwrap();
}
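
// After all threads have finished, the mutex must be free again (so trylock
// succeeds below) and all three mutex-protected increments must be visible.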

let mutexptr = ptr::addr_of_mut!((*ptr.ptr).0);
assert_eq!(libc::pthread_mutex_trylock(mutexptr), 0);
assert_eq!((*ptr.ptr).1, 3);
}
}

fn check_rwlock_write() {
unsafe {
let data = SyncUnsafeCell::new((libc::PTHREAD_RWLOCK_INITIALIZER, 0));
let ptr = SendPtr { ptr: data.get() };
let mut threads = Vec::new();

for _ in 0..3 {
let thread = thread::spawn(move || {
let ptr = ptr; // circumvent per-field closure capture
let rwlockptr = ptr::addr_of_mut!((*ptr.ptr).0);
assert_eq!(libc::pthread_rwlock_wrlock(rwlockptr), 0);
thread::yield_now();
(*ptr.ptr).1 += 1;
assert_eq!(libc::pthread_rwlock_unlock(rwlockptr), 0);
});
threads.push(thread);

let readthread = thread::spawn(move || {
let ptr = ptr; // circumvent per-field closure capture
let rwlockptr = ptr::addr_of_mut!((*ptr.ptr).0);
assert_eq!(libc::pthread_rwlock_rdlock(rwlockptr), 0);
thread::yield_now();
let val = (*ptr.ptr).1;
assert!(val >= 0 && val <= 3);
assert_eq!(libc::pthread_rwlock_unlock(rwlockptr), 0);
});
threads.push(readthread);
}

for thread in threads {
thread.join().unwrap();
}

let rwlockptr = ptr::addr_of_mut!((*ptr.ptr).0);
assert_eq!(libc::pthread_rwlock_tryrdlock(rwlockptr), 0);
assert_eq!((*ptr.ptr).1, 3);
}
}

fn check_rwlock_read_no_deadlock() {
unsafe {
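// Read locks are shared, so the two threads taking read locks on l1 and l2
// in opposite orders must not deadlock (write locks would deadlock here).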
let l1 = SyncUnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
let l1 = SendPtr { ptr: l1.get() };
let l2 = SyncUnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
let l2 = SendPtr { ptr: l2.get() };

// acquire l1 and hold it until after the other thread is done
assert_eq!(libc::pthread_rwlock_rdlock(l1.ptr), 0);
let handle = thread::spawn(move || {
let l1 = l1; // circumvent per-field closure capture
let l2 = l2; // circumvent per-field closure capture
// acquire l2 before the other thread
assert_eq!(libc::pthread_rwlock_rdlock(l2.ptr), 0);
thread::yield_now();
assert_eq!(libc::pthread_rwlock_rdlock(l1.ptr), 0);
thread::yield_now();
assert_eq!(libc::pthread_rwlock_unlock(l1.ptr), 0);
assert_eq!(libc::pthread_rwlock_unlock(l2.ptr), 0);
});
thread::yield_now();
assert_eq!(libc::pthread_rwlock_rdlock(l2.ptr), 0);
handle.join().unwrap();
}
}

// std::sync::RwLock does not even use pthread_rwlock any more.
// Do some smoke testing of the API surface.
fn test_rwlock_libc_static_initializer() {
let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
unsafe {
27 changes: 21 additions & 6 deletions tests/pass/concurrency/sync.rs
@@ -1,6 +1,7 @@
//@revisions: stack tree
//@[tree]compile-flags: -Zmiri-tree-borrows
//@compile-flags: -Zmiri-disable-isolation -Zmiri-strict-provenance
// We use `yield` to test specific interleavings, so disable automatic preemption.
//@compile-flags: -Zmiri-disable-isolation -Zmiri-strict-provenance -Zmiri-preemption-rate=0

use std::sync::{Arc, Barrier, Condvar, Mutex, Once, RwLock};
use std::thread;
@@ -119,13 +120,25 @@ fn check_rwlock_write() {
let mut threads = Vec::new();

for _ in 0..3 {
let data = Arc::clone(&data);
let thread = thread::spawn(move || {
let mut data = data.write().unwrap();
thread::yield_now();
*data += 1;
let thread = thread::spawn({
let data = Arc::clone(&data);
move || {
let mut data = data.write().unwrap();
thread::yield_now();
*data += 1;
}
});
threads.push(thread);

let readthread = thread::spawn({
let data = Arc::clone(&data);
move || {
let data = data.read().unwrap();
thread::yield_now();
assert!(*data >= 0 && *data <= 3);
}
});
threads.push(readthread);
}

for thread in threads {
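
An aside on the `thread::spawn({ let data = Arc::clone(&data); move || ... })` shape introduced above: cloning the `Arc` inside a block expression scopes the clone to the `spawn` call, so the outer handle stays usable on later loop iterations. A standalone sketch of the idiom (not taken from this commit):

use std::sync::Arc;
use std::thread;

fn main() {
    let data = Arc::new(42);
    let mut handles = Vec::new();
    for _ in 0..3 {
        handles.push(thread::spawn({
            // The clone lives only in this block; `data` itself remains
            // available for the next iteration.
            let data = Arc::clone(&data);
            move || assert_eq!(*data, 42)
        }));
    }
    for handle in handles {
        handle.join().unwrap();
    }
}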
@@ -144,8 +157,10 @@ fn check_rwlock_read_no_deadlock() {

let l1_copy = Arc::clone(&l1);
let l2_copy = Arc::clone(&l2);
// acquire l1 and hold it until after the other thread is done
let _guard1 = l1.read().unwrap();
let handle = thread::spawn(move || {
// acquire l2 before the other thread
let _guard2 = l2_copy.read().unwrap();
thread::yield_now();
let _guard1 = l1_copy.read().unwrap();
