 // that, we'll just allow that some unix targets don't use this module at all.
 #![allow(dead_code, unused_macros)]

-use crate::ffi::{c_void, CStr};
+use crate::ffi::CStr;
 use crate::marker::PhantomData;
 use crate::mem;
-use crate::ptr;
 use crate::sync::atomic::{self, AtomicUsize, Ordering};

 // We can use true weak linkage on ELF targets.

@@ -130,25 +129,25 @@ impl<F> DlsymWeak<F> {
     // Cold because it should only happen during first-time initialization.
     #[cold]
     unsafe fn initialize(&self) -> Option<F> {
-        assert_eq!(mem::size_of::<F>(), mem::size_of::<*mut ()>());
+        assert_eq!(mem::size_of::<F>(), mem::size_of::<usize>());

         let val = fetch(self.name);
         // This synchronizes with the acquire fence in `get`.
-        self.addr.store(val.addr(), Ordering::Release);
+        self.addr.store(val, Ordering::Release);

-        match val.addr() {
+        match val {
             0 => None,
-            _ => Some(mem::transmute_copy::<*mut c_void, F>(&val)),
+            addr => Some(mem::transmute_copy::<usize, F>(&addr)),
         }
     }
 }

-unsafe fn fetch(name: &str) -> *mut c_void {
+unsafe fn fetch(name: &str) -> usize {
     let name = match CStr::from_bytes_with_nul(name.as_bytes()) {
         Ok(cstr) => cstr,
-        Err(..) => return ptr::null_mut(),
+        Err(..) => return 0,
     };
-    libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr())
+    libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr()) as usize
 }

 #[cfg(not(any(target_os = "linux", target_os = "android")))]
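For context, a minimal standalone sketch of the caching pattern this diff restores: the `dlsym` result is looked up once, published into an `AtomicUsize` with a Release store, and later transmuted back into the function type `F` after an Acquire fence. The `get` side and the "1 means not yet looked up" sentinel are not part of the hunk above and are assumptions about how the reader side typically pairs with the Release store; `GetRandomFn` and `GETRANDOM` are purely hypothetical usage, and the sketch uses the `libc` crate rather than std's internal bindings.

use std::ffi::CStr;
use std::marker::PhantomData;
use std::mem;
use std::sync::atomic::{self, AtomicUsize, Ordering};

struct DlsymWeak<F> {
    name: &'static str,      // must be NUL-terminated, e.g. "getrandom\0"
    addr: AtomicUsize,       // 1 = not looked up yet, 0 = symbol not found
    _marker: PhantomData<F>,
}

impl<F> DlsymWeak<F> {
    const fn new(name: &'static str) -> Self {
        DlsymWeak { name, addr: AtomicUsize::new(1), _marker: PhantomData }
    }

    // Not shown in the hunk above; one plausible shape for the reader side.
    // A Relaxed load is enough to branch on, but before calling through the
    // cached address we issue the Acquire fence the diff's comment refers to.
    unsafe fn get(&self) -> Option<F> {
        match self.addr.load(Ordering::Relaxed) {
            1 => self.initialize(),
            0 => None,
            addr => {
                atomic::fence(Ordering::Acquire);
                Some(mem::transmute_copy::<usize, F>(&addr))
            }
        }
    }

    // Mirrors the `initialize` in the diff: do the dlsym lookup once, then
    // publish the address (or 0 for "not found") with a Release store.
    #[cold]
    unsafe fn initialize(&self) -> Option<F> {
        assert_eq!(mem::size_of::<F>(), mem::size_of::<usize>());
        let val = fetch(self.name);
        self.addr.store(val, Ordering::Release);
        match val {
            0 => None,
            addr => Some(mem::transmute_copy::<usize, F>(&addr)),
        }
    }
}

// Same body as `fetch` on the "+" side of the diff, but written against the
// `libc` crate instead of std's internal bindings.
unsafe fn fetch(name: &str) -> usize {
    let name = match CStr::from_bytes_with_nul(name.as_bytes()) {
        Ok(cstr) => cstr,
        Err(..) => return 0,
    };
    libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr()) as usize
}

// Hypothetical usage: a weak, lazily resolved binding to getrandom(2).
type GetRandomFn =
    unsafe extern "C" fn(*mut libc::c_void, libc::size_t, libc::c_uint) -> libc::ssize_t;
static GETRANDOM: DlsymWeak<GetRandomFn> = DlsymWeak::new("getrandom\0");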