From 993fb934640b7e514f3c629c33a2698a83ed8c3e Mon Sep 17 00:00:00 2001
From: Corey Farwell <coreyf@rwell.org>
Date: Sun, 19 Aug 2018 22:16:22 -0400
Subject: [PATCH] Replace usages of ptr::offset with ptr::{add,sub}.
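
ptr::add and ptr::sub are convenience wrappers around ptr::offset (and
wrapping_add around wrapping_offset) that take a usize index instead of
an isize, which lets the call sites below drop their `as isize` casts.
As a rough illustration of the equivalence this rewrite relies on (a
sketch written for this message, not part of the diff; the variable
names are made up):

    unsafe {
        let xs = [1u8, 2, 3];
        let p = xs.as_ptr();
        // Three equivalent ways to produce a pointer to xs[2]:
        let a = p.offset(2);     // index is an isize
        let b = p.add(2);        // index is a usize, no cast needed
        let c = p.add(3).sub(1); // add/sub compose the way offset does
        assert_eq!(a, b);
        assert_eq!(b, c);
    }

A few call sites keep ptr::offset because their index really is an
isize, e.g. `buf.offset(src)` in the VecDeque-to-Vec conversion, where
`src` is computed as an isize.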
---
 src/liballoc/alloc.rs                         |  2 +-
 src/liballoc/boxed.rs                         |  2 +-
 src/liballoc/collections/btree/node.rs        | 48 +++++++++----------
 src/liballoc/collections/vec_deque.rs         | 42 ++++++++--------
 src/liballoc/raw_vec.rs                       |  4 +-
 src/liballoc/rc.rs                            |  2 +-
 src/liballoc/slice.rs                         |  8 ++--
 src/liballoc/string.rs                        | 14 +++---
 src/liballoc/sync.rs                          |  2 +-
 src/liballoc/vec.rs                           | 32 ++++++-------
 src/liballoc_system/lib.rs                    |  2 +-
 src/libarena/lib.rs                           |  4 +-
 src/libcore/intrinsics.rs                     |  2 +-
 src/libcore/marker.rs                         |  2 +-
 src/libcore/ptr.rs                            | 16 +++----
 src/libcore/slice/memchr.rs                   |  4 +-
 src/libcore/slice/mod.rs                      | 46 +++++++++---------
 src/libcore/slice/rotate.rs                   | 14 +++---
 src/libcore/slice/sort.rs                     |  6 +--
 src/libcore/str/mod.rs                        | 14 +++---
 src/libpanic_unwind/dwarf/mod.rs              |  2 +-
 src/librustc_data_structures/array_vec.rs     |  6 +--
 src/librustc_data_structures/small_vec.rs     |  2 +-
 src/libstd/collections/hash/table.rs          |  4 +-
 src/libstd/sys/windows/pipe.rs                |  2 +-
 src/libstd/sys_common/wtf8.rs                 |  2 +-
 ...thod-mut-self-modifies-mut-slice-lvalue.rs |  2 +-
 src/test/run-pass/realloc-16687.rs            |  8 ++--
 src/test/run-pass/running-with-no-runtime.rs  |  2 +-
 29 files changed, 148 insertions(+), 148 deletions(-)

diff --git a/src/liballoc/alloc.rs b/src/liballoc/alloc.rs
index c69b2fb5e1c2a..3bd0c243b39ac 100644
--- a/src/liballoc/alloc.rs
+++ b/src/liballoc/alloc.rs
@@ -245,7 +245,7 @@ mod tests {
             .unwrap_or_else(|_| handle_alloc_error(layout));

         let mut i = ptr.cast::<u8>().as_ptr();
-        let end = i.offset(layout.size() as isize);
+        let end = i.add(layout.size());
         while i < end {
             assert_eq!(*i, 0);
             i = i.offset(1);
diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs
index 08db5136d0404..7e6cd902bb3d1 100644
--- a/src/liballoc/boxed.rs
+++ b/src/liballoc/boxed.rs
@@ -704,7 +704,7 @@ impl<T: Clone> Clone for Box<[T]> {
         impl<T> Drop for BoxBuilder<T> {
             fn drop(&mut self) {
                 let mut data = self.data.ptr();
-                let max = unsafe { data.offset(self.len as isize) };
+                let max = unsafe { data.add(self.len) };

                 while data != max {
                     unsafe {
diff --git a/src/liballoc/collections/btree/node.rs b/src/liballoc/collections/btree/node.rs
index 0ae45b3123259..0315545262b6b 100644
--- a/src/liballoc/collections/btree/node.rs
+++ b/src/liballoc/collections/btree/node.rs
@@ -1151,12 +1151,12 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV>
         let new_len = self.node.len() - self.idx - 1;

         ptr::copy_nonoverlapping(
-            self.node.keys().as_ptr().offset(self.idx as isize + 1),
+            self.node.keys().as_ptr().add(self.idx + 1),
             new_node.keys.as_mut_ptr(),
             new_len
         );
         ptr::copy_nonoverlapping(
-            self.node.vals().as_ptr().offset(self.idx as isize + 1),
+            self.node.vals().as_ptr().add(self.idx + 1),
             new_node.vals.as_mut_ptr(),
             new_len
         );
@@ -1209,17 +1209,17 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
         let new_len = self.node.len() - self.idx - 1;

         ptr::copy_nonoverlapping(
-            self.node.keys().as_ptr().offset(self.idx as isize + 1),
+            self.node.keys().as_ptr().add(self.idx + 1),
             new_node.data.keys.as_mut_ptr(),
             new_len
         );
         ptr::copy_nonoverlapping(
-            self.node.vals().as_ptr().offset(self.idx as isize + 1),
+            self.node.vals().as_ptr().add(self.idx + 1),
             new_node.data.vals.as_mut_ptr(),
             new_len
         );
         ptr::copy_nonoverlapping(
-            self.node.as_internal().edges.as_ptr().offset(self.idx as isize + 1),
+            self.node.as_internal().edges.as_ptr().add(self.idx + 1),
             new_node.edges.as_mut_ptr(),
             new_len + 1
         );
@@ -1283,14 +1283,14 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
                 slice_remove(self.node.keys_mut(), self.idx));
             ptr::copy_nonoverlapping(
                 right_node.keys().as_ptr(),
-                left_node.keys_mut().as_mut_ptr().offset(left_len as isize + 1),
+                left_node.keys_mut().as_mut_ptr().add(left_len + 1),
                 right_len
             );
             ptr::write(left_node.vals_mut().get_unchecked_mut(left_len),
                 slice_remove(self.node.vals_mut(), self.idx));
             ptr::copy_nonoverlapping(
                 right_node.vals().as_ptr(),
-                left_node.vals_mut().as_mut_ptr().offset(left_len as isize + 1),
+                left_node.vals_mut().as_mut_ptr().add(left_len + 1),
                 right_len
             );
@@ -1309,7 +1309,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
                     .as_internal_mut()
                     .edges
                     .as_mut_ptr()
-                    .offset(left_len as isize + 1),
+                    .add(left_len + 1),
                 right_len + 1
             );
@@ -1394,10 +1394,10 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
             // Make room for stolen elements in the right child.
             ptr::copy(right_kv.0,
-                      right_kv.0.offset(count as isize),
+                      right_kv.0.add(count),
                       right_len);
             ptr::copy(right_kv.1,
-                      right_kv.1.offset(count as isize),
+                      right_kv.1.add(count),
                       right_len);

             // Move elements from the left child to the right one.
@@ -1418,7 +1418,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
                 // Make room for stolen edges.
                 let right_edges = right.reborrow_mut().as_internal_mut().edges.as_mut_ptr();
                 ptr::copy(right_edges,
-                          right_edges.offset(count as isize),
+                          right_edges.add(count),
                           right_len + 1);
                 right.correct_childrens_parent_links(count, count + right_len + 1);
@@ -1463,10 +1463,10 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
                 move_kv(right_kv, count - 1, parent_kv, 0, 1);

                 // Fix right indexing
-                ptr::copy(right_kv.0.offset(count as isize),
+                ptr::copy(right_kv.0.add(count),
                           right_kv.0,
                           new_right_len);
-                ptr::copy(right_kv.1.offset(count as isize),
+                ptr::copy(right_kv.1.add(count),
                           right_kv.1,
                           new_right_len);
             }
@@ -1480,7 +1480,7 @@ impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::
                 // Fix right indexing.
                 let right_edges = right.reborrow_mut().as_internal_mut().edges.as_mut_ptr();
-                ptr::copy(right_edges.offset(count as isize),
+                ptr::copy(right_edges.add(count),
                           right_edges,
                           new_right_len + 1);
                 right.correct_childrens_parent_links(0, new_right_len + 1);
@@ -1497,11 +1497,11 @@ unsafe fn move_kv<K, V>(
     dest: (*mut K, *mut V), dest_offset: usize,
     count: usize)
 {
-    ptr::copy_nonoverlapping(source.0.offset(source_offset as isize),
-                             dest.0.offset(dest_offset as isize),
+    ptr::copy_nonoverlapping(source.0.add(source_offset),
+                             dest.0.add(dest_offset),
                              count);
-    ptr::copy_nonoverlapping(source.1.offset(source_offset as isize),
-                             dest.1.offset(dest_offset as isize),
+    ptr::copy_nonoverlapping(source.1.add(source_offset),
+                             dest.1.add(dest_offset),
                              count);
 }
@@ -1513,8 +1513,8 @@ unsafe fn move_edges<K, V>(
 {
     let source_ptr = source.as_internal_mut().edges.as_mut_ptr();
     let dest_ptr = dest.as_internal_mut().edges.as_mut_ptr();
-    ptr::copy_nonoverlapping(source_ptr.offset(source_offset as isize),
-                             dest_ptr.offset(dest_offset as isize),
+    ptr::copy_nonoverlapping(source_ptr.add(source_offset),
+                             dest_ptr.add(dest_offset),
                              count);
     dest.correct_childrens_parent_links(dest_offset, dest_offset + count);
 }
@@ -1604,8 +1604,8 @@ pub mod marker {

 unsafe fn slice_insert<T>(slice: &mut [T], idx: usize, val: T) {
     ptr::copy(
-        slice.as_ptr().offset(idx as isize),
-        slice.as_mut_ptr().offset(idx as isize + 1),
+        slice.as_ptr().add(idx),
+        slice.as_mut_ptr().add(idx + 1),
         slice.len() - idx
     );
     ptr::write(slice.get_unchecked_mut(idx), val);
@@ -1614,8 +1614,8 @@ unsafe fn slice_insert<T>(slice: &mut [T], idx: usize, val: T) {
 unsafe fn slice_remove<T>(slice: &mut [T], idx: usize) -> T {
     let ret = ptr::read(slice.get_unchecked(idx));
     ptr::copy(
-        slice.as_ptr().offset(idx as isize + 1),
-        slice.as_mut_ptr().offset(idx as isize),
+        slice.as_ptr().add(idx + 1),
+        slice.as_mut_ptr().add(idx),
         slice.len() - idx - 1
     );
     ret
diff --git a/src/liballoc/collections/vec_deque.rs b/src/liballoc/collections/vec_deque.rs
index 0f759bb8f0b4f..55c8a78f8d046 100644
--- a/src/liballoc/collections/vec_deque.rs
+++ b/src/liballoc/collections/vec_deque.rs
@@ -126,13 +126,13 @@ impl<T> VecDeque<T> {
     /// Moves an element out of the buffer
     #[inline]
     unsafe fn buffer_read(&mut self, off: usize) -> T {
-        ptr::read(self.ptr().offset(off as isize))
+        ptr::read(self.ptr().add(off))
     }

     /// Writes an element into the buffer, moving it.
     #[inline]
     unsafe fn buffer_write(&mut self, off: usize, value: T) {
-        ptr::write(self.ptr().offset(off as isize), value);
+        ptr::write(self.ptr().add(off), value);
     }

     /// Returns `true` if and only if the buffer is at full capacity.
@@ -177,8 +177,8 @@ impl<T> VecDeque<T> {
                       src,
                       len,
                       self.cap());
-        ptr::copy(self.ptr().offset(src as isize),
-                  self.ptr().offset(dst as isize),
+        ptr::copy(self.ptr().add(src),
+                  self.ptr().add(dst),
                   len);
     }
@@ -197,8 +197,8 @@ impl<T> VecDeque<T> {
                       src,
                       len,
                       self.cap());
-        ptr::copy_nonoverlapping(self.ptr().offset(src as isize),
-                                 self.ptr().offset(dst as isize),
+        ptr::copy_nonoverlapping(self.ptr().add(src),
+                                 self.ptr().add(dst),
                                  len);
     }
@@ -436,7 +436,7 @@ impl<T> VecDeque<T> {
     pub fn get(&self, index: usize) -> Option<&T> {
         if index < self.len() {
             let idx = self.wrap_add(self.tail, index);
-            unsafe { Some(&*self.ptr().offset(idx as isize)) }
+            unsafe { Some(&*self.ptr().add(idx)) }
         } else {
             None
         }
@@ -465,7 +465,7 @@ impl<T> VecDeque<T> {
     pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
         if index < self.len() {
             let idx = self.wrap_add(self.tail, index);
-            unsafe { Some(&mut *self.ptr().offset(idx as isize)) }
+            unsafe { Some(&mut *self.ptr().add(idx)) }
         } else {
             None
         }
@@ -501,8 +501,8 @@ impl<T> VecDeque<T> {
         let ri = self.wrap_add(self.tail, i);
         let rj = self.wrap_add(self.tail, j);
         unsafe {
-            ptr::swap(self.ptr().offset(ri as isize),
-                      self.ptr().offset(rj as isize))
+            ptr::swap(self.ptr().add(ri),
+                      self.ptr().add(rj))
         }
     }
@@ -1805,20 +1805,20 @@ impl<T> VecDeque<T> {
                 // `at` lies in the first half.
                 let amount_in_first = first_len - at;

-                ptr::copy_nonoverlapping(first_half.as_ptr().offset(at as isize),
+                ptr::copy_nonoverlapping(first_half.as_ptr().add(at),
                                          other.ptr(),
                                          amount_in_first);

                 // just take all of the second half.
                 ptr::copy_nonoverlapping(second_half.as_ptr(),
-                                         other.ptr().offset(amount_in_first as isize),
+                                         other.ptr().add(amount_in_first),
                                          second_len);
             } else {
                 // `at` lies in the second half, need to factor in the elements we skipped
                 // in the first half.
                 let offset = at - first_len;
                 let amount_in_second = second_len - offset;
-                ptr::copy_nonoverlapping(second_half.as_ptr().offset(offset as isize),
+                ptr::copy_nonoverlapping(second_half.as_ptr().add(offset),
                                          other.ptr(),
                                          amount_in_second);
             }
@@ -2709,24 +2709,24 @@ impl<T> From<VecDeque<T>> for Vec<T> {
             // Need to move the ring to the front of the buffer, as vec will expect this.
             if other.is_contiguous() {
-                ptr::copy(buf.offset(tail as isize), buf, len);
+                ptr::copy(buf.add(tail), buf, len);
             } else {
                 if (tail - head) >= cmp::min(cap - tail, head) {
                     // There is enough free space in the centre for the shortest block so we can
                     // do this in at most three copy moves.
                     if (cap - tail) > head {
                         // right hand block is the long one; move that enough for the left
-                        ptr::copy(buf.offset(tail as isize),
-                                  buf.offset((tail - head) as isize),
+                        ptr::copy(buf.add(tail),
+                                  buf.add(tail - head),
                                   cap - tail);
                         // copy left in the end
-                        ptr::copy(buf, buf.offset((cap - head) as isize), head);
+                        ptr::copy(buf, buf.add(cap - head), head);
                         // shift the new thing to the start
-                        ptr::copy(buf.offset((tail - head) as isize), buf, len);
+                        ptr::copy(buf.add(tail - head), buf, len);
                     } else {
                         // left hand block is the long one, we can do it in two!
-                        ptr::copy(buf, buf.offset((cap - tail) as isize), head);
-                        ptr::copy(buf.offset(tail as isize), buf, cap - tail);
+                        ptr::copy(buf, buf.add(cap - tail), head);
+                        ptr::copy(buf.add(tail), buf, cap - tail);
                     }
                 } else {
                     // Need to use N swaps to move the ring
@@ -2751,7 +2751,7 @@ impl<T> From<VecDeque<T>> for Vec<T> {
                     for i in left_edge..right_edge {
                         right_offset = (i - left_edge) % (cap - right_edge);
                         let src: isize = (right_edge + right_offset) as isize;
-                        ptr::swap(buf.offset(i as isize), buf.offset(src));
+                        ptr::swap(buf.add(i), buf.offset(src));
                     }
                     let n_ops = right_edge - left_edge;
                     left_edge += n_ops;
diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs
index 4f2686abf4515..831010e3fe2c9 100644
--- a/src/liballoc/raw_vec.rs
+++ b/src/liballoc/raw_vec.rs
@@ -282,7 +282,7 @@ impl<T, A: Alloc> RawVec<T, A> {
     ///         // double would have aborted or panicked if the len exceeded
     ///         // `isize::MAX` so this is safe to do unchecked now.
     ///         unsafe {
-    ///             ptr::write(self.buf.ptr().offset(self.len as isize), elem);
+    ///             ptr::write(self.buf.ptr().add(self.len), elem);
     ///         }
     ///         self.len += 1;
     ///     }
@@ -487,7 +487,7 @@ impl<T, A: Alloc> RawVec<T, A> {
     ///         // `isize::MAX` so this is safe to do unchecked now.
     ///         for x in elems {
     ///             unsafe {
-    ///                 ptr::write(self.buf.ptr().offset(self.len as isize), x.clone());
+    ///                 ptr::write(self.buf.ptr().add(self.len), x.clone());
     ///             }
     ///             self.len += 1;
     ///         }
diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs
index be049eb6e5ef3..b8f1d51db464f 100644
--- a/src/liballoc/rc.rs
+++ b/src/liballoc/rc.rs
@@ -771,7 +771,7 @@ impl<T: Clone> RcFromSlice<T> for Rc<[T]> {
             };

             for (i, item) in v.iter().enumerate() {
-                ptr::write(elems.offset(i as isize), item.clone());
+                ptr::write(elems.add(i), item.clone());
                 guard.n_elems += 1;
             }
diff --git a/src/liballoc/slice.rs b/src/liballoc/slice.rs
index c27c596e7975a..9d442b3e00ca0 100644
--- a/src/liballoc/slice.rs
+++ b/src/liballoc/slice.rs
@@ -715,8 +715,8 @@ unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
     let len = v.len();
     let v = v.as_mut_ptr();
-    let v_mid = v.offset(mid as isize);
-    let v_end = v.offset(len as isize);
+    let v_mid = v.add(mid);
+    let v_end = v.add(len);

     // The merge process first copies the shorter run into `buf`.
     // Then it traces the newly copied
     // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
@@ -742,7 +742,7 @@ unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
         ptr::copy_nonoverlapping(v, buf, mid);
         hole = MergeHole {
             start: buf,
-            end: buf.offset(mid as isize),
+            end: buf.add(mid),
             dest: v,
         };
@@ -766,7 +766,7 @@ unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
         ptr::copy_nonoverlapping(v_mid, buf, len - mid);
         hole = MergeHole {
             start: buf,
-            end: buf.offset((len - mid) as isize),
+            end: buf.add(len - mid),
             dest: v_mid,
         };
diff --git a/src/liballoc/string.rs b/src/liballoc/string.rs
index dd559df08cce6..eabda7123dec0 100644
--- a/src/liballoc/string.rs
+++ b/src/liballoc/string.rs
@@ -1190,8 +1190,8 @@ impl String {
         let next = idx + ch.len_utf8();
         let len = self.len();
         unsafe {
-            ptr::copy(self.vec.as_ptr().offset(next as isize),
-                      self.vec.as_mut_ptr().offset(idx as isize),
+            ptr::copy(self.vec.as_ptr().add(next),
+                      self.vec.as_mut_ptr().add(idx),
                       len - next);
             self.vec.set_len(len - (next - idx));
         }
@@ -1232,8 +1232,8 @@ impl String {
                 del_bytes += ch_len;
             } else if del_bytes > 0 {
                 unsafe {
-                    ptr::copy(self.vec.as_ptr().offset(idx as isize),
-                              self.vec.as_mut_ptr().offset((idx - del_bytes) as isize),
+                    ptr::copy(self.vec.as_ptr().add(idx),
+                              self.vec.as_mut_ptr().add(idx - del_bytes),
                               ch_len);
                 }
             }
@@ -1289,11 +1289,11 @@ impl String {
         let amt = bytes.len();
         self.vec.reserve(amt);

-        ptr::copy(self.vec.as_ptr().offset(idx as isize),
-                  self.vec.as_mut_ptr().offset((idx + amt) as isize),
+        ptr::copy(self.vec.as_ptr().add(idx),
+                  self.vec.as_mut_ptr().add(idx + amt),
                   len - idx);
         ptr::copy(bytes.as_ptr(),
-                  self.vec.as_mut_ptr().offset(idx as isize),
+                  self.vec.as_mut_ptr().add(idx),
                   amt);
         self.vec.set_len(len + amt);
     }
diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs
index a00b6b4e435f0..2cd7898f4c781 100644
--- a/src/liballoc/sync.rs
+++ b/src/liballoc/sync.rs
@@ -672,7 +672,7 @@ impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
             };

             for (i, item) in v.iter().enumerate() {
-                ptr::write(elems.offset(i as isize), item.clone());
+                ptr::write(elems.add(i), item.clone());
                 guard.n_elems += 1;
             }
diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs
index cc913dfbb4b01..e9c1a3df51891 100644
--- a/src/liballoc/vec.rs
+++ b/src/liballoc/vec.rs
@@ -692,7 +692,7 @@ impl<T> Vec<T> {
     pub fn truncate(&mut self, len: usize) {
         let current_len = self.len;
         unsafe {
-            let mut ptr = self.as_mut_ptr().offset(self.len as isize);
+            let mut ptr = self.as_mut_ptr().add(self.len);
             // Set the final length at the end, keeping in mind that
             // dropping an element might panic. Works around a missed
             // optimization, as seen in the following issue:
@@ -856,7 +856,7 @@ impl<T> Vec<T> {
         // infallible
         // The spot to put the new value
         {
-            let p = self.as_mut_ptr().offset(index as isize);
+            let p = self.as_mut_ptr().add(index);
             // Shift everything over to make space. (Duplicating the
             // `index`th element into two consecutive places.)
             ptr::copy(p, p.offset(1), len - index);
@@ -891,7 +891,7 @@ impl<T> Vec<T> {
         let ret;
         {
             // the place we are taking from.
-            let ptr = self.as_mut_ptr().offset(index as isize);
+            let ptr = self.as_mut_ptr().add(index);
             // copy it out, unsafely having a copy of the value on
             // the stack and in the vector at the same time.
             ret = ptr::read(ptr);
@@ -1034,8 +1034,8 @@ impl<T> Vec<T> {
             let mut w: usize = 1;

             while r < ln {
-                let p_r = p.offset(r as isize);
-                let p_wm1 = p.offset((w - 1) as isize);
+                let p_r = p.add(r);
+                let p_wm1 = p.add(w - 1);
                 if !same_bucket(&mut *p_r, &mut *p_wm1) {
                     if r != w {
                         let p_w = p_wm1.offset(1);
@@ -1072,7 +1072,7 @@ impl<T> Vec<T> {
             self.reserve(1);
         }
         unsafe {
-            let end = self.as_mut_ptr().offset(self.len as isize);
+            let end = self.as_mut_ptr().add(self.len);
             ptr::write(end, value);
             self.len += 1;
         }
@@ -1196,7 +1196,7 @@ impl<T> Vec<T> {
             self.set_len(start);
             // Use the borrow in the IterMut to indicate borrowing behavior of the
             // whole Drain iterator (like &mut T).
-            let range_slice = slice::from_raw_parts_mut(self.as_mut_ptr().offset(start as isize),
+            let range_slice = slice::from_raw_parts_mut(self.as_mut_ptr().add(start),
                                                         end - start);
             Drain {
                 tail_start: end,
@@ -1290,7 +1290,7 @@ impl<T> Vec<T> {
             self.set_len(at);
             other.set_len(other_len);

-            ptr::copy_nonoverlapping(self.as_ptr().offset(at as isize),
+            ptr::copy_nonoverlapping(self.as_ptr().add(at),
                                      other.as_mut_ptr(),
                                      other.len());
         }
@@ -1473,7 +1473,7 @@ impl<T> Vec<T> {
         self.reserve(n);

         unsafe {
-            let mut ptr = self.as_mut_ptr().offset(self.len() as isize);
+            let mut ptr = self.as_mut_ptr().add(self.len());
             // Use SetLenOnDrop to work around bug where compiler
             // may not realize the store through `ptr` through self.set_len()
             // don't alias.
@@ -1799,7 +1799,7 @@ impl<T> IntoIterator for Vec<T> {
             let end = if mem::size_of::<T>() == 0 {
                 arith_offset(begin as *const i8, self.len() as isize) as *const T
             } else {
-                begin.offset(self.len() as isize) as *const T
+                begin.add(self.len()) as *const T
             };
             let cap = self.buf.cap();
             mem::forget(self);
@@ -1898,7 +1898,7 @@ impl<T, I> SpecExtend<T, I> for Vec<T>
         if let Some(additional) = high {
             self.reserve(additional);
             unsafe {
-                let mut ptr = self.as_mut_ptr().offset(self.len() as isize);
+                let mut ptr = self.as_mut_ptr().add(self.len());
                 let mut local_len = SetLenOnDrop::new(&mut self.len);
                 for element in iterator {
                     ptr::write(ptr, element);
@@ -2561,8 +2561,8 @@ impl<'a, T> Drop for Drain<'a, T> {
                 let start = source_vec.len();
                 let tail = self.tail_start;
                 if tail != start {
-                    let src = source_vec.as_ptr().offset(tail as isize);
-                    let dst = source_vec.as_mut_ptr().offset(start as isize);
+                    let src = source_vec.as_ptr().add(tail);
+                    let dst = source_vec.as_mut_ptr().add(start);
                     ptr::copy(src, dst, self.tail_len);
                 }
                 source_vec.set_len(start + self.tail_len);
@@ -2672,7 +2672,7 @@ impl<'a, T> Drain<'a, T> {
             let range_start = vec.len;
             let range_end = self.tail_start;
             let range_slice = slice::from_raw_parts_mut(
-                vec.as_mut_ptr().offset(range_start as isize),
+                vec.as_mut_ptr().add(range_start),
                 range_end - range_start);

             for place in range_slice {
@@ -2693,8 +2693,8 @@ impl<'a, T> Drain<'a, T> {
             vec.buf.reserve(used_capacity, extra_capacity);

             let new_tail_start = self.tail_start + extra_capacity;
-            let src = vec.as_ptr().offset(self.tail_start as isize);
-            let dst = vec.as_mut_ptr().offset(new_tail_start as isize);
+            let src = vec.as_ptr().add(self.tail_start);
+            let dst = vec.as_mut_ptr().add(new_tail_start);
             ptr::copy(src, dst, self.tail_len);
             self.tail_start = new_tail_start;
         }
diff --git a/src/liballoc_system/lib.rs b/src/liballoc_system/lib.rs
index c5e056f6b12be..753b6a5e29248 100644
--- a/src/liballoc_system/lib.rs
+++ b/src/liballoc_system/lib.rs
@@ -249,7 +249,7 @@ mod platform {
     }

     unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
-        let aligned = ptr.offset((align - (ptr as usize & (align - 1))) as isize);
+        let aligned = ptr.add(align - (ptr as usize & (align - 1)));
         *get_header(aligned) = Header(ptr);
         aligned
     }
diff --git a/src/libarena/lib.rs b/src/libarena/lib.rs
index 265721c749755..6ad703180c224 100644
--- a/src/libarena/lib.rs
+++ b/src/libarena/lib.rs
@@ -106,7 +106,7 @@ impl<T> TypedArenaChunk<T> {
             // A pointer as large as possible for zero-sized elements.
             !0 as *mut T
         } else {
-            self.start().offset(self.storage.cap() as isize)
+            self.start().add(self.storage.cap())
         }
     }
 }
@@ -179,7 +179,7 @@ impl<T> TypedArena<T> {
         unsafe {
             let start_ptr = self.ptr.get();
             let arena_slice = slice::from_raw_parts_mut(start_ptr, slice.len());
-            self.ptr.set(start_ptr.offset(arena_slice.len() as isize));
+            self.ptr.set(start_ptr.add(arena_slice.len()));
             arena_slice.copy_from_slice(slice);
             arena_slice
         }
diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs
index 9ddf902349dd2..7756a6f71dbd6 100644
--- a/src/libcore/intrinsics.rs
+++ b/src/libcore/intrinsics.rs
@@ -918,7 +918,7 @@ extern "rust-intrinsic" {
     ///     // treat it as "dead", and therefore, you only have two real
     ///     // mutable slices.
     ///     (slice::from_raw_parts_mut(ptr, mid),
-    ///      slice::from_raw_parts_mut(ptr.offset(mid as isize), len - mid))
+    ///      slice::from_raw_parts_mut(ptr.add(mid), len - mid))
     /// }
     /// }
     /// ```
diff --git a/src/libcore/marker.rs b/src/libcore/marker.rs
index d18e167fc3fa4..b44f6d9971b30 100644
--- a/src/libcore/marker.rs
+++ b/src/libcore/marker.rs
@@ -511,7 +511,7 @@ macro_rules! impls{
 ///     let ptr = vec.as_ptr();
 ///     Slice {
 ///         start: ptr,
-///         end: unsafe { ptr.offset(vec.len() as isize) },
+///         end: unsafe { ptr.add(vec.len()) },
 ///         phantom: PhantomData,
 ///     }
 /// }
diff --git a/src/libcore/ptr.rs b/src/libcore/ptr.rs
index 61033e7511253..e5b9c041799f3 100644
--- a/src/libcore/ptr.rs
+++ b/src/libcore/ptr.rs
@@ -226,8 +226,8 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
         // Declaring `t` here avoids aligning the stack when this loop is unused
         let mut t: Block = mem::uninitialized();
         let t = &mut t as *mut _ as *mut u8;
-        let x = x.offset(i as isize);
-        let y = y.offset(i as isize);
+        let x = x.add(i);
+        let y = y.add(i);

         // Swap a block of bytes of x & y, using t as a temporary buffer
         // This should be optimized into efficient SIMD operations where available
@@ -243,8 +243,8 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
         let rem = len - i;

         let t = &mut t as *mut _ as *mut u8;
-        let x = x.offset(i as isize);
-        let y = y.offset(i as isize);
+        let x = x.add(i);
+        let y = y.add(i);

         copy_nonoverlapping(x, t, rem);
         copy_nonoverlapping(y, x, rem);
@@ -613,7 +613,7 @@ impl<T: ?Sized> *const T {
     /// The compiler and standard library generally tries to ensure allocations
     /// never reach a size where an offset is a concern. For instance, `Vec`
     /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
-    /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
+    /// `vec.as_ptr().add(vec.len())` is always safe.
     ///
     /// Most platforms fundamentally can't even construct such an allocation.
     /// For instance, no known 64-bit platform can ever serve a request
@@ -1231,7 +1231,7 @@ impl<T: ?Sized> *const T {
     ///     let ptr = &x[n] as *const u8;
     ///     let offset = ptr.align_offset(align_of::<u16>());
     ///     if offset < x.len() - n - 1 {
-    ///         let u16_ptr = ptr.offset(offset as isize) as *const u16;
+    ///         let u16_ptr = ptr.add(offset) as *const u16;
     ///         assert_ne!(*u16_ptr, 500);
     ///     } else {
     ///         // while the pointer can be aligned via `offset`, it would point
@@ -1334,7 +1334,7 @@ impl<T: ?Sized> *mut T {
     /// The compiler and standard library generally tries to ensure allocations
     /// never reach a size where an offset is a concern. For instance, `Vec`
     /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so
-    /// `vec.as_ptr().offset(vec.len() as isize)` is always safe.
+    /// `vec.as_ptr().add(vec.len())` is always safe.
     ///
     /// Most platforms fundamentally can't even construct such an allocation.
     /// For instance, no known 64-bit platform can ever serve a request
@@ -2261,7 +2261,7 @@ impl<T: ?Sized> *mut T {
     ///     let ptr = &x[n] as *const u8;
     ///     let offset = ptr.align_offset(align_of::<u16>());
     ///     if offset < x.len() - n - 1 {
-    ///         let u16_ptr = ptr.offset(offset as isize) as *const u16;
+    ///         let u16_ptr = ptr.add(offset) as *const u16;
     ///         assert_ne!(*u16_ptr, 500);
     ///     } else {
     ///         // while the pointer can be aligned via `offset`, it would point
diff --git a/src/libcore/slice/memchr.rs b/src/libcore/slice/memchr.rs
index c9d3c7fea9839..cf95333af9cbb 100644
--- a/src/libcore/slice/memchr.rs
+++ b/src/libcore/slice/memchr.rs
@@ -72,8 +72,8 @@ pub fn memchr(x: u8, text: &[u8]) -> Option<usize> {
     if len >= 2 * usize_bytes {
         while offset <= len - 2 * usize_bytes {
             unsafe {
-                let u = *(ptr.offset(offset as isize) as *const usize);
-                let v = *(ptr.offset((offset + usize_bytes) as isize) as *const usize);
+                let u = *(ptr.add(offset) as *const usize);
+                let v = *(ptr.add(offset + usize_bytes) as *const usize);

                 // break if there is a matching byte
                 let zu = contains_zero_byte(u ^ repeated_x);
diff --git a/src/libcore/slice/mod.rs b/src/libcore/slice/mod.rs
index a3960556d3412..6e66def240cfa 100644
--- a/src/libcore/slice/mod.rs
+++ b/src/libcore/slice/mod.rs
@@ -383,7 +383,7 @@ impl<T> [T] {
     ///
     /// unsafe {
     ///     for i in 0..x.len() {
-    ///         assert_eq!(x.get_unchecked(i), &*x_ptr.offset(i as isize));
+    ///         assert_eq!(x.get_unchecked(i), &*x_ptr.add(i));
     ///     }
     /// }
     /// ```
@@ -410,7 +410,7 @@ impl<T> [T] {
     ///
     /// unsafe {
     ///     for i in 0..x.len() {
-    ///         *x_ptr.offset(i as isize) += 2;
+    ///         *x_ptr.add(i) += 2;
     ///     }
     /// }
     /// assert_eq!(x, &[3, 4, 6]);
@@ -546,9 +546,9 @@ impl<T> [T] {
             assume(!ptr.is_null());

             let end = if mem::size_of::<T>() == 0 {
-                (ptr as *const u8).wrapping_offset(self.len() as isize) as *const T
+                (ptr as *const u8).wrapping_add(self.len()) as *const T
             } else {
-                ptr.offset(self.len() as isize)
+                ptr.add(self.len())
             };

             Iter {
@@ -578,9 +578,9 @@ impl<T> [T] {
             assume(!ptr.is_null());

             let end = if mem::size_of::<T>() == 0 {
-                (ptr as *mut u8).wrapping_offset(self.len() as isize) as *mut T
+                (ptr as *mut u8).wrapping_add(self.len()) as *mut T
             } else {
-                ptr.offset(self.len() as isize)
+                ptr.add(self.len())
             };

             IterMut {
@@ -842,7 +842,7 @@ impl<T> [T] {
             assert!(mid <= len);

             (from_raw_parts_mut(ptr, mid),
-             from_raw_parts_mut(ptr.offset(mid as isize), len - mid))
+             from_raw_parts_mut(ptr.add(mid), len - mid))
         }
     }
@@ -1444,7 +1444,7 @@ impl<T> [T] {
         unsafe {
             let p = self.as_mut_ptr();
-            rotate::ptr_rotate(mid, p.offset(mid as isize), k);
+            rotate::ptr_rotate(mid, p.add(mid), k);
         }
     }
@@ -1485,7 +1485,7 @@ impl<T> [T] {
         unsafe {
             let p = self.as_mut_ptr();
-            rotate::ptr_rotate(mid, p.offset(mid as isize), k);
+            rotate::ptr_rotate(mid, p.add(mid), k);
         }
     }
@@ -1789,7 +1789,7 @@ impl<T> [T] {
             let (us_len, ts_len) = rest.align_to_offsets::<U>();
             (left,
              from_raw_parts(rest.as_ptr() as *const U, us_len),
-             from_raw_parts(rest.as_ptr().offset((rest.len() - ts_len) as isize), ts_len))
+             from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len))
         }
     }
@@ -1843,7 +1843,7 @@ impl<T> [T] {
             let mut_ptr = rest.as_mut_ptr();
             (left,
              from_raw_parts_mut(mut_ptr as *mut U, us_len),
-             from_raw_parts_mut(mut_ptr.offset((rest.len() - ts_len) as isize), ts_len))
+             from_raw_parts_mut(mut_ptr.add(rest.len() - ts_len), ts_len))
         }
     }
 }
@@ -2037,12 +2037,12 @@ impl<T> SliceIndex<[T]> for usize {
     #[inline]
     unsafe fn get_unchecked(self, slice: &[T]) -> &T {
-        &*slice.as_ptr().offset(self as isize)
+        &*slice.as_ptr().add(self)
     }

     #[inline]
     unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut T {
-        &mut *slice.as_mut_ptr().offset(self as isize)
+        &mut *slice.as_mut_ptr().add(self)
     }

     #[inline]
@@ -2086,12 +2086,12 @@ impl<T> SliceIndex<[T]> for ops::Range<usize> {
     #[inline]
     unsafe fn get_unchecked(self, slice: &[T]) -> &[T] {
-        from_raw_parts(slice.as_ptr().offset(self.start as isize), self.end - self.start)
+        from_raw_parts(slice.as_ptr().add(self.start), self.end - self.start)
     }

     #[inline]
     unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] {
-        from_raw_parts_mut(slice.as_mut_ptr().offset(self.start as isize), self.end - self.start)
+        from_raw_parts_mut(slice.as_mut_ptr().add(self.start), self.end - self.start)
     }

     #[inline]
@@ -2467,7 +2467,7 @@ macro_rules! iterator {
                 }
                 // We are in bounds. `offset` does the right thing even for ZSTs.
                 unsafe {
-                    let elem = Some(& $( $mut_ )* *self.ptr.offset(n as isize));
+                    let elem = Some(& $( $mut_ )* *self.ptr.add(n));
                     self.post_inc_start((n as isize).wrapping_add(1));
                     elem
                 }
@@ -3347,7 +3347,7 @@ impl<'a, T> FusedIterator for Windows<'a, T> {}
 #[doc(hidden)]
 unsafe impl<'a, T> TrustedRandomAccess for Windows<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
-        from_raw_parts(self.v.as_ptr().offset(i as isize), self.size)
+        from_raw_parts(self.v.as_ptr().add(i), self.size)
     }
     fn may_have_side_effect() -> bool { false }
 }
@@ -3474,7 +3474,7 @@ unsafe impl<'a, T> TrustedRandomAccess for Chunks<'a, T> {
             None => self.v.len(),
             Some(end) => cmp::min(end, self.v.len()),
         };
-        from_raw_parts(self.v.as_ptr().offset(start as isize), end - start)
+        from_raw_parts(self.v.as_ptr().add(start), end - start)
     }
     fn may_have_side_effect() -> bool { false }
 }
@@ -3593,7 +3593,7 @@ unsafe impl<'a, T> TrustedRandomAccess for ChunksMut<'a, T> {
             None => self.v.len(),
             Some(end) => cmp::min(end, self.v.len()),
         };
-        from_raw_parts_mut(self.v.as_mut_ptr().offset(start as isize), end - start)
+        from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start)
     }
     fn may_have_side_effect() -> bool { false }
 }
@@ -3716,7 +3716,7 @@ impl<'a, T> FusedIterator for ExactChunks<'a, T> {}
 unsafe impl<'a, T> TrustedRandomAccess for ExactChunks<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
         let start = i * self.chunk_size;
-        from_raw_parts(self.v.as_ptr().offset(start as isize), self.chunk_size)
+        from_raw_parts(self.v.as_ptr().add(start), self.chunk_size)
     }
     fn may_have_side_effect() -> bool { false }
 }
@@ -3831,7 +3831,7 @@ impl<'a, T> FusedIterator for ExactChunksMut<'a, T> {}
 unsafe impl<'a, T> TrustedRandomAccess for ExactChunksMut<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut [T] {
         let start = i * self.chunk_size;
-        from_raw_parts_mut(self.v.as_mut_ptr().offset(start as isize), self.chunk_size)
+        from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size)
     }
     fn may_have_side_effect() -> bool { false }
 }
@@ -4116,7 +4116,7 @@ impl_marker_for!(BytewiseEquality,
 #[doc(hidden)]
 unsafe impl<'a, T> TrustedRandomAccess for Iter<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a T {
-        &*self.ptr.offset(i as isize)
+        &*self.ptr.add(i)
     }
     fn may_have_side_effect() -> bool { false }
 }
@@ -4124,7 +4124,7 @@ unsafe impl<'a, T> TrustedRandomAccess for Iter<'a, T> {
 #[doc(hidden)]
 unsafe impl<'a, T> TrustedRandomAccess for IterMut<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut T {
-        &mut *self.ptr.offset(i as isize)
+        &mut *self.ptr.add(i)
     }
     fn may_have_side_effect() -> bool { false }
 }
diff --git a/src/libcore/slice/rotate.rs b/src/libcore/slice/rotate.rs
index 28ef53ccb5cb6..0d182b8497452 100644
--- a/src/libcore/slice/rotate.rs
+++ b/src/libcore/slice/rotate.rs
@@ -77,8 +77,8 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mid: *mut T, mut right: usize) {
         }

         ptr::swap_nonoverlapping(
-            mid.offset(-(left as isize)),
-            mid.offset((right-delta) as isize),
+            mid.sub(left),
+            mid.add(right - delta),
             delta);

         if left <= right {
@@ -91,15 +91,15 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mid: *mut T, mut right: usize) {
     let rawarray = RawArray::new();
     let buf = rawarray.ptr();

-    let dim = mid.offset(-(left as isize)).offset(right as isize);
+    let dim = mid.sub(left).add(right);
     if left <= right {
-        ptr::copy_nonoverlapping(mid.offset(-(left as isize)), buf, left);
-        ptr::copy(mid, mid.offset(-(left as isize)), right);
+        ptr::copy_nonoverlapping(mid.sub(left), buf, left);
+        ptr::copy(mid, mid.sub(left), right);
         ptr::copy_nonoverlapping(buf, dim, left);
     } else {
         ptr::copy_nonoverlapping(mid, buf, right);
-        ptr::copy(mid.offset(-(left as isize)), dim, left);
-        ptr::copy_nonoverlapping(buf, mid.offset(-(left as isize)), right);
+        ptr::copy(mid.sub(left), dim, left);
+        ptr::copy_nonoverlapping(buf, mid.sub(left), right);
     }
 }
diff --git a/src/libcore/slice/sort.rs b/src/libcore/slice/sort.rs
index 518d56095d601..e4c1fd03f9eb3 100644
--- a/src/libcore/slice/sort.rs
+++ b/src/libcore/slice/sort.rs
@@ -221,15 +221,15 @@ fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
     // 3. `end` - End pointer into the `offsets` array.
     // 4. `offsets` - Indices of out-of-order elements within the block.

-    // The current block on the left side (from `l` to `l.offset(block_l)`).
+    // The current block on the left side (from `l` to `l.add(block_l)`).
     let mut l = v.as_mut_ptr();
     let mut block_l = BLOCK;
     let mut start_l = ptr::null_mut();
     let mut end_l = ptr::null_mut();
     let mut offsets_l: [u8; BLOCK] = unsafe { mem::uninitialized() };

-    // The current block on the right side (from `r.offset(-block_r)` to `r`).
-    let mut r = unsafe { l.offset(v.len() as isize) };
+    // The current block on the right side (from `r.sub(block_r)` to `r`).
+    let mut r = unsafe { l.add(v.len()) };
     let mut block_r = BLOCK;
     let mut start_r = ptr::null_mut();
     let mut end_r = ptr::null_mut();
diff --git a/src/libcore/str/mod.rs b/src/libcore/str/mod.rs
index 810d19df0c5ba..64bdf4d9f486a 100644
--- a/src/libcore/str/mod.rs
+++ b/src/libcore/str/mod.rs
@@ -1518,12 +1518,12 @@ fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> {
                 let ptr = v.as_ptr();
                 let align = unsafe {
                     // the offset is safe, because `index` is guaranteed inbounds
-                    ptr.offset(index as isize).align_offset(usize_bytes)
+                    ptr.add(index).align_offset(usize_bytes)
                 };
                 if align == 0 {
                     while index < blocks_end {
                         unsafe {
-                            let block = ptr.offset(index as isize) as *const usize;
+                            let block = ptr.add(index) as *const usize;
                             // break if there is a nonascii byte
                             let zu = contains_nonascii(*block);
                             let zv = contains_nonascii(*block.offset(1));
@@ -1878,13 +1878,13 @@ mod traits {
         }
         #[inline]
         unsafe fn get_unchecked(self, slice: &str) -> &Self::Output {
-            let ptr = slice.as_ptr().offset(self.start as isize);
+            let ptr = slice.as_ptr().add(self.start);
             let len = self.end - self.start;
             super::from_utf8_unchecked(slice::from_raw_parts(ptr, len))
         }
         #[inline]
         unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output {
-            let ptr = slice.as_ptr().offset(self.start as isize);
+            let ptr = slice.as_ptr().add(self.start);
             let len = self.end - self.start;
             super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr as *mut u8, len))
         }
@@ -1973,13 +1973,13 @@ mod traits {
         }
         #[inline]
         unsafe fn get_unchecked(self, slice: &str) -> &Self::Output {
-            let ptr = slice.as_ptr().offset(self.start as isize);
+            let ptr = slice.as_ptr().add(self.start);
             let len = slice.len() - self.start;
             super::from_utf8_unchecked(slice::from_raw_parts(ptr, len))
         }
         #[inline]
         unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output {
-            let ptr = slice.as_ptr().offset(self.start as isize);
+            let ptr = slice.as_ptr().add(self.start);
             let len = slice.len() - self.start;
             super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr as *mut u8, len))
         }
@@ -2573,7 +2573,7 @@ impl str {
         unsafe {
             (from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, mid)),
              from_utf8_unchecked_mut(slice::from_raw_parts_mut(
-                 ptr.offset(mid as isize),
+                 ptr.add(mid),
                  len - mid
             )))
         }
diff --git a/src/libpanic_unwind/dwarf/mod.rs b/src/libpanic_unwind/dwarf/mod.rs
index 5c05ac11d307d..7e0c32fe03d8a 100644
--- a/src/libpanic_unwind/dwarf/mod.rs
+++ b/src/libpanic_unwind/dwarf/mod.rs
@@ -38,7 +38,7 @@ impl DwarfReader {
     // telling the backend to generate "misalignment-safe" code.
     pub unsafe fn read<T: Copy>(&mut self) -> T {
         let Unaligned(result) = *(self.ptr as *const Unaligned<T>);
-        self.ptr = self.ptr.offset(mem::size_of::<T>() as isize);
+        self.ptr = self.ptr.add(mem::size_of::<T>());
         result
     }
diff --git a/src/librustc_data_structures/array_vec.rs b/src/librustc_data_structures/array_vec.rs
index 56bb961324210..45fb565706180 100644
--- a/src/librustc_data_structures/array_vec.rs
+++ b/src/librustc_data_structures/array_vec.rs
@@ -139,7 +139,7 @@ impl<A: Array> ArrayVec<A> {
             // whole Drain iterator (like &mut T).
             let range_slice = {
                 let arr = &mut self.values as &mut [ManuallyDrop<<A as Array>::Element>];
-                slice::from_raw_parts_mut(arr.as_mut_ptr().offset(start as isize),
+                slice::from_raw_parts_mut(arr.as_mut_ptr().add(start),
                                           end - start)
             };
             Drain {
@@ -262,8 +262,8 @@ impl<'a, A: Array> Drop for Drain<'a, A> {
             {
                 let arr =
                     &mut source_array_vec.values as &mut [ManuallyDrop<<A as Array>::Element>];
-                let src = arr.as_ptr().offset(tail as isize);
-                let dst = arr.as_mut_ptr().offset(start as isize);
+                let src = arr.as_ptr().add(tail);
+                let dst = arr.as_mut_ptr().add(start);
                 ptr::copy(src, dst, self.tail_len);
             };
             source_array_vec.set_len(start + self.tail_len);
diff --git a/src/librustc_data_structures/small_vec.rs b/src/librustc_data_structures/small_vec.rs
index 6f101b20d8806..689aad25b431d 100644
--- a/src/librustc_data_structures/small_vec.rs
+++ b/src/librustc_data_structures/small_vec.rs
@@ -125,7 +125,7 @@ impl<A: Array> SmallVec<A> {
         // infallible
         // The spot to put the new value
         {
-            let p = self.as_mut_ptr().offset(index as isize);
+            let p = self.as_mut_ptr().add(index);
             // Shift everything over to make space. (Duplicating the
             // `index`th element into two consecutive places.)
             ptr::copy(p, p.offset(1), len - index);
diff --git a/src/libstd/collections/hash/table.rs b/src/libstd/collections/hash/table.rs
index 2b319186a8db2..768357ec8dc41 100644
--- a/src/libstd/collections/hash/table.rs
+++ b/src/libstd/collections/hash/table.rs
@@ -234,10 +234,10 @@ fn can_alias_safehash_as_hash() {
 // make a RawBucket point to invalid memory using safe code.
 impl<K, V> RawBucket<K, V> {
     unsafe fn hash(&self) -> *mut HashUint {
-        self.hash_start.offset(self.idx as isize)
+        self.hash_start.add(self.idx)
     }
     unsafe fn pair(&self) -> *mut (K, V) {
-        self.pair_start.offset(self.idx as isize) as *mut (K, V)
+        self.pair_start.add(self.idx) as *mut (K, V)
     }
     unsafe fn hash_pair(&self) -> (*mut HashUint, *mut (K, V)) {
         (self.hash(), self.pair())
diff --git a/src/libstd/sys/windows/pipe.rs b/src/libstd/sys/windows/pipe.rs
index df1dd7401af61..4b19519a57a87 100644
--- a/src/libstd/sys/windows/pipe.rs
+++ b/src/libstd/sys/windows/pipe.rs
@@ -359,6 +359,6 @@ unsafe fn slice_to_end(v: &mut Vec<u8>) -> &mut [u8] {
     if v.capacity() == v.len() {
         v.reserve(1);
     }
-    slice::from_raw_parts_mut(v.as_mut_ptr().offset(v.len() as isize),
+    slice::from_raw_parts_mut(v.as_mut_ptr().add(v.len()),
                               v.capacity() - v.len())
 }
diff --git a/src/libstd/sys_common/wtf8.rs b/src/libstd/sys_common/wtf8.rs
index 45204b56ead36..8725abe741679 100644
--- a/src/libstd/sys_common/wtf8.rs
+++ b/src/libstd/sys_common/wtf8.rs
@@ -772,7 +772,7 @@ pub fn is_code_point_boundary(slice: &Wtf8, index: usize) -> bool {
 pub unsafe fn slice_unchecked(s: &Wtf8, begin: usize, end: usize) -> &Wtf8 {
     // memory layout of an &[u8] and &Wtf8 are the same
     Wtf8::from_bytes_unchecked(slice::from_raw_parts(
-        s.bytes.as_ptr().offset(begin as isize),
+        s.bytes.as_ptr().add(begin),
         end - begin
     ))
 }
diff --git a/src/test/run-pass/method-mut-self-modifies-mut-slice-lvalue.rs b/src/test/run-pass/method-mut-self-modifies-mut-slice-lvalue.rs
index 4de8f6a719415..220482f57eb01 100644
--- a/src/test/run-pass/method-mut-self-modifies-mut-slice-lvalue.rs
+++ b/src/test/run-pass/method-mut-self-modifies-mut-slice-lvalue.rs
@@ -27,7 +27,7 @@ impl<'a> MyWriter for &'a mut [u8] {
         let write_len = buf.len();
         unsafe {
             *self = slice::from_raw_parts_mut(
-                self.as_mut_ptr().offset(write_len as isize),
+                self.as_mut_ptr().add(write_len),
                 self.len() - write_len
             );
         }
diff --git a/src/test/run-pass/realloc-16687.rs b/src/test/run-pass/realloc-16687.rs
index 61ef386442bb9..c4cae1e1d611d 100644
--- a/src/test/run-pass/realloc-16687.rs
+++ b/src/test/run-pass/realloc-16687.rs
@@ -37,8 +37,8 @@ unsafe fn test_triangle() -> bool {
         for i in 0..COUNT / 2 {
             let (p0, p1, size) = (ascend[2*i], ascend[2*i+1], idx_to_size(i));
             for j in 0..size {
-                assert_eq!(*p0.offset(j as isize), i as u8);
-                assert_eq!(*p1.offset(j as isize), i as u8);
+                assert_eq!(*p0.add(j), i as u8);
+                assert_eq!(*p1.add(j), i as u8);
             }
         }
     }
@@ -100,8 +100,8 @@ unsafe fn test_triangle() -> bool {
         for i in 0..COUNT / 2 {
             let (p0, p1, size) = (ascend[2*i], ascend[2*i+1], idx_to_size(i));
             for j in 0..size {
-                *p0.offset(j as isize) = i as u8;
-                *p1.offset(j as isize) = i as u8;
+                *p0.add(j) = i as u8;
+                *p1.add(j) = i as u8;
             }
         }
diff --git a/src/test/run-pass/running-with-no-runtime.rs b/src/test/run-pass/running-with-no-runtime.rs
index a0b83ba4c24a5..d349519478ae4 100644
--- a/src/test/run-pass/running-with-no-runtime.rs
+++ b/src/test/run-pass/running-with-no-runtime.rs
@@ -36,7 +36,7 @@ fn start(argc: isize, argv: *const *const u8) -> isize {
     let args = unsafe {
         (0..argc as usize).map(|i| {
-            let ptr = *argv.offset(i as isize) as *const _;
+            let ptr = *argv.add(i) as *const _;
             CStr::from_ptr(ptr).to_bytes().to_vec()
         }).collect::<Vec<_>>()
     };