//! The virtual memory representation of the MIR interpreter.

+use std::borrow::Cow;
+use std::convert::TryFrom;
+use std::iter;
+use std::ops::{Deref, DerefMut, Range};
+
+use rustc_ast::ast::Mutability;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_target::abi::HasDataLayout;
+
use super::{
    read_target_uint, write_target_uint, AllocId, InterpResult, Pointer, Scalar, ScalarMaybeUndef,
};

use crate::ty::layout::{Align, Size};

-use rustc_ast::ast::Mutability;
-use rustc_data_structures::sorted_map::SortedMap;
-use rustc_target::abi::HasDataLayout;
-use std::borrow::Cow;
-use std::iter;
-use std::ops::{Deref, DerefMut, Range};
-
// NOTE: When adding new fields, make sure to adjust the `Snapshot` impl in
// `src/librustc_mir/interpret/snapshot.rs`.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
@@ -90,7 +92,7 @@ impl<Tag> Allocation<Tag> {
    /// Creates a read-only allocation initialized by the given bytes
    pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self {
        let bytes = slice.into().into_owned();
-        let size = Size::from_bytes(bytes.len() as u64);
+        let size = Size::from_bytes(bytes.len());
        Self {
            bytes,
            relocations: Relocations::new(),
@@ -107,9 +109,8 @@ impl<Tag> Allocation<Tag> {
    }

    pub fn undef(size: Size, align: Align) -> Self {
-        assert_eq!(size.bytes() as usize as u64, size.bytes());
        Allocation {
-            bytes: vec![0; size.bytes() as usize],
+            bytes: vec![0; size.bytes_usize()],
            relocations: Relocations::new(),
            undef_mask: UndefMask::new(size, false),
            size,
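The new `bytes_usize` helper replaces the assert-then-cast dance throughout this file. Its definition is not part of this diff; a minimal standalone sketch of what it presumably does, inferred from how it is used here (the real method lives on `Size` itself):

use std::convert::TryFrom;

// Presumed shape of the helper; panics on hosts where the value does
// not fit in `usize`, instead of silently truncating as `as usize` would.
fn bytes_usize(size_in_bytes: u64) -> usize {
    usize::try_from(size_in_bytes).unwrap()
}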
@@ -152,7 +153,7 @@ impl Allocation<(), ()> {
/// Raw accessors. Provide access to otherwise private bytes.
impl<Tag, Extra> Allocation<Tag, Extra> {
    pub fn len(&self) -> usize {
-        self.size.bytes() as usize
+        self.size.bytes_usize()
    }

    /// Looks at a slice which may describe undefined bytes or describe a relocation. This differs
@@ -183,20 +184,15 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
    #[inline]
    fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
        let end = offset + size; // This does overflow checking.
-        assert_eq!(
-            end.bytes() as usize as u64,
-            end.bytes(),
-            "cannot handle this access on this host architecture"
-        );
-        let end = end.bytes() as usize;
+        let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
        assert!(
            end <= self.len(),
            "Out-of-bounds access at offset {}, size {} in allocation of size {}",
            offset.bytes(),
            size.bytes(),
            self.len()
        );
-        (offset.bytes() as usize)..end
+        offset.bytes_usize()..end
    }
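The removed four-line `assert_eq!` performed the same check that `usize::try_from` does directly. A self-contained illustration of the two styles (function name is illustrative):

use std::convert::TryFrom;

fn to_host_index(end: u64) -> usize {
    // Old: cast first, then assert that the round-trip survived.
    //     assert_eq!(end as usize as u64, end, "cannot handle this access ...");
    //     end as usize
    // New: one fallible conversion that carries its own error message.
    usize::try_from(end).expect("access too big for this host architecture")
}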

    /// The last argument controls whether we error out when there are undefined
@@ -294,11 +290,10 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
    ) -> InterpResult<'tcx, &[u8]> {
-        assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
-        let offset = ptr.offset.bytes() as usize;
+        let offset = ptr.offset.bytes_usize();
        Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
            Some(size) => {
-                let size_with_null = Size::from_bytes((size + 1) as u64);
+                let size_with_null = Size::from_bytes(size) + Size::from_bytes(1);
                // Go through `get_bytes` for checks and AllocationExtra hooks.
                // We read the null, so we include it in the request, but we want it removed
                // from the result, so we do subslicing.
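The old `Size::from_bytes((size + 1) as u64)` had a latent issue: `size + 1` happens on `usize`, where overflow wraps silently in release builds. `Size` addition, by contrast, is overflow-checked in every build profile (consistent with the `// This does overflow checking.` comment in `check_bounds` above), so the new form panics rather than wrapping. A hypothetical worst case for the old code:

fn old_style_worst_case() {
    let size = usize::MAX;
    // In a release build, the old `size + 1` wraps to 0 before the cast:
    let wrapped = size.wrapping_add(1) as u64;
    assert_eq!(wrapped, 0);
    // `Size::from_bytes(size) + Size::from_bytes(1)` panics here instead.
}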
@@ -343,7 +338,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
        let (lower, upper) = src.size_hint();
        let len = upper.expect("can only write bounded iterators");
        assert_eq!(lower, len, "can only write iterators with a precise length");
-        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len as u64))?;
+        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len))?;
        // `zip` would stop when the first iterator ends; we want to definitely
        // cover all of `bytes`.
        for dest in bytes {
@@ -386,7 +381,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
        } else {
            match self.relocations.get(&ptr.offset) {
                Some(&(tag, alloc_id)) => {
-                    let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag);
+                    let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits), tag);
                    return Ok(ScalarMaybeUndef::Scalar(ptr.into()));
                }
                None => {}
433
428
} ;
434
429
435
430
let bytes = match val. to_bits_or_ptr ( type_size, cx) {
436
- Err ( val) => val. offset . bytes ( ) as u128 ,
431
+ Err ( val) => u128 :: from ( val. offset . bytes ( ) ) ,
437
432
Ok ( data) => data,
438
433
} ;
439
434
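`u128::from(...)` is preferred over `as u128` because `From` is only implemented for lossless conversions: if the offset's type ever changes to something that does not widen cleanly, the code stops compiling instead of silently producing a different value. A quick sketch of the distinction:

fn widen(offset: u64) -> u128 {
    // `From` exists only for lossless conversions, so this line stops
    // compiling if the source type ever changes incompatibly;
    // `offset as u128` would keep compiling and could truncate.
    u128::from(offset)
}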
@@ -524,7 +519,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
            )
        };
        let start = ptr.offset;
-        let end = start + size;
+        let end = start + size; // `Size` addition

        // Mark parts of the outermost relocations as undefined if they partially fall outside the
        // given range.
@@ -563,7 +558,7 @@ impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
    #[inline]
    fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
        self.undef_mask
-            .is_range_defined(ptr.offset, ptr.offset + size)
+            .is_range_defined(ptr.offset, ptr.offset + size) // `Size` addition
            .or_else(|idx| throw_ub!(InvalidUndefBytes(Some(Pointer::new(ptr.alloc_id, idx)))))
    }
@@ -643,7 +638,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
        if defined.ranges.len() <= 1 {
            self.undef_mask.set_range_inbounds(
                dest.offset,
-                dest.offset + size * repeat,
+                dest.offset + size * repeat, // `Size` operations
                defined.initial,
            );
            return;
@@ -721,10 +716,10 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
        for i in 0..length {
            new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                // compute offset for current repetition
-                let dest_offset = dest.offset + (i * size);
+                let dest_offset = dest.offset + size * i; // `Size` operations
                (
                    // shift offsets from source allocation to destination allocation
-                    offset + dest_offset - src.offset,
+                    (offset + dest_offset) - src.offset, // `Size` operations
                    reloc,
                )
            }));
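The added parentheses do not change evaluation order (`+` and `-` are left-associative anyway); they document that each `Size` operation is individually overflow-checked, so the checked addition completes before the checked subtraction. A rough model of the same discipline with explicit checked ops on `u64` (names illustrative):

fn shifted_offset(offset: u64, dest_offset: u64, src_offset: u64) -> u64 {
    // Overflow at either step yields `None`, turned into a panic by
    // `expect` instead of wrapping silently.
    offset
        .checked_add(dest_offset)
        .and_then(|sum| sum.checked_sub(src_offset))
        .expect("relocation offset arithmetic overflowed")
}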
@@ -861,18 +856,18 @@ impl UndefMask {
        if amount.bytes() == 0 {
            return;
        }
-        let unused_trailing_bits = self.blocks.len() as u64 * Self::BLOCK_SIZE - self.len.bytes();
+        let unused_trailing_bits =
+            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
        if amount.bytes() > unused_trailing_bits {
            let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
-            assert_eq!(additional_blocks as usize as u64, additional_blocks);
            self.blocks.extend(
                // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
-                iter::repeat(0).take(additional_blocks as usize),
+                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
            );
        }
        let start = self.len;
        self.len += amount;
-        self.set_range_inbounds(start, start + amount, new_state);
+        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
    }
}
@@ -881,7 +876,5 @@ fn bit_index(bits: Size) -> (usize, usize) {
    let bits = bits.bytes();
    let a = bits / UndefMask::BLOCK_SIZE;
    let b = bits % UndefMask::BLOCK_SIZE;
-    assert_eq!(a as usize as u64, a);
-    assert_eq!(b as usize as u64, b);
-    (a as usize, b as usize)
+    (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
}
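For orientation, a usage sketch of `bit_index`, assuming `UndefMask::BLOCK_SIZE` is 64 (one `u64` block per 64 bits of the mask; the constant's value is not shown in this diff):

#[test]
fn bit_index_example() {
    // Bit 130 lands in block 130 / 64 = 2, at bit position 130 % 64 = 2.
    assert_eq!(bit_index(Size::from_bytes(130)), (2, 2));
}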