@@ -27,6 +27,8 @@ use core::cmp::min;
 use core::mem::size_of;
 use {RngCore, BlockRngCore, CryptoRng, SeedableRng, Error};
 
+#[cfg(feature="serde1")] use serde::{Serialize, Deserialize};
+
 /// Implement `next_u64` via `next_u32`, little-endian order.
 pub fn next_u64_via_u32<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
     // Use LE; we explicitly generate one value before the next.
@@ -184,10 +186,14 @@ pub fn next_u64_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
 /// [`RngCore`]: ../RngCore.t.html
 /// [`SeedableRng`]: ../SeedableRng.t.html
 #[derive(Clone)]
+#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
 pub struct BlockRng<R: BlockRngCore + ?Sized> {
-    pub results: R::Results,
-    pub index: usize,
-    pub core: R,
+    #[cfg_attr(feature="serde1", serde(bound(
+        serialize = "R::Results: Serialize",
+        deserialize = "R::Results: Deserialize<'de>")))]
+    results: R::Results,
+    index: usize,
+    core: R,
 }
 
 // Custom Debug implementation that does not expose the contents of `results`.
@@ -201,6 +207,35 @@ impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng<R> {
     }
 }
 
+impl<R: BlockRngCore> BlockRng<R> {
+    /// Create a new `BlockRng` from an existing RNG implementing
+    /// `BlockRngCore`. Results will be generated on first use.
+    pub fn new(core: R) -> BlockRng<R> {
+        let results_empty = R::Results::default();
+        BlockRng {
+            core,
+            index: results_empty.as_ref().len(),
+            results: results_empty,
+        }
+    }
+
+    /// Return a reference to the wrapped `BlockRngCore`.
+    pub fn inner(&self) -> &R {
+        &self.core
+    }
+
+    /// Return a mutable reference to the wrapped `BlockRngCore`.
+    pub fn inner_mut(&mut self) -> &mut R {
+        &mut self.core
+    }
+
+    // Reset the number of available results.
+    // This will force a new set of results to be generated on next use.
+    pub fn reset(&mut self) {
+        self.index = self.results.as_ref().len();
+    }
+}
+
 impl<R: BlockRngCore<Item=u32>> RngCore for BlockRng<R>
 where <R as BlockRngCore>::Results: AsRef<[u32]>
 {
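The hunks above make `BlockRng`'s fields private and add `new`, `inner`, `inner_mut` and `reset`. As a rough illustration of how the buffering API fits together, here is a minimal sketch (not part of this commit): a toy `BlockRngCore` implementation wrapped via `BlockRng::new`. The name `CounterCore`, the `[u32; 16]` buffer and the `rand_core::block` import path are assumptions for the example; in this tree the types live in the module being patched.

use rand_core::block::{BlockRng, BlockRngCore};
use rand_core::RngCore;

/// Toy block generator: fills its buffer with consecutive integers.
/// Not a real RNG; only meant to show the `BlockRngCore` plumbing.
struct CounterCore {
    state: u32,
}

impl BlockRngCore for CounterCore {
    type Item = u32;
    type Results = [u32; 16];

    fn generate(&mut self, results: &mut Self::Results) {
        for r in results.iter_mut() {
            self.state = self.state.wrapping_add(1);
            *r = self.state;
        }
    }
}

fn main() {
    // `new` leaves the buffer marked empty; the first use calls `generate`,
    // and later calls drain the cached block until it is exhausted.
    let mut rng = BlockRng::new(CounterCore { state: 0 });
    let a = rng.next_u32();
    let b = rng.next_u64();
    let mut buf = [0u8; 32];
    rng.fill_bytes(&mut buf);
    println!("{} {} {:?}", a, b, &buf[..8]);
}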
@@ -317,21 +352,190 @@ impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng<R> {
     type Seed = R::Seed;
 
     fn from_seed(seed: Self::Seed) -> Self {
+        Self::new(R::from_seed(seed))
+    }
+
+    fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> {
+        Ok(Self::new(R::from_rng(rng)?))
+    }
+}
+
+
+
+/// Wrapper around PRNGs that implement [`BlockRngCore`] to keep a results
+/// buffer and offer the methods from [`RngCore`].
+///
+/// This is similar to [`BlockRng`], but specialized for algorithms that operate
+/// on `u64` values.
+///
+/// [`BlockRngCore`]: ../BlockRngCore.t.html
+/// [`RngCore`]: ../RngCore.t.html
+/// [`BlockRng`]: struct.BlockRng.html
+#[derive(Clone)]
+#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
+pub struct BlockRng64<R: BlockRngCore + ?Sized> {
+    #[cfg_attr(feature="serde1", serde(bound(
+        serialize = "R::Results: Serialize",
+        deserialize = "R::Results: Deserialize<'de>")))]
+    results: R::Results,
+    index: usize,
+    half_used: bool, // true if only half of the previous result is used
+    core: R,
+}
+
+// Custom Debug implementation that does not expose the contents of `results`.
+impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng64<R> {
+    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+        fmt.debug_struct("BlockRng64")
+           .field("core", &self.core)
+           .field("result_len", &self.results.as_ref().len())
+           .field("index", &self.index)
+           .field("half_used", &self.half_used)
+           .finish()
+    }
+}
+
+impl<R: BlockRngCore> BlockRng64<R> {
+    /// Create a new `BlockRng64` from an existing RNG implementing
+    /// `BlockRngCore`. Results will be generated on first use.
+    pub fn new(core: R) -> BlockRng64<R> {
         let results_empty = R::Results::default();
-        Self {
-            core: R::from_seed(seed),
-            index: results_empty.as_ref().len(), // generate on first use
+        BlockRng64 {
+            core,
+            index: results_empty.as_ref().len(),
+            half_used: false,
             results: results_empty,
         }
     }
 
+    /// Return a mutable reference to the wrapped `BlockRngCore`.
+    pub fn inner(&mut self) -> &mut R {
+        &mut self.core
+    }
+
+    // Reset the number of available results.
+    // This will force a new set of results to be generated on next use.
+    pub fn reset(&mut self) {
+        self.index = self.results.as_ref().len();
+    }
+}
+
+impl<R: BlockRngCore<Item=u64>> RngCore for BlockRng64<R>
+where <R as BlockRngCore>::Results: AsRef<[u64]>
+{
+    #[inline(always)]
+    fn next_u32(&mut self) -> u32 {
+        let mut index = self.index * 2 - self.half_used as usize;
+        if index >= self.results.as_ref().len() * 2 {
+            self.core.generate(&mut self.results);
+            self.index = 0;
+            // `self.half_used` is by definition `false`
+            self.half_used = false;
+            index = 0;
+        }
+
+        self.half_used = !self.half_used;
+        self.index += self.half_used as usize;
+
+        // Index as if this is a u32 slice.
+        unsafe {
+            let results =
+                &*(self.results.as_ref() as *const [u64] as *const [u32]);
+            if cfg!(target_endian = "little") {
+                *results.get_unchecked(index)
+            } else {
+                *results.get_unchecked(index ^ 1)
+            }
+        }
+    }
+
+    #[inline(always)]
+    fn next_u64(&mut self) -> u64 {
+        if self.index >= self.results.as_ref().len() {
+            self.core.generate(&mut self.results);
+            self.index = 0;
+        }
+
+        let value = self.results.as_ref()[self.index];
+        self.index += 1;
+        self.half_used = false;
+        value
+    }
+
+    // As an optimization we try to write directly into the output buffer.
+    // This is only enabled for little-endian platforms where unaligned writes
+    // are known to be safe and fast.
+    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+    fn fill_bytes(&mut self, dest: &mut [u8]) {
+        let mut filled = 0;
+        self.half_used = false;
+
+        // Continue filling from the current set of results
+        if self.index < self.results.as_ref().len() {
+            let (consumed_u64, filled_u8) =
+                fill_via_u64_chunks(&self.results.as_ref()[self.index..],
+                                    dest);
+
+            self.index += consumed_u64;
+            filled += filled_u8;
+        }
+
+        let len_remainder =
+            (dest.len() - filled) % (self.results.as_ref().len() * 8);
+        let end_direct = dest.len() - len_remainder;
+
+        while filled < end_direct {
+            let dest_u64: &mut R::Results = unsafe {
+                ::core::mem::transmute(dest[filled..].as_mut_ptr())
+            };
+            self.core.generate(dest_u64);
+            filled += self.results.as_ref().len() * 8;
+        }
+        self.index = self.results.as_ref().len();
+
+        if len_remainder > 0 {
+            self.core.generate(&mut self.results);
+            let (consumed_u64, _) =
+                fill_via_u64_chunks(&mut self.results.as_ref(),
+                                    &mut dest[filled..]);
+
+            self.index = consumed_u64;
+        }
+    }
+
+    #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+    fn fill_bytes(&mut self, dest: &mut [u8]) {
+        let mut read_len = 0;
+        self.half_used = false;
+        while read_len < dest.len() {
+            if self.index as usize >= self.results.as_ref().len() {
+                self.core.generate(&mut self.results);
+                self.index = 0;
+            }
+
+            let (consumed_u64, filled_u8) =
+                fill_via_u64_chunks(&self.results.as_ref()[self.index as usize..],
+                                    &mut dest[read_len..]);
+
+            self.index += consumed_u64;
+            read_len += filled_u8;
+        }
+    }
+
+    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+        Ok(self.fill_bytes(dest))
+    }
+}
+
+impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng64<R> {
+    type Seed = R::Seed;
+
+    fn from_seed(seed: Self::Seed) -> Self {
+        Self::new(R::from_seed(seed))
+    }
+
     fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> {
-        let results_empty = R::Results::default();
-        Ok(Self {
-            core: R::from_rng(rng)?,
-            index: results_empty.as_ref().len(), // generate on first use
-            results: results_empty,
-        })
+        Ok(Self::new(R::from_rng(rng)?))
     }
 }
 
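As a companion to the sketch after the `BlockRng` methods above, here is a hypothetical use of the new `BlockRng64` wrapper (again with made-up names and current-layout import paths, not code from this commit). It illustrates the `half_used` bookkeeping: two `next_u32` calls consume the two halves of one buffered `u64`, while `next_u64` always starts at the next whole word.

use rand_core::block::{BlockRng64, BlockRngCore};
use rand_core::RngCore;

/// Toy 64-bit block generator, mirroring the 32-bit example above.
struct Counter64Core {
    state: u64,
}

impl BlockRngCore for Counter64Core {
    type Item = u64;
    type Results = [u64; 8];

    fn generate(&mut self, results: &mut Self::Results) {
        for r in results.iter_mut() {
            self.state = self.state.wrapping_add(1);
            *r = self.state;
        }
    }
}

fn main() {
    let mut rng = BlockRng64::new(Counter64Core { state: 0 });
    // Two `next_u32` calls split one buffered `u64` (tracked via `half_used`);
    // the following `next_u64` reads the next whole word from the buffer.
    let lo = rng.next_u32();
    let hi = rng.next_u32();
    let whole = rng.next_u64();
    println!("{:08x} {:08x} {:016x}", lo, hi, whole);
}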