@@ -30,15 +30,18 @@ struct Biases {
30
30
new_key : f64 ,
31
31
/// When exercising a new commit, the probability of causing it to crash.
32
32
crash : f64 ,
33
+ /// Instead of exercising a new commit, this is a probability of executing a rollback.
34
+ rollback : f64 ,
33
35
}
34
36
35
37
impl Biases {
36
- fn new ( delete : u8 , overflow : u8 , new_key : u8 , crash : u8 ) -> Self {
38
+ fn new ( delete : u8 , overflow : u8 , new_key : u8 , crash : u8 , rollback : u8 ) -> Self {
37
39
Self {
38
40
delete : ( delete as f64 ) / 100.0 ,
39
41
overflow : ( overflow as f64 ) / 100.0 ,
40
42
new_key : ( new_key as f64 ) / 100.0 ,
41
43
crash : ( crash as f64 ) / 100.0 ,
44
+ rollback : ( rollback as f64 ) / 100.0 ,
42
45
}
43
46
}
44
47
}
@@ -68,8 +71,8 @@ struct WorkloadState {
68
71
/// If true, the size of each commit will be within 0 and self.size,
69
72
/// otherwise it will always be workload-size.
70
73
random_size : bool ,
71
- /// The values that were committed .
72
- committed : Snapshot ,
74
+ /// All committed key values, divided into Snapshots .
75
+ committed : Vec < Snapshot > ,
73
76
}
74
77
75
78
impl WorkloadState {
@@ -79,7 +82,7 @@ impl WorkloadState {
79
82
biases,
80
83
size,
81
84
random_size,
82
- committed : Snapshot :: empty ( ) ,
85
+ committed : vec ! [ Snapshot :: empty( ) ] ,
83
86
}
84
87
}
85
88
@@ -90,7 +93,7 @@ impl WorkloadState {
90
93
}
91
94
92
95
fn gen_commit ( & mut self ) -> ( Snapshot , Vec < KeyValueChange > ) {
93
- let mut snapshot = self . committed . clone ( ) ;
96
+ let mut snapshot = self . last_snapshot ( ) . clone ( ) ;
94
97
snapshot. sync_seqn += 1 ;
95
98
96
99
let num_changes = if self . random_size {
@@ -129,27 +132,27 @@ impl WorkloadState {
129
132
//
130
133
// - Pick a key that was already generated before, but generate a key that shares some bits.
131
134
let mut key = [ 0 ; 32 ] ;
132
- if !self . committed . state . is_empty ( ) && self . rng . gen_bool ( self . biases . delete ) {
135
+ if !self . last_snapshot ( ) . state . is_empty ( ) && self . rng . gen_bool ( self . biases . delete ) {
133
136
loop {
134
137
self . rng . fill_bytes ( & mut key) ;
135
- if let Some ( ( next_key, Some ( _) ) ) = self . committed . state . get_next ( & key) {
138
+ if let Some ( ( next_key, Some ( _) ) ) = self . last_snapshot ( ) . state . get_next ( & key) {
136
139
return KeyValueChange :: Delete ( * next_key) ;
137
140
}
138
141
}
139
142
}
140
143
141
- if self . committed . state . is_empty ( ) || self . rng . gen_bool ( self . biases . new_key ) {
144
+ if self . last_snapshot ( ) . state . is_empty ( ) || self . rng . gen_bool ( self . biases . new_key ) {
142
145
loop {
143
146
self . rng . fill_bytes ( & mut key) ;
144
- if !self . committed . state . contains_key ( & key) {
147
+ if !self . last_snapshot ( ) . state . contains_key ( & key) {
145
148
return KeyValueChange :: Insert ( key, self . gen_value ( ) ) ;
146
149
}
147
150
}
148
151
}
149
152
150
153
loop {
151
154
self . rng . fill_bytes ( & mut key) ;
152
- if let Some ( ( next_key, _) ) = self . committed . state . get_next ( & key) {
155
+ if let Some ( ( next_key, _) ) = self . last_snapshot ( ) . state . get_next ( & key) {
153
156
return KeyValueChange :: Insert ( * next_key, self . gen_value ( ) ) ;
154
157
}
155
158
}
@@ -174,7 +177,12 @@ impl WorkloadState {
174
177
}
175
178
176
179
fn commit ( & mut self , snapshot : Snapshot ) {
177
- self . committed = snapshot;
180
+ self . committed . push ( snapshot) ;
181
+ }
182
+
183
+ fn last_snapshot ( & self ) -> & Snapshot {
184
+ // UNWRAP: self.committed always contains at least one empty snapshot.
185
+ self . committed . last ( ) . unwrap ( )
178
186
}
179
187
}
180
188
@@ -206,6 +214,8 @@ pub struct Workload {
206
214
ensure_changeset : bool ,
207
215
/// Whether to ensure the correctness of the state after every crash.
208
216
ensure_snapshot : bool ,
217
+ /// The maximum number of blocks involved in a rollback.
218
+ max_rollback_blocks : usize ,
209
219
}
210
220
211
221
impl Workload {
@@ -227,6 +237,7 @@ impl Workload {
227
237
workload_params. overflow ,
228
238
workload_params. new_key ,
229
239
workload_params. crash ,
240
+ workload_params. rollback ,
230
241
) ;
231
242
let mut state = WorkloadState :: new (
232
243
seed,
@@ -246,6 +257,7 @@ impl Workload {
246
257
n_successfull_commit : 0 ,
247
258
ensure_changeset : workload_params. ensure_changeset ,
248
259
ensure_snapshot : workload_params. ensure_snapshot ,
260
+ max_rollback_blocks : workload_params. max_rollback_blocks ,
249
261
}
250
262
}
251
263
@@ -277,19 +289,21 @@ impl Workload {
277
289
async fn run_iteration ( & mut self ) -> Result < ( ) > {
278
290
let agent = self . agent . as_ref ( ) . unwrap ( ) ;
279
291
let rr = agent. rr ( ) . clone ( ) ;
280
- // TODO: make the choice of the exercise more sophisticated.
281
- //
282
- // - commits should be much more frequent.
283
- // - crashes should be less frequent.
284
- // - rollbacks should be less frequent.
285
- let exercise_crash = self . state . rng . gen_bool ( self . state . biases . crash ) ;
286
- if exercise_crash {
287
- trace ! ( "run_iteration, should crash" ) ;
288
- self . exercise_commit_crashing ( & rr) . await ?;
289
- } else {
292
+
293
+ if self . state . rng . gen_bool ( self . state . biases . rollback ) {
290
294
trace ! ( "run_iteration" ) ;
291
- self . exercise_commit ( & rr) . await ?;
295
+ self . exercise_rollback ( & rr) . await ?;
296
+ return Ok ( ( ) ) ;
292
297
}
298
+
299
+ if self . state . rng . gen_bool ( self . state . biases . crash ) {
300
+ trace ! ( "run_iteration" ) ;
301
+ self . exercise_commit_crashing ( & rr) . await ?;
302
+ return Ok ( ( ) ) ;
303
+ }
304
+
305
+ trace ! ( "run_iteration" ) ;
306
+ self . exercise_commit ( & rr) . await ?;
293
307
Ok ( ( ) )
294
308
}
295
309
@@ -366,6 +380,7 @@ impl Workload {
366
380
// possibilities of crashing during sync.
367
381
crash_time = ( crash_time as f64 * 0.98 ) as u64 ;
368
382
383
+ trace ! ( "exercising crash" ) ;
369
384
rr. send_request ( crate :: message:: ToAgent :: Commit (
370
385
crate :: message:: CommitPayload {
371
386
changeset : changeset. clone ( ) ,
@@ -421,7 +436,7 @@ impl Workload {
421
436
KeyValueChange :: Insert ( key, _value) => {
422
437
// The current value must be equal to the previous one.
423
438
let current_value = rr. send_request_query ( * key) . await ?;
424
- match self . state . committed . state . get ( key) {
439
+ match self . state . last_snapshot ( ) . state . get ( key) {
425
440
None | Some ( None ) if current_value. is_some ( ) => {
426
441
return Err ( anyhow:: anyhow!( "New inserted item should not be present" ) ) ;
427
442
}
@@ -435,7 +450,7 @@ impl Workload {
435
450
}
436
451
KeyValueChange :: Delete ( key) => {
437
452
// UNWRAP: Non existing keys are not deleted.
438
- let prev_value = self . state . committed . state . get ( key) . unwrap ( ) ;
453
+ let prev_value = self . state . last_snapshot ( ) . state . get ( key) . unwrap ( ) ;
439
454
assert ! ( prev_value. is_some( ) ) ;
440
455
if rr. send_request_query ( * key) . await ?. as_ref ( ) != prev_value. as_ref ( ) {
441
456
return Err ( anyhow:: anyhow!(
@@ -453,7 +468,13 @@ impl Workload {
453
468
return Ok ( ( ) ) ;
454
469
}
455
470
456
- for ( i, ( key, expected_value) ) in ( self . state . committed . state . iter ( ) ) . enumerate ( ) {
471
+ if dbg ! ( self . state. last_snapshot( ) . sync_seqn) != dbg ! ( rr. send_query_sync_seqn( ) . await ?) {
472
+ return Err ( anyhow:: anyhow!(
473
+ "Unexpected sync_seqn while ensuring snapshot validity"
474
+ ) ) ;
475
+ }
476
+
477
+ for ( i, ( key, expected_value) ) in ( self . state . last_snapshot ( ) . state . iter ( ) ) . enumerate ( ) {
457
478
let value = rr. send_request_query ( * key) . await ?;
458
479
if & value != expected_value {
459
480
return Err ( anyhow:: anyhow!(
@@ -468,14 +489,36 @@ impl Workload {
468
489
Ok ( ( ) )
469
490
}
470
491
492
+ async fn exercise_rollback ( & mut self , rr : & comms:: RequestResponse ) -> anyhow:: Result < ( ) > {
493
+ // TODO: n_blocks should also depend on the max rollback supported by nomt.
494
+ let mut n_blocks = self . state . rng . gen_range ( 1 ..self . max_rollback_blocks ) as usize ;
495
+ let blocks_avaiable_to_rollback = self . state . committed . len ( ) - 1 ;
496
+ n_blocks = std:: cmp:: min ( blocks_avaiable_to_rollback, n_blocks) ;
497
+ let last_sync_seqn = self . state . last_snapshot ( ) . sync_seqn ;
498
+ self . state
499
+ . committed
500
+ . truncate ( self . state . committed . len ( ) - n_blocks) ;
501
+ // Applying a rollback counts as an increase of sync_seqn.
502
+ self . state . committed . last_mut ( ) . unwrap ( ) . sync_seqn = last_sync_seqn + 1 ;
503
+
504
+ trace ! ( "performing rollback of {} blocks" , n_blocks) ;
505
+ rr. send_request ( crate :: message:: ToAgent :: Rollback ( n_blocks) )
506
+ . await ?;
507
+
508
+ self . ensure_snapshot_validity ( rr) . await ?;
509
+
510
+ Ok ( ( ) )
511
+ }
512
+
471
513
async fn spawn_new_agent ( & mut self ) -> anyhow:: Result < ( ) > {
472
514
assert ! ( self . agent. is_none( ) ) ;
473
515
controller:: spawn_agent_into ( & mut self . agent ) . await ?;
474
516
let workdir = self . workdir . path ( ) . display ( ) . to_string ( ) ;
517
+ let rollback = self . state . biases . rollback > 0.0 ;
475
518
self . agent
476
519
. as_mut ( )
477
520
. unwrap ( )
478
- . init ( workdir, self . workload_id , self . bitbox_seed )
521
+ . init ( workdir, self . workload_id , self . bitbox_seed , dbg ! ( rollback ) )
479
522
. await ?;
480
523
481
524
self . ensure_snapshot_validity ( self . agent . as_ref ( ) . unwrap ( ) . rr ( ) )
0 commit comments