@@ -4,7 +4,10 @@ use crate::abis::{
 };
 use super::abis::tx_effect::TxEffect;
 use dep::types::{
-    abis::{log_hash::ScopedLogHash, public_data_write::PublicDataWrite, sponge_blob::SpongeBlob},
+    abis::{
+        log::Log, log_hash::ScopedLogHash, public_data_write::PublicDataWrite,
+        sponge_blob::SpongeBlob,
+    },
     constants::{
         AZTEC_MAX_EPOCH_DURATION, CONTRACT_CLASS_LOGS_PREFIX, L2_L1_MSGS_PREFIX,
         MAX_CONTRACT_CLASS_LOGS_PER_TX, MAX_L2_TO_L1_MSGS_PER_TX, MAX_NOTE_HASHES_PER_TX,
@@ -17,7 +20,7 @@ use dep::types::{
     hash::{accumulate_sha256, silo_unencrypted_log_hash},
     merkle_tree::VariableMerkleTree,
     traits::is_empty,
-    utils::{arrays::{array_concat, array_length, array_merge}, field::field_from_bytes},
+    utils::arrays::{array_length, array_merge},
 };
 use blob::blob_public_inputs::BlockBlobPublicInputs;

@@ -134,8 +137,9 @@ pub fn compute_kernel_out_hash(l2_to_l1_msgs: [Field; MAX_L2_TO_L1_MSGS_PER_TX])
  * Uses 2 bytes to encode the length even when we only need 1 to keep uniform.
  */
 pub fn encode_blob_prefix(input_type: u8, array_len: u32) -> Field {
-    let len_bytes = (array_len as Field).to_be_bytes::<2>();
-    field_from_bytes([input_type, 0, len_bytes[0], len_bytes[1]], true)
+    let array_len = array_len as Field;
+    array_len.assert_max_bit_size::<16>();
+    (input_type as Field) * (256 * 256 * 256) + array_len
 }

 // Tx effects consist of
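The arithmetic form of `encode_blob_prefix` packs the same four bytes as the old `field_from_bytes([input_type, 0, len_bytes[0], len_bytes[1]], true)` call, and the `assert_max_bit_size::<16>()` range check keeps the length from spilling into the zero and type bytes. A minimal standalone sketch of that equivalence (the `0x01` prefix byte is an illustrative stand-in, not one of the real `*_PREFIX` constants):

```noir
#[test]
fn blob_prefix_matches_old_byte_layout() {
    // 300 = 0x012C exercises both length bytes; 0x01 stands in for a *_PREFIX constant.
    let input_type: Field = 0x01;
    let array_len: Field = 300;
    // New form: shift the type byte above a zero byte and the two big-endian length bytes.
    let encoded = input_type * (256 * 256 * 256) + array_len;
    // Old layout, spelled out byte by byte: [input_type, 0x00, 0x01, 0x2C].
    let expected = input_type * (256 * 256 * 256) + 0x00 * (256 * 256) + 0x01 * 256 + 0x2C;
    assert_eq(encoded, expected);
}
```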
@@ -185,7 +189,10 @@ pub(crate) fn append_tx_effects_for_blob(
 fn get_tx_effects_hash_input(
     tx_effect: TxEffect,
 ) -> ([Field; TX_EFFECTS_BLOB_HASH_INPUT_FIELDS], u32) {
-    let mut tx_effects_hash_input = unsafe { get_tx_effects_hash_input_helper(tx_effect) };
+    tx_effect.transaction_fee.assert_max_bit_size::<29 * 8>();
+    let TWO_POW_240 = 1766847064778384329583297500742918515827483896875618958121606201292619776;
+    let prefixed_tx_fee: Field =
+        (TX_FEE_PREFIX as Field) * TWO_POW_240 + (tx_effect.transaction_fee as Field);

     let note_hashes = tx_effect.note_hashes;
     let nullifiers = tx_effect.nullifiers;
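Here `prefixed_tx_fee` reproduces the old 31-byte layout `[TX_FEE_PREFIX, 0, fee.to_be_bytes::<29>()]` arithmetically: the `29 * 8`-bit range check pins the fee below 2^232, and multiplying the prefix byte by 2^240 places it one zero byte above the fee. A small hedged sketch (not code from the PR) confirming the magic constant really is 2^240:

```noir
#[test]
fn two_pow_240_literal_is_two_to_the_240() {
    // Constant copied from the change above.
    let two_pow_240: Field = 1766847064778384329583297500742918515827483896875618958121606201292619776;
    // Recompute 2^240 by repeated doubling and compare.
    let mut acc: Field = 1;
    for _i in 0..240 {
        acc = acc * 2;
    }
    assert_eq(acc, two_pow_240);
}
```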
@@ -201,6 +208,21 @@ fn get_tx_effects_hash_input(
         silo_unencrypted_log_hash(log)
     });

+    let mut tx_effects_hash_input = unsafe {
+        get_tx_effects_hash_input_helper(
+            tx_effect.tx_hash,
+            prefixed_tx_fee,
+            tx_effect.note_hashes,
+            tx_effect.nullifiers,
+            tx_effect.l2_to_l1_msgs,
+            public_data_update_requests,
+            private_logs,
+            unencrypted_logs,
+            contract_class_logs,
+            tx_effect.revert_code as Field,
+        )
+    };
+
     let mut offset = 0;
     let mut array_len = 0;

@@ -215,16 +237,7 @@ fn get_tx_effects_hash_input(

     // TX FEE
     // Using 29 bytes to encompass all reasonable fee lengths
-    assert_eq(
-        tx_effects_hash_input[offset],
-        field_from_bytes(
-            array_concat(
-                [TX_FEE_PREFIX, 0],
-                tx_effect.transaction_fee.to_be_bytes::<29>(),
-            ),
-            true,
-        ),
-    );
+    assert_eq(tx_effects_hash_input[offset], prefixed_tx_fee);
     offset += 1;

     // NB: The array_length function does NOT constrain we have a sorted left-packed array.
@@ -349,126 +362,99 @@ fn get_tx_effects_hash_input(
     }

     // Now we know the number of fields appended, we can assign the first value:
-    // TX_START_PREFIX | 0 | txlen[0] txlen[1] | 0 | REVERT_CODE_PREFIX | 0 | revert_code
-    // Start prefix is "tx_start".to_field() => 8 bytes
-    let prefix_bytes = TX_START_PREFIX.to_be_bytes::<8>();
-    let length_bytes = (offset as Field).to_be_bytes::<2>();
+    let expected_tx_start_field =
+        generate_tx_start_field(offset as Field, tx_effect.revert_code as Field);
     // REVERT CODE
-    assert_eq(
-        tx_effects_hash_input[0],
-        field_from_bytes(
-            array_concat(
-                prefix_bytes,
-                [
-                    0,
-                    length_bytes[0],
-                    length_bytes[1],
-                    0,
-                    REVERT_CODE_PREFIX,
-                    0,
-                    tx_effect.revert_code,
-                ],
-            ),
-            true,
-        ),
-    );
+    assert_eq(tx_effects_hash_input[0], expected_tx_start_field);

     (tx_effects_hash_input, offset)
 }

+fn generate_tx_start_field(offset: Field, revert_code: Field) -> Field {
+    // TX_START_PREFIX | 0 | 0 | 0 | 0 | REVERT_CODE_PREFIX | 0 | 0
+    let constant = (TX_START_PREFIX as Field) * (256 * 256 * 256 * 256 * 256 * 256 * 256)
+        + (REVERT_CODE_PREFIX as Field) * (256 * 256);
+
+    let tx_start_field = constant + offset * (256 * 256 * 256 * 256) + revert_code;
+
+    tx_start_field
+}
+
 unconstrained fn get_tx_effects_hash_input_helper(
-    tx_effect: TxEffect,
+    tx_hash: Field,
+    prefixed_tx_fee: Field,
+    note_hashes: [Field; MAX_NOTE_HASHES_PER_TX],
+    nullifiers: [Field; MAX_NULLIFIERS_PER_TX],
+    l2_to_l1_msgs: [Field; MAX_L2_TO_L1_MSGS_PER_TX],
+    public_data_update_requests: [PublicDataWrite; MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX],
+    private_logs: [Log<PRIVATE_LOG_SIZE_IN_FIELDS>; MAX_PRIVATE_LOGS_PER_TX],
+    unencrypted_logs: [Field; MAX_UNENCRYPTED_LOGS_PER_TX],
+    contract_class_logs: [Field; MAX_CONTRACT_CLASS_LOGS_PER_TX],
+    revert_code: Field,
 ) -> [Field; TX_EFFECTS_BLOB_HASH_INPUT_FIELDS] {
     let mut tx_effects_hash_input = [0; TX_EFFECTS_BLOB_HASH_INPUT_FIELDS];

-    let note_hashes = tx_effect.note_hashes;
-    let nullifiers = tx_effect.nullifiers;
-
     // Public writes are the concatenation of all non-empty user update requests and protocol update requests, then padded with zeroes.
     // The incoming all_public_data_update_requests may have empty update requests in the middle, so we move those to the end of the array.
-    let public_data_update_requests =
-        get_all_update_requests_for_tx_effects(tx_effect.public_data_writes);
-    let private_logs = tx_effect.private_logs;
-    let unencrypted_logs =
-        tx_effect.unencrypted_logs_hashes.map(|log: ScopedLogHash| silo_unencrypted_log_hash(log));
-    let contract_class_logs = tx_effect.contract_class_logs_hashes.map(|log: ScopedLogHash| {
-        silo_unencrypted_log_hash(log)
-    });
-
-    let mut offset = 0;
-    let mut array_len = 0;
-
-    // NB: for publishing fields of blob data we use the first element of the blob to encode:
-    // TX_START_PREFIX | 0 | txlen[0] txlen[1] | 0 | REVERT_CODE_PREFIX | 0 | revert_code
-    // Two bytes are used to encode the number of fields appended here, given by 'offset'
-    // We only know the value once the appending is complete, hence we overwrite input[0] below
-    tx_effects_hash_input[offset] = 0;
-    offset += 1;

-    tx_effects_hash_input[offset] = tx_effect.tx_hash;
-    offset += 1;
+    tx_effects_hash_input[1] = tx_hash;

     // TX FEE
     // Using 29 bytes to encompass all reasonable fee lengths
-    tx_effects_hash_input[offset] = field_from_bytes(
-        array_concat(
-            [TX_FEE_PREFIX, 0],
-            tx_effect.transaction_fee.to_be_bytes::<29>(),
-        ),
-        true,
-    );
-    offset += 1;
+    tx_effects_hash_input[2] = prefixed_tx_fee;
+
+    let mut offset = 3;

     // NB: The array_length function does NOT constrain we have a sorted left-packed array.
     // We can use it because all inputs here come from the kernels which DO constrain left-packing.
     // If that ever changes, we will have to constrain it by counting items differently.
     // NOTE HASHES
-    array_len = array_length(note_hashes);
+    let array_len = array_length(note_hashes);
     if array_len != 0 {
         let notes_prefix = encode_blob_prefix(NOTES_PREFIX, array_len);
         tx_effects_hash_input[offset] = notes_prefix;
         offset += 1;

-        for j in 0..MAX_NOTE_HASHES_PER_TX {
+        for j in 0..array_len {
             tx_effects_hash_input[offset + j] = note_hashes[j];
         }
         offset += array_len;
     }

     // NULLIFIERS
-    array_len = array_length(nullifiers);
+    let array_len = array_length(nullifiers);
     if array_len != 0 {
         let nullifiers_prefix = encode_blob_prefix(NULLIFIERS_PREFIX, array_len);
         tx_effects_hash_input[offset] = nullifiers_prefix;
         offset += 1;

-        for j in 0..MAX_NULLIFIERS_PER_TX {
+        for j in 0..array_len {
             tx_effects_hash_input[offset + j] = nullifiers[j];
         }
         offset += array_len;
     }

     // L2 TO L1 MESSAGES
-    array_len = array_length(tx_effect.l2_to_l1_msgs);
+    let array_len = array_length(l2_to_l1_msgs);
     if array_len != 0 {
         let l2_to_l1_msgs_prefix = encode_blob_prefix(L2_L1_MSGS_PREFIX, array_len);
         tx_effects_hash_input[offset] = l2_to_l1_msgs_prefix;
         offset += 1;

-        for j in 0..MAX_L2_TO_L1_MSGS_PER_TX {
-            tx_effects_hash_input[offset + j] = tx_effect.l2_to_l1_msgs[j];
+        for j in 0..array_len {
+            tx_effects_hash_input[offset + j] = l2_to_l1_msgs[j];
         }
         offset += array_len;
     }

     // PUBLIC DATA UPDATE REQUESTS
-    array_len = array_length(public_data_update_requests);
+    let array_len = array_length(public_data_update_requests);
     if array_len != 0 {
         let public_data_update_requests_prefix =
             encode_blob_prefix(PUBLIC_DATA_UPDATE_REQUESTS_PREFIX, array_len * 2);
         tx_effects_hash_input[offset] = public_data_update_requests_prefix;
         offset += 1;
-        for j in 0..MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX {
+        for j in 0..array_len {
             tx_effects_hash_input[offset + j * 2] = public_data_update_requests[j].leaf_slot;
             tx_effects_hash_input[offset + j * 2 + 1] = public_data_update_requests[j].value;
         }
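`generate_tx_start_field` folds the layout documented in the removed comment, `TX_START_PREFIX (8 bytes) | 0 | txlen[0] txlen[1] | 0 | REVERT_CODE_PREFIX | 0 | revert_code`, into one field: the 8-byte prefix is shifted up by 256^7, the two-byte length by 256^4, and the revert-code prefix by 256^2. A self-contained sketch of that equivalence; the two prefix values below are illustrative stand-ins (the first is just "tx_start" as ASCII, per the removed comment), not the protocol constants:

```noir
#[test]
fn tx_start_field_matches_documented_byte_layout() {
    let tx_start_prefix: Field = 0x74785f7374617274; // "tx_start" as 8 ASCII bytes (stand-in)
    let revert_code_prefix: Field = 0x07; // stand-in for REVERT_CODE_PREFIX
    let offset: Field = 300; // txlen bytes 0x01, 0x2C
    let revert_code: Field = 2;

    // Same shape as generate_tx_start_field above.
    let constant = tx_start_prefix * (256 * 256 * 256 * 256 * 256 * 256 * 256)
        + revert_code_prefix * (256 * 256);
    let tx_start_field = constant + offset * (256 * 256 * 256 * 256) + revert_code;

    // The 15-byte layout the old field_from_bytes/array_concat construction built.
    let expected = tx_start_prefix * (256 * 256 * 256 * 256 * 256 * 256 * 256) // prefix, then a zero byte
        + 0x01 * (256 * 256 * 256 * 256 * 256) // txlen[0]
        + 0x2C * (256 * 256 * 256 * 256) // txlen[1], then a zero byte
        + revert_code_prefix * (256 * 256) // REVERT_CODE_PREFIX, then a zero byte
        + revert_code;
    assert_eq(tx_start_field, expected);
}
```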
@@ -477,13 +463,14 @@ unconstrained fn get_tx_effects_hash_input_helper(

     // TODO(Miranda): squash 0s in a nested loop and add len prefix?
     // PRIVATE_LOGS
-    array_len = array_length(private_logs) * PRIVATE_LOG_SIZE_IN_FIELDS;
-    if array_len != 0 {
+    let num_private_logs = array_length(private_logs);
+    if num_private_logs != 0 {
+        let array_len = num_private_logs * PRIVATE_LOG_SIZE_IN_FIELDS;
         let private_logs_prefix = encode_blob_prefix(PRIVATE_LOGS_PREFIX, array_len);
         tx_effects_hash_input[offset] = private_logs_prefix;
         offset += 1;

-        for j in 0..MAX_PRIVATE_LOGS_PER_TX {
+        for j in 0..num_private_logs {
             for k in 0..PRIVATE_LOG_SIZE_IN_FIELDS {
                 let index = offset + j * PRIVATE_LOG_SIZE_IN_FIELDS + k;
                 tx_effects_hash_input[index] = private_logs[j].fields[k];
@@ -495,44 +482,33 @@ unconstrained fn get_tx_effects_hash_input_helper(
     // TODO(#8954): When logs are refactored into fields, we will append the values here
     // Currently appending the single log hash as an interim solution
     // UNENCRYPTED LOGS
-    array_len = array_length(unencrypted_logs);
+    let array_len = array_length(unencrypted_logs);
     if array_len != 0 {
         let unencrypted_logs_prefix = encode_blob_prefix(UNENCRYPTED_LOGS_PREFIX, array_len);
         tx_effects_hash_input[offset] = unencrypted_logs_prefix;
         offset += 1;

-        for j in 0..MAX_UNENCRYPTED_LOGS_PER_TX {
+        for j in 0..array_len {
             tx_effects_hash_input[offset + j] = unencrypted_logs[j];
         }
         offset += array_len;
     }

     // CONTRACT CLASS LOGS
-    array_len = array_length(contract_class_logs);
+    let array_len = array_length(contract_class_logs);
     if array_len != 0 {
         let contract_class_logs_prefix = encode_blob_prefix(CONTRACT_CLASS_LOGS_PREFIX, array_len);
         tx_effects_hash_input[offset] = contract_class_logs_prefix;
         offset += 1;

-        for j in 0..MAX_CONTRACT_CLASS_LOGS_PER_TX {
+        for j in 0..array_len {
             tx_effects_hash_input[offset + j] = contract_class_logs[j];
         }
         offset += array_len;
     }

     // Now we know the number of fields appended, we can assign the first value:
-    // TX_START_PREFIX | 0 | txlen[0] txlen[1] | 0 | REVERT_CODE_PREFIX | 0 | revert_code
-    // Start prefix is "tx_start".to_field() => 8 bytes
-    let prefix_bytes = TX_START_PREFIX.to_be_bytes::<8>();
-    let length_bytes = (offset as Field).to_be_bytes::<2>();
-    // REVERT CODE
-    tx_effects_hash_input[0] = field_from_bytes(
-        array_concat(
-            prefix_bytes,
-            [0, length_bytes[0], length_bytes[1], 0, REVERT_CODE_PREFIX, 0, tx_effect.revert_code],
-        ),
-        true,
-    );
+    tx_effects_hash_input[0] = generate_tx_start_field(offset as Field, revert_code);

     tx_effects_hash_input
 }
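Taken together, the helper lays each transaction out in the blob as: the tx-start field, the tx hash, the prefixed fee, and then, for every non-empty category, a prefix field followed by the items themselves, with `offset` ending up as the total field count that gets folded into `tx_effects_hash_input[0]`. A toy walk-through of that layout for a hypothetical tx with two note hashes and one nullifier (prefix bytes and values are made up):

```noir
#[test]
fn toy_tx_effects_field_layout() {
    let shift: Field = 256 * 256 * 256; // encode_blob_prefix packs type | 0 | len_hi | len_lo
    let notes_prefix_byte: Field = 0x01; // stand-in for NOTES_PREFIX
    let nullifiers_prefix_byte: Field = 0x02; // stand-in for NULLIFIERS_PREFIX

    let mut fields: [Field; 8] = [0; 8];
    fields[1] = 0x1111; // tx_hash
    fields[2] = 0x2222; // prefixed_tx_fee
    let mut offset: u32 = 3;

    fields[offset] = notes_prefix_byte * shift + 2; // prefix: two note hashes follow
    fields[offset + 1] = 0xaaa1;
    fields[offset + 2] = 0xaaa2;
    offset += 3;

    fields[offset] = nullifiers_prefix_byte * shift + 1; // prefix: one nullifier follows
    fields[offset + 1] = 0xbbb1;
    offset += 2;

    // Every other category is empty, so nothing else is appended. offset is now the total
    // number of fields this tx occupies (including fields[0]), which is the length that
    // generate_tx_start_field folds into fields[0] together with the revert code.
    assert_eq(offset, 8);
}
```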