@@ -3,6 +3,7 @@ mod tests;
 
 use crate::node_client::NodeClient;
 use crate::single_disk_farm::piece_cache::{DiskPieceCache, Offset};
+use crate::single_disk_farm::plot_cache::{DiskPlotCache, MaybePieceStoredResult};
 use crate::utils::{run_future_in_dedicated_thread, AsyncJoinOnDrop};
 use event_listener_primitives::{Bag, HandlerId};
 use futures::channel::oneshot;
@@ -11,6 +12,7 @@ use futures::{select, FutureExt, StreamExt};
 use parking_lot::RwLock;
 use std::collections::{HashMap, VecDeque};
 use std::num::NonZeroU16;
+use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::Arc;
 use std::time::Duration;
 use std::{fmt, mem};
@@ -51,7 +53,7 @@ struct DiskPieceCacheState {
 #[derive(Debug)]
 enum WorkerCommand {
     ReplaceBackingCaches {
-        new_caches: Vec<DiskPieceCache>,
+        new_piece_caches: Vec<DiskPieceCache>,
         acknowledgement: oneshot::Sender<()>,
     },
     ForgetKey {
@@ -102,11 +104,11 @@ where
             .expect("Always set during worker instantiation");
 
         if let Some(WorkerCommand::ReplaceBackingCaches {
-            new_caches,
+            new_piece_caches,
             acknowledgement,
         }) = worker_receiver.recv().await
        {
-            self.initialize(&piece_getter, &mut worker_state, new_caches)
+            self.initialize(&piece_getter, &mut worker_state, new_piece_caches)
                 .await;
             // Doesn't matter if receiver is still waiting for acknowledgement
             let _ = acknowledgement.send(());
@@ -163,10 +165,10 @@ where
     {
         match command {
             WorkerCommand::ReplaceBackingCaches {
-                new_caches,
+                new_piece_caches,
                 acknowledgement,
             } => {
-                self.initialize(piece_getter, worker_state, new_caches)
+                self.initialize(piece_getter, worker_state, new_piece_caches)
                     .await;
                 // Doesn't matter if receiver is still waiting for acknowledgement
                 let _ = acknowledgement.send(());
@@ -215,31 +217,31 @@ where
         &self,
         piece_getter: &PG,
         worker_state: &mut CacheWorkerState,
-        new_caches: Vec<DiskPieceCache>,
+        new_piece_caches: Vec<DiskPieceCache>,
     ) where
         PG: PieceGetter,
     {
         info!("Initializing piece cache");
         // Pull old cache state since it will be replaced with a new one and reuse its allocations
         let cache_state = mem::take(&mut *self.caches.write());
-        let mut stored_pieces = Vec::with_capacity(new_caches.len());
-        let mut free_offsets = Vec::with_capacity(new_caches.len());
+        let mut stored_pieces = Vec::with_capacity(new_piece_caches.len());
+        let mut free_offsets = Vec::with_capacity(new_piece_caches.len());
         for mut state in cache_state {
             state.stored_pieces.clear();
             stored_pieces.push(state.stored_pieces);
             state.free_offsets.clear();
             free_offsets.push(state.free_offsets);
         }
-        stored_pieces.resize(new_caches.len(), HashMap::default());
-        free_offsets.resize(new_caches.len(), VecDeque::default());
+        stored_pieces.resize(new_piece_caches.len(), HashMap::default());
+        free_offsets.resize(new_piece_caches.len(), VecDeque::default());
 
         debug!("Collecting pieces that were in the cache before");
 
         // Build cache state of all backends
         let maybe_caches_futures = stored_pieces
             .into_iter()
             .zip(free_offsets)
-            .zip(new_caches)
+            .zip(new_piece_caches)
             .enumerate()
             .map(
                 |(index, ((mut stored_pieces, mut free_offsets), new_cache))| {
@@ -760,8 +762,12 @@ where
 #[derive(Debug, Clone)]
 pub struct FarmerCache {
     peer_id: PeerId,
-    /// Individual disk caches where pieces are stored
-    caches: Arc<RwLock<Vec<DiskPieceCacheState>>>,
+    /// Individual dedicated piece caches
+    piece_caches: Arc<RwLock<Vec<DiskPieceCacheState>>>,
+    /// Additional piece caches
+    plot_caches: Arc<RwLock<Vec<DiskPlotCache>>>,
+    /// Next plot cache to use for storing pieces
+    next_plot_cache: Arc<AtomicUsize>,
     handlers: Arc<Handlers>,
     // We do not want to increase capacity unnecessarily on clone
     worker_sender: Arc<mpsc::Sender<WorkerCommand>>,
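Note on the new `next_plot_cache` field: `FarmerCache` derives `Clone`, and because the counter sits behind an `Arc` like the other fields, every clone advances the same round-robin position instead of keeping its own. A minimal sketch of that sharing behaviour (the `SharedCounter` type here is a hypothetical stand-in, not part of the crate):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

// Hypothetical stand-in for the cache handle: every clone shares the same
// `AtomicUsize` through the `Arc`, so the round-robin position is global
// rather than per-clone.
#[derive(Clone)]
struct SharedCounter {
    next_plot_cache: Arc<AtomicUsize>,
}

fn main() {
    let first = SharedCounter {
        next_plot_cache: Arc::new(AtomicUsize::new(0)),
    };
    let second = first.clone();

    // Both clones observe and advance the same counter.
    assert_eq!(first.next_plot_cache.fetch_add(1, Ordering::Relaxed), 0);
    assert_eq!(second.next_plot_cache.fetch_add(1, Ordering::Relaxed), 1);
    assert_eq!(first.next_plot_cache.load(Ordering::Relaxed), 2);
}
```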
@@ -782,7 +788,9 @@ impl FarmerCache {
 
         let instance = Self {
             peer_id,
-            caches: Arc::clone(&caches),
+            piece_caches: Arc::clone(&caches),
+            plot_caches: Arc::default(),
+            next_plot_cache: Arc::new(AtomicUsize::new(0)),
             handlers: Arc::clone(&handlers),
             worker_sender: Arc::new(worker_sender),
         };
@@ -801,34 +809,47 @@ impl FarmerCache {
     pub async fn get_piece(&self, key: RecordKey) -> Option<Piece> {
         let maybe_piece_fut = tokio::task::spawn_blocking({
             let key = key.clone();
-            let caches = Arc::clone(&self.caches);
+            let piece_caches = Arc::clone(&self.piece_caches);
+            let plot_caches = Arc::clone(&self.plot_caches);
             let worker_sender = Arc::clone(&self.worker_sender);
 
             move || {
-                for (disk_farm_index, cache) in caches.read().iter().enumerate() {
-                    let Some(&offset) = cache.stored_pieces.get(&key) else {
-                        continue;
-                    };
-                    match cache.backend.read_piece(offset) {
-                        Ok(maybe_piece) => {
-                            return maybe_piece;
-                        }
-                        Err(error) => {
-                            error!(
-                                %error,
-                                %disk_farm_index,
-                                ?key,
-                                %offset,
-                                "Error while reading piece from cache, might be a disk corruption"
-                            );
+                {
+                    let piece_caches = piece_caches.read();
+                    for (disk_farm_index, cache) in piece_caches.iter().enumerate() {
+                        let Some(&offset) = cache.stored_pieces.get(&key) else {
+                            continue;
+                        };
+                        match cache.backend.read_piece(offset) {
+                            Ok(maybe_piece) => {
+                                return maybe_piece;
+                            }
+                            Err(error) => {
+                                error!(
+                                    %error,
+                                    %disk_farm_index,
+                                    ?key,
+                                    %offset,
+                                    "Error while reading piece from cache, might be a disk corruption"
+                                );
+
+                                if let Err(error) =
+                                    worker_sender.blocking_send(WorkerCommand::ForgetKey { key })
+                                {
+                                    trace!(%error, "Failed to send ForgetKey command to worker");
+                                }
 
-                            if let Err(error) =
-                                worker_sender.blocking_send(WorkerCommand::ForgetKey { key })
-                            {
-                                trace!(%error, "Failed to send ForgetKey command to worker");
+                                return None;
                             }
+                        }
+                    }
+                }
 
-                            return None;
+                {
+                    let plot_caches = plot_caches.read();
+                    for cache in plot_caches.iter() {
+                        if let Some(piece) = cache.read_piece(&key) {
+                            return Some(piece);
                         }
                     }
                 }
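The rewritten closure checks the dedicated piece caches first and only then falls through to the plot caches, with each lookup wrapped in its own block so the first read guard is dropped before the second lock is taken. A small sketch of that guard-scoping pattern with `parking_lot::RwLock` (the data here is hypothetical, not the real cache types):

```rust
use parking_lot::RwLock;

// Hypothetical two-tier lookup mirroring the guard-scoping pattern above: each
// read guard lives only inside its own block, so the first lock is released
// before the second one is taken and neither is held longer than necessary.
fn lookup(primary: &RwLock<Vec<u32>>, secondary: &RwLock<Vec<u32>>, needle: u32) -> Option<u32> {
    {
        let primary = primary.read();
        if primary.contains(&needle) {
            return Some(needle);
        }
    } // `primary` read guard dropped here

    {
        let secondary = secondary.read();
        if secondary.contains(&needle) {
            return Some(needle);
        }
    } // `secondary` read guard dropped here

    None
}

fn main() {
    let primary = RwLock::new(vec![1, 2, 3]);
    let secondary = RwLock::new(vec![7, 8, 9]);
    assert_eq!(lookup(&primary, &secondary, 8), Some(8));
    assert_eq!(lookup(&primary, &secondary, 4), None);
}
```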
@@ -846,24 +867,92 @@ impl FarmerCache {
         }
     }
 
+    /// Try to store a piece in additional downloaded pieces, if there is space for them
+    pub async fn maybe_store_additional_piece(&self, piece_index: PieceIndex, piece: &Piece) {
+        let key = RecordKey::from(piece_index.to_multihash());
+
+        let mut should_store = false;
+        for cache in self.plot_caches.read().iter() {
+            match cache.is_piece_maybe_stored(&key) {
+                MaybePieceStoredResult::No => {
+                    // Try another one if there is any
+                }
+                MaybePieceStoredResult::Vacant => {
+                    should_store = true;
+                    break;
+                }
+                MaybePieceStoredResult::Yes => {
+                    // Already stored, nothing else left to do
+                    return;
+                }
+            }
+        }
+
+        if !should_store {
+            return;
+        }
+
+        let should_store_fut = tokio::task::spawn_blocking({
+            let plot_caches = Arc::clone(&self.plot_caches);
+            let next_plot_cache = Arc::clone(&self.next_plot_cache);
+            let piece = piece.clone();
+
+            move || {
+                let plot_caches = plot_caches.read();
+                let plot_caches_len = plot_caches.len();
+
+                // Store pieces in plots using round-robin distribution
+                for _ in 0..plot_caches_len {
+                    let plot_cache_index =
+                        next_plot_cache.fetch_add(1, Ordering::Relaxed) % plot_caches_len;
+
+                    match plot_caches[plot_cache_index].try_store_piece(piece_index, &piece) {
+                        Ok(true) => {
+                            return;
+                        }
+                        Ok(false) => {
+                            continue;
+                        }
+                        Err(error) => {
+                            error!(
+                                %error,
+                                %piece_index,
+                                %plot_cache_index,
+                                "Failed to store additional piece in cache"
+                            );
+                            continue;
+                        }
+                    }
+                }
+            }
+        });
+
+        if let Err(error) = AsyncJoinOnDrop::new(should_store_fut, true).await {
+            error!(%error, %piece_index, "Failed to store additional piece in cache");
+        }
+    }
+
     /// Initialize replacement of backing caches, returns acknowledgement receiver that can be used
     /// to identify when cache initialization has finished
     pub async fn replace_backing_caches(
         &self,
-        new_caches: Vec<DiskPieceCache>,
+        new_piece_caches: Vec<DiskPieceCache>,
+        new_plot_caches: Vec<DiskPlotCache>,
     ) -> oneshot::Receiver<()> {
         let (sender, receiver) = oneshot::channel();
         if let Err(error) = self
             .worker_sender
             .send(WorkerCommand::ReplaceBackingCaches {
-                new_caches,
+                new_piece_caches,
                 acknowledgement: sender,
             })
             .await
         {
             warn!(%error, "Failed to replace backing caches, worker exited");
         }
 
+        *self.plot_caches.write() = new_plot_caches;
+
         receiver
     }
 
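`maybe_store_additional_piece` spreads pieces over the plot caches round-robin: each attempt bumps the shared counter once and wraps it by the number of caches. A standalone sketch of just that selection step (names are illustrative, not from the crate):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Standalone illustration of the selection step used above: `fetch_add` hands out
// an ever-increasing ticket and the modulo wraps it onto the available caches, so
// consecutive stores rotate through them even when issued from several threads.
fn next_index(counter: &AtomicUsize, len: usize) -> usize {
    // Callers skip this entirely when `len` is zero, just as the surrounding
    // `for _ in 0..plot_caches_len` loop never runs its body for zero caches.
    counter.fetch_add(1, Ordering::Relaxed) % len
}

fn main() {
    let counter = AtomicUsize::new(0);
    let picks: Vec<usize> = (0..5).map(|_| next_index(&counter, 3)).collect();
    assert_eq!(picks, vec![0, 1, 2, 0, 1]);
}
```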
@@ -876,10 +965,27 @@ impl FarmerCache {
 impl LocalRecordProvider for FarmerCache {
     fn record(&self, key: &RecordKey) -> Option<ProviderRecord> {
         // It is okay to take read lock here, writes locks are very infrequent and very short
-        for cache in self.caches.read().iter() {
-            if cache.stored_pieces.contains_key(key) {
+        for piece_cache in self.piece_caches.read().iter() {
+            if piece_cache.stored_pieces.contains_key(key) {
+                // Note: We store our own provider records locally without local addresses
+                // to avoid redundant storage and outdated addresses. Instead, these are
+                // acquired on demand when returning a `ProviderRecord` for the local node.
+                return Some(ProviderRecord {
+                    key: key.clone(),
+                    provider: self.peer_id,
+                    expires: None,
+                    addresses: Vec::new(),
+                });
+            };
+        }
+        // It is okay to take read lock here, writes locks almost never happen
+        for plot_cache in self.plot_caches.read().iter() {
+            if matches!(
+                plot_cache.is_piece_maybe_stored(key),
+                MaybePieceStoredResult::Yes
+            ) {
                 // Note: We store our own provider records locally without local addresses
-                // to avoid redundant storage and outdated addresses. Instead these are
+                // to avoid redundant storage and outdated addresses. Instead, these are
                 // acquired on demand when returning a `ProviderRecord` for the local node.
                 return Some(ProviderRecord {
                     key: key.clone(),