
Commit cbd80b2

feat: allow disable txs_verify_cache
jjyr committed Feb 18, 2019
1 parent a64d940 commit cbd80b2
Showing 8 changed files with 141 additions and 106 deletions.
54 changes: 27 additions & 27 deletions chain/src/chain.rs
@@ -11,7 +11,7 @@ use ckb_shared::error::SharedError;
use ckb_shared::index::ChainIndex;
use ckb_shared::shared::{ChainProvider, ChainState, Shared};
use ckb_shared::txo_set::TxoSetDiff;
use ckb_verification::{verify_transactions, BlockVerifier, Verifier};
use ckb_verification::{BlockVerifier, TransactionsVerifier, Verifier};
use crossbeam_channel::{self, select, Receiver, Sender};
use faketime::unix_time_as_millis;
use fnv::{FnvHashMap, FnvHashSet};
@@ -455,37 +455,37 @@ impl<CI: ChainIndex + 'static> ChainService<CI> {
push_new(b, &mut new_inputs, &mut new_outputs);
}

let max_cycles = self.shared.consensus().max_block_cycles();
let mut txs_cycles = self.shared.txs_cycles().write();
let mut txs_cache = self.shared.txs_verify_cache().write();
// The verify function
let mut verify =
|b, new_inputs: &FnvHashSet<OutPoint>, new_outputs: &FnvHashMap<H256, usize>| -> bool {
verify_transactions(b, max_cycles, &mut *txs_cycles, |op| {
self.shared.cell_at(op, |op| {
if new_inputs.contains(op) {
Some(true)
} else if let Some(x) = new_outputs.get(&op.hash) {
if op.index < (*x as u32) {
Some(false)
} else {
Some(true)
}
} else if old_outputs.contains(&op.hash) {
None
} else {
chain_state
.is_spent(op)
.map(|x| x && !old_inputs.contains(op))
}
})
})
.is_ok()
};
let txs_verifier = TransactionsVerifier::new(self.shared.consensus().max_block_cycles());

let mut found_error = false;
// verify transaction
for (ext, b) in fork.open_exts.iter_mut().zip(fork.new_blocks.iter()).rev() {
if !found_error || skip_verify || verify(b, &new_inputs, &new_outputs) {
if !found_error
|| skip_verify
|| txs_verifier
.verify(&mut *txs_cache, b, |op: &OutPoint| {
self.shared.cell_at(op, |op| {
if new_inputs.contains(op) {
Some(true)
} else if let Some(x) = new_outputs.get(&op.hash) {
if op.index < (*x as u32) {
Some(false)
} else {
Some(true)
}
} else if old_outputs.contains(&op.hash) {
None
} else {
chain_state
.is_spent(op)
.map(|x| x && !old_inputs.contains(op))
}
})
})
.is_ok()
{
push_new(b, &mut new_inputs, &mut new_outputs);
ext.valid = Some(true);
} else {
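Note on the hunk above: the closure handed to txs_verifier.verify answers "is this OutPoint already spent?" with a tri-state Option<bool> while the fork is switched: Some(true) means spent (or nonexistent), Some(false) means live, and None means the answer cannot be decided from the data at hand. Below is a self-contained sketch of that decision order; the types are simplified stand-ins, not ckb's H256, OutPoint, or Fnv collections.

    use std::collections::{HashMap, HashSet};

    type TxHash = u64; // stand-in for H256

    #[derive(PartialEq, Eq, Hash)]
    struct OutPoint {
        hash: TxHash,
        index: u32,
    }

    // Tri-state answer to "is this out point spent?" during a fork switch:
    // Some(true) = spent or nonexistent, Some(false) = live, None = not decidable here.
    fn cell_status(
        op: &OutPoint,
        new_inputs: &HashSet<OutPoint>,       // consumed by the attaching (new) blocks
        new_outputs: &HashMap<TxHash, usize>, // tx hash -> output count created by the new blocks
        old_inputs: &HashSet<OutPoint>,       // consumed only by the detaching (old) blocks
        old_outputs: &HashSet<TxHash>,        // txs that exist only on the old fork
        persisted_is_spent: impl Fn(&OutPoint) -> Option<bool>, // chain_state.is_spent stand-in
    ) -> Option<bool> {
        if new_inputs.contains(op) {
            Some(true)
        } else if let Some(&count) = new_outputs.get(&op.hash) {
            // Created by the new blocks: live if the index exists, otherwise treat as dead.
            Some(op.index as usize >= count)
        } else if old_outputs.contains(&op.hash) {
            None
        } else {
            // A cell spent only by the detaching fork becomes live again.
            persisted_is_spent(op).map(|spent| spent && !old_inputs.contains(op))
        }
    }

    fn main() {
        let mut new_outputs = HashMap::new();
        new_outputs.insert(7, 2); // a transaction in the new blocks with two outputs
        let op = OutPoint { hash: 7, index: 1 };
        let status = cell_status(
            &op,
            &HashSet::new(),
            &new_outputs,
            &HashSet::new(),
            &HashSet::new(),
            |_| Some(true),
        );
        assert_eq!(status, Some(false)); // created by the fork and not yet consumed: live
    }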
2 changes: 1 addition & 1 deletion nodes_template/default.json
@@ -49,5 +49,5 @@
"block_assembler": {
"type_hash": "0x0da2fe99fe549e082d4ed483c2e968a89ea8d11aabf5d79e5cbf06522de6e674"
},
"cycles_cache_size": 100000
"txs_verify_cache_size": 100000
}
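The renamed key matches the Configs field rename in src/setup.rs at the end of this commit. A minimal sketch of how the value would deserialize, assuming Configs derives serde's Deserialize and serde_json is available as a dependency (neither is visible in this diff):

    use serde::Deserialize;

    #[derive(Deserialize)]
    struct Configs {
        txs_verify_cache_size: usize, // renamed from cycles_cache_size
    }

    fn main() {
        let json = r#"{ "txs_verify_cache_size": 100000 }"#;
        let cfg: Configs = serde_json::from_str(json).unwrap();
        assert_eq!(cfg.txs_verify_cache_size, 100_000);
    }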
24 changes: 18 additions & 6 deletions pool/src/txs_pool/pool.rs
@@ -361,13 +361,16 @@ where
}

//readd txs
let mut txs_cycles = self.shared.txs_cycles().write();
let mut txs_verify_cache = self.shared.txs_verify_cache().write();
for tx in b.commit_transactions().iter().rev() {
if tx.is_cellbase() {
continue;
}
let tx_hash = tx.hash();
let cycles = match txs_cycles.get(&tx_hash).cloned() {
let cycles = match txs_verify_cache
.as_ref()
.and_then(|cache| cache.get(&tx_hash).cloned())
{
Some(cycles) => cycles,
None => {
let rtx = self.resolve_transaction(&tx);
@@ -376,7 +379,10 @@
.verify(self.shared.consensus().max_block_cycles())
.map_err(PoolError::InvalidTx)
.unwrap();
txs_cycles.insert(tx_hash, cycles);
// write cache
txs_verify_cache
.as_mut()
.and_then(|cache| cache.insert(tx_hash, cycles));
cycles
}
};
@@ -511,14 +517,20 @@ where
}

fn verify_transaction(&self, rtx: &ResolvedTransaction) -> Result<Cycle, TransactionError> {
let mut txs_cycles = self.shared.txs_cycles().write();
let mut txs_cache = self.shared.txs_verify_cache().write();
let tx_hash = rtx.transaction.hash();
match txs_cycles.get(&tx_hash).cloned() {
match txs_cache
.as_ref()
.and_then(|cache| cache.get(&tx_hash).cloned())
{
Some(cycles) => Ok(cycles),
None => {
let cycles = TransactionVerifier::new(&rtx)
.verify(self.shared.consensus().max_block_cycles())?;
txs_cycles.insert(tx_hash, cycles);
// write cache
txs_cache
.as_mut()
.and_then(|cache| cache.insert(tx_hash, cycles));
Ok(cycles)
}
}
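Both pool call sites above now share the same read-through pattern over the optional cache: try the cache by tx hash, otherwise run the full TransactionVerifier and, only if a cache exists, record the measured cycles. A minimal sketch of that pattern, with std HashMap standing in for LruCache; cached_or_verify is a hypothetical helper, the commit inlines this logic at each site.

    use std::collections::HashMap;

    type Cycle = u64;

    #[derive(Debug)]
    struct TxError;

    // Stand-in for TransactionVerifier::new(&rtx).verify(max_block_cycles).
    fn run_full_verification(_tx_hash: &str, max_cycles: Cycle) -> Result<Cycle, TxError> {
        let measured = 12; // pretend the scripts executed and cost 12 cycles
        if measured <= max_cycles { Ok(measured) } else { Err(TxError) }
    }

    // Hypothetical helper capturing the shared lookup-or-verify pattern.
    fn cached_or_verify(
        cache: &mut Option<HashMap<String, Cycle>>,
        tx_hash: &str,
        max_cycles: Cycle,
    ) -> Result<Cycle, TxError> {
        // A disabled cache (None) behaves like a permanent miss.
        if let Some(cycles) = cache.as_ref().and_then(|c| c.get(tx_hash).cloned()) {
            return Ok(cycles);
        }
        let cycles = run_full_verification(tx_hash, max_cycles)?;
        // Only record the result when the cache is enabled. The commit writes this as
        // `.and_then(|cache| cache.insert(..))`, which type-checks because insert
        // returns the previously cached value as an Option.
        if let Some(c) = cache.as_mut() {
            c.insert(tx_hash.to_string(), cycles);
        }
        Ok(cycles)
    }

    fn main() {
        let mut enabled = Some(HashMap::new());
        assert_eq!(cached_or_verify(&mut enabled, "0xabc", 100).unwrap(), 12);

        let mut disabled: Option<HashMap<String, Cycle>> = None; // txs_verify_cache turned off
        assert_eq!(cached_or_verify(&mut disabled, "0xabc", 100).unwrap(), 12);
    }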
34 changes: 20 additions & 14 deletions shared/src/shared.rs
@@ -77,7 +77,7 @@ impl ChainState {
pub struct Shared<CI> {
store: Arc<CI>,
chain_state: Arc<RwLock<ChainState>>,
txs_cycles: Arc<RwLock<LruCache<H256, Cycle>>>,
txs_verify_cache: Arc<RwLock<Option<LruCache<H256, Cycle>>>>,
consensus: Arc<Consensus>,
}

@@ -87,14 +87,18 @@ impl<CI: ChainIndex> ::std::clone::Clone for Shared<CI> {
Shared {
store: Arc::clone(&self.store),
chain_state: Arc::clone(&self.chain_state),
txs_cycles: Arc::clone(&self.txs_cycles),
txs_verify_cache: Arc::clone(&self.txs_verify_cache),
consensus: Arc::clone(&self.consensus),
}
}
}

impl<CI: ChainIndex> Shared<CI> {
pub fn new(store: CI, consensus: Consensus, cache_size: usize) -> Self {
pub fn new(
store: CI,
consensus: Consensus,
txs_verify_cache: Arc<RwLock<Option<LruCache<H256, Cycle>>>>,
) -> Self {
let chain_state = {
// check head in store or save the genesis block as head
let header = {
@@ -122,12 +126,10 @@ impl<CI: ChainIndex> Shared<CI> {
)))
};

let txs_cycles = Arc::new(RwLock::new(LruCache::new(cache_size)));

Shared {
store: Arc::new(store),
chain_state,
txs_cycles,
txs_verify_cache,
consensus: Arc::new(consensus),
}
}
@@ -140,8 +142,8 @@ impl<CI: ChainIndex> Shared<CI> {
&self.store
}

pub fn txs_cycles(&self) -> &RwLock<LruCache<H256, Cycle>> {
&self.txs_cycles
pub fn txs_verify_cache(&self) -> &RwLock<Option<LruCache<H256, Cycle>>> {
&self.txs_verify_cache
}

pub fn init_txo_set(store: &CI, number: u64) -> TxoSet {
@@ -446,15 +448,15 @@ impl<CI: ChainIndex> BlockMedianTimeContext for Shared<CI> {
pub struct SharedBuilder<DB: KeyValueDB> {
db: Option<DB>,
consensus: Option<Consensus>,
cycles_cache_size: usize,
txs_verify_cache_size: usize,
}

impl<DB: KeyValueDB> Default for SharedBuilder<DB> {
fn default() -> Self {
SharedBuilder {
db: None,
consensus: None,
cycles_cache_size: 100_000,
txs_verify_cache_size: 100_000,
}
}
}
@@ -464,7 +466,7 @@ impl SharedBuilder<MemoryKeyValueDB> {
SharedBuilder {
db: Some(MemoryKeyValueDB::open(COLUMNS as usize)),
consensus: None,
cycles_cache_size: 100_000,
txs_verify_cache_size: 100_000,
}
}
}
@@ -489,14 +491,18 @@ impl<DB: 'static + KeyValueDB> SharedBuilder<DB> {
self
}

pub fn cycles_cache_size(mut self, value: usize) -> Self {
self.cycles_cache_size = value;
pub fn txs_verify_cache_size(mut self, value: usize) -> Self {
self.txs_verify_cache_size = value;
self
}

pub fn build(self) -> Shared<ChainKVStore<DB>> {
let store = ChainKVStore::new(self.db.unwrap());
let consensus = self.consensus.unwrap_or_else(Consensus::default);
Shared::new(store, consensus, self.cycles_cache_size)
Shared::new(
store,
consensus,
Arc::new(RwLock::new(Some(LruCache::new(self.txs_verify_cache_size)))),
)
}
}
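With this change Shared::new receives the cache handle itself (Arc<RwLock<Option<LruCache<H256, Cycle>>>>) rather than a size, so a caller can disable verification caching by injecting None, which is presumably what the commit title refers to; the default SharedBuilder::build keeps the old behaviour by wrapping Some(LruCache::new(size)). A sketch of the two construction paths, using std RwLock and HashMap as stand-ins so the example stays self-contained:

    use std::collections::HashMap;
    use std::sync::{Arc, RwLock};

    type Cycle = u64;
    // Stand-in for LruCache<H256, Cycle>; the real cache is an LRU map keyed by tx hash.
    type VerifyCache = HashMap<String, Cycle>;

    // Stand-in for Shared<CI>: only the injected cache handle matters for this sketch.
    struct Shared {
        txs_verify_cache: Arc<RwLock<Option<VerifyCache>>>,
    }

    impl Shared {
        fn new(txs_verify_cache: Arc<RwLock<Option<VerifyCache>>>) -> Self {
            Shared { txs_verify_cache }
        }

        fn cache_enabled(&self) -> bool {
            self.txs_verify_cache.read().unwrap().is_some()
        }
    }

    fn main() {
        // What SharedBuilder::build wires up by default: a cache of the configured size.
        let with_cache = Shared::new(Arc::new(RwLock::new(Some(VerifyCache::new()))));
        assert!(with_cache.cache_enabled());

        // What the new Shared::new signature makes possible: hand in None and every
        // lookup falls through to full verification.
        let without_cache = Shared::new(Arc::new(RwLock::new(None)));
        assert!(!without_cache.cache_enabled());
    }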
2 changes: 1 addition & 1 deletion src/cli/run_impl.rs
@@ -29,7 +29,7 @@ pub fn run(setup: Setup) {
let shared = SharedBuilder::<CacheDB<RocksDB>>::default()
.consensus(consensus)
.db(&setup.configs.db)
.cycles_cache_size(setup.configs.cycles_cache_size)
.txs_verify_cache_size(setup.configs.txs_verify_cache_size)
.build();

let notify = NotifyService::default().start(Some("notify"));
2 changes: 1 addition & 1 deletion src/setup.rs
@@ -39,7 +39,7 @@ pub struct Configs {
pub block_assembler: BlockAssemblerConfig,
pub sync: SyncConfig,
pub pool: PoolConfig,
pub cycles_cache_size: usize,
pub txs_verify_cache_size: usize,
}

pub fn get_config_path(matches: &ArgMatches) -> PathBuf {
(Diffs for the remaining 2 of the 8 changed files are not shown.)
