# feat: Shadow tracking #11689

**Merged** · Jul 5, 2024 · 5 commits · Changes from 2 commits
## chain/chain/src/test_utils/kv_runtime.rs (19 additions, 0 deletions)
```diff
@@ -957,6 +957,25 @@ impl EpochManagerAdapter for MockEpochManager {
         Ok(true)
     }
 
+    fn cares_about_shard_in_epoch(
+        &self,
+        epoch_id: EpochId,
+        account_id: &AccountId,
+        shard_id: ShardId,
+    ) -> Result<bool, EpochError> {
+        // This `unwrap` here tests that in all code paths we check that the epoch exists
+        // before we check whether we care about a shard. Please do not remove the unwrap;
+        // fix the logic of the calling function instead.
+        let epoch_valset = self.get_valset_for_epoch(&epoch_id).unwrap();
+        let chunk_producers = self.get_chunk_producers(epoch_valset, shard_id);
+        for validator in chunk_producers {
+            if validator.account_id() == account_id {
+                return Ok(true);
+            }
+        }
+        Ok(false)
+    }
+
     fn cares_about_shard_from_prev_block(
         &self,
         parent_hash: &CryptoHash,
```
## chain/epoch-manager/src/adapter.rs (18 additions, 2 deletions)
```diff
@@ -8,8 +8,7 @@
 use near_primitives::block::Tip;
 use near_primitives::block_header::{Approval, ApprovalInner, BlockHeader};
 use near_primitives::epoch_manager::block_info::BlockInfo;
 use near_primitives::epoch_manager::epoch_info::EpochInfo;
-use near_primitives::epoch_manager::EpochConfig;
-use near_primitives::epoch_manager::ShardConfig;
+use near_primitives::epoch_manager::{EpochConfig, ShardConfig};
 use near_primitives::errors::EpochError;
 use near_primitives::hash::CryptoHash;
 use near_primitives::shard_layout::{account_id_to_shard_id, ShardLayout, ShardLayoutError};
@@ -422,6 +421,13 @@ pub trait EpochManagerAdapter: Send + Sync {
         partial_witness: &PartialEncodedStateWitness,
     ) -> Result<bool, Error>;
 
+    fn cares_about_shard_in_epoch(
+        &self,
+        epoch_id: EpochId,
+        account_id: &AccountId,
+        shard_id: ShardId,
+    ) -> Result<bool, EpochError>;
+
     fn cares_about_shard_from_prev_block(
         &self,
         parent_hash: &CryptoHash,
@@ -1142,4 +1148,14 @@ impl EpochManagerAdapter for EpochManagerHandle {
         let epoch_manager = self.read();
         Ok(epoch_manager.get_epoch_info(epoch_id)?.validators_iter().collect::<Vec<_>>())
     }
+
+    fn cares_about_shard_in_epoch(
+        &self,
+        epoch_id: EpochId,
+        account_id: &AccountId,
+        shard_id: ShardId,
+    ) -> Result<bool, EpochError> {
+        let epoch_manager = self.read();
+        epoch_manager.cares_about_shard_in_epoch(epoch_id, account_id, shard_id)
+    }
 }
```
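For readers unfamiliar with the `EpochManagerHandle` delegation in the last hunk: the handle wraps the manager in a lock, and each adapter method takes a read guard before forwarding the call. A minimal sketch of that pattern with stand-in types (not the real nearcore definitions):

```rust
use std::sync::{Arc, RwLock, RwLockReadGuard};

// Stand-in for the inner manager; the real one holds epoch state.
struct EpochManager;

impl EpochManager {
    fn cares_about_shard_in_epoch(&self) -> bool {
        true // stand-in for the real chunk-producer membership check
    }
}

// Stand-in for EpochManagerHandle: a cloneable, lock-guarded wrapper.
#[derive(Clone)]
struct EpochManagerHandle {
    inner: Arc<RwLock<EpochManager>>,
}

impl EpochManagerHandle {
    fn read(&self) -> RwLockReadGuard<'_, EpochManager> {
        self.inner.read().unwrap()
    }

    // Adapter method: acquire the read lock, then delegate.
    fn cares_about_shard_in_epoch(&self) -> bool {
        let epoch_manager = self.read();
        epoch_manager.cares_about_shard_in_epoch()
    }
}

fn main() {
    let handle = EpochManagerHandle { inner: Arc::new(RwLock::new(EpochManager)) };
    assert!(handle.cares_about_shard_in_epoch());
}
```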
## chain/epoch-manager/src/lib.rs (4 additions, 4 deletions)
```diff
@@ -1626,11 +1626,8 @@ impl EpochManager {
         let stake_divisor = { config.minimum_stake_divisor as Balance };
         Ok(seat_price / stake_divisor)
     }
-}
 
-/// Private utilities for EpochManager.
-impl EpochManager {
-    fn cares_about_shard_in_epoch(
+    pub fn cares_about_shard_in_epoch(
         &self,
         epoch_id: EpochId,
         account_id: &AccountId,
@@ -1648,7 +1645,10 @@
         }
         Ok(false)
     }
+}
 
+/// Private utilities for EpochManager.
+impl EpochManager {
     #[inline]
     pub(crate) fn block_producer_from_info(
         epoch_info: &EpochInfo,
```
## chain/epoch-manager/src/shard_tracker.rs (6 additions, 0 deletions)
```diff
@@ -11,6 +11,7 @@ use near_primitives::types::{AccountId, EpochId, ShardId};
 #[derive(Clone)]
 pub enum TrackedConfig {
     Accounts(Vec<AccountId>),
+    Shadow(AccountId),
     AllShards,
     // Rotates between sets of shards to track.
     Schedule(Vec<Vec<ShardId>>),
@@ -26,6 +27,8 @@ impl TrackedConfig {
             TrackedConfig::AllShards
         } else if !config.tracked_shard_schedule.is_empty() {
             TrackedConfig::Schedule(config.tracked_shard_schedule.clone())
+        } else if let Some(account_id) = config.shadow_tracked.as_ref() {
+            TrackedConfig::Shadow(account_id.clone())
         } else {
             TrackedConfig::Accounts(config.tracked_accounts.clone())
         }
```
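Taken together, the two hunks above give `from_config` a fixed precedence among the tracking options: `tracked_shards` wins over `tracked_shard_schedule`, which wins over the new `shadow_tracked`, which wins over `tracked_accounts`. A minimal, self-contained sketch of that precedence with simplified stand-in types (the real nearcore structs have more fields):

```rust
// Simplified stand-ins, just to illustrate the precedence encoded in the
// if/else chain of `TrackedConfig::from_config` above.
#[derive(Debug, PartialEq)]
enum TrackedConfig {
    Accounts(Vec<String>),
    Shadow(String),
    AllShards,
    Schedule(Vec<Vec<u64>>),
}

struct ClientConfig {
    tracked_shards: Vec<u64>,
    tracked_shard_schedule: Vec<Vec<u64>>,
    shadow_tracked: Option<String>,
    tracked_accounts: Vec<String>,
}

fn from_config(config: &ClientConfig) -> TrackedConfig {
    if !config.tracked_shards.is_empty() {
        TrackedConfig::AllShards
    } else if !config.tracked_shard_schedule.is_empty() {
        TrackedConfig::Schedule(config.tracked_shard_schedule.clone())
    } else if let Some(account_id) = config.shadow_tracked.as_ref() {
        TrackedConfig::Shadow(account_id.clone())
    } else {
        TrackedConfig::Accounts(config.tracked_accounts.clone())
    }
}

fn main() {
    // A config that sets both shadow_tracked and tracked_accounts:
    // shadow tracking wins because it is checked first.
    let config = ClientConfig {
        tracked_shards: vec![],
        tracked_shard_schedule: vec![],
        shadow_tracked: Some("test0".to_string()),
        tracked_accounts: vec!["alice.near".to_string()],
    };
    assert_eq!(from_config(&config), TrackedConfig::Shadow("test0".to_string()));
}
```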
```diff
@@ -90,6 +93,9 @@ impl ShardTracker {
                 let subset = &schedule[index as usize];
                 Ok(subset.contains(&shard_id))
             }
+            TrackedConfig::Shadow(account_id) => {
+                self.epoch_manager.cares_about_shard_in_epoch(*epoch_id, account_id, shard_id)
+            }
         }
     }
```

Review thread on the `TrackedConfig::Shadow` arm:

> **Contributor:** Can we also give some warning somewhere (potentially here) if the account_id is not eligible to be a chunk validator at all? I was thinking about doing it when loading the config, but at that point I am not sure the node will have full info about the other validators.

> **@staffik (author), Jul 5, 2024:** Logging it here is spammy: it resulted in thousands of logs in a very short nayduck test.

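The trade-off raised in that thread (warn about an ineligible account without per-call log spam) could be handled with a process-wide once guard. A minimal sketch, assuming the eligibility check is computed elsewhere; the helper name and log message are illustrative, not part of this PR:

```rust
use std::sync::Once;

static SHADOW_TRACKING_WARNING: Once = Once::new();

// Hypothetical helper: emit the warning at most once per process rather than
// on every cares_about_shard call, which the author found produced thousands
// of log lines in a short nayduck test.
fn warn_once_if_not_chunk_producer(is_chunk_producer: bool, account_id: &str) {
    if !is_chunk_producer {
        SHADOW_TRACKING_WARNING.call_once(|| {
            eprintln!("shadow_tracked account `{account_id}` is not a chunk producer in the current epoch");
        });
    }
}

fn main() {
    // Called many times, but the warning is printed only once.
    for _ in 0..1000 {
        warn_once_if_not_chunk_producer(false, "test0");
    }
}
```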
## core/chain-configs/src/client_config.rs (3 additions, 0 deletions)
```diff
@@ -410,6 +410,8 @@ pub struct ClientConfig {
     pub gc: GCConfig,
     /// Accounts that this client tracks.
     pub tracked_accounts: Vec<AccountId>,
+    /// Track the shards that the given validator account tracks.
+    pub shadow_tracked: Option<AccountId>,
     /// Shards that this client tracks.
     pub tracked_shards: Vec<ShardId>,
     /// Rotate between these sets of tracked shards.
@@ -539,6 +541,7 @@ impl ClientConfig {
             block_header_fetch_horizon: 50,
             gc: GCConfig { gc_blocks_limit: 100, ..GCConfig::default() },
             tracked_accounts: vec![],
+            shadow_tracked: None,
             tracked_shards: vec![],
             tracked_shard_schedule: vec![],
             archive,
```
## nearcore/src/config.rs (3 additions, 0 deletions)
```diff
@@ -222,6 +222,7 @@ pub struct Config {
     pub network: near_network::config_json::Config,
     pub consensus: Consensus,
     pub tracked_accounts: Vec<AccountId>,
+    pub shadow_tracked: Option<AccountId>,
     pub tracked_shards: Vec<ShardId>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub tracked_shard_schedule: Option<Vec<Vec<ShardId>>>,
@@ -337,6 +338,7 @@ impl Default for Config {
             network: Default::default(),
             consensus: Consensus::default(),
             tracked_accounts: vec![],
+            shadow_tracked: None,
             tracked_shards: vec![],
             tracked_shard_schedule: None,
             archive: false,
@@ -557,6 +559,7 @@ impl NearConfig {
             doomslug_step_period: config.consensus.doomslug_step_period,
             tracked_accounts: config.tracked_accounts,
             tracked_shards: config.tracked_shards,
+            shadow_tracked: config.shadow_tracked,
             tracked_shard_schedule: config.tracked_shard_schedule.unwrap_or(vec![]),
             archive: config.archive,
             save_trie_changes: config.save_trie_changes.unwrap_or(!config.archive),
```
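Since `shadow_tracked` is a plain optional serde field on `Config`, enabling shadow tracking is a one-line addition to a node's config.json. A minimal sketch of the deserialization with a simplified stand-in struct (the real `Config` has many more fields, and the account name is illustrative):

```rust
use serde::Deserialize;

// Simplified stand-in for the tracking-related part of nearcore's Config.
#[derive(Deserialize, Debug)]
struct TrackingConfig {
    #[serde(default)]
    tracked_accounts: Vec<String>,
    #[serde(default)]
    shadow_tracked: Option<String>,
    #[serde(default)]
    tracked_shards: Vec<u64>,
}

fn main() {
    // The key matches the field added in this PR; "validator0" is illustrative.
    let raw = r#"{ "shadow_tracked": "validator0" }"#;
    let config: TrackingConfig = serde_json::from_str(raw).unwrap();
    assert_eq!(config.shadow_tracked.as_deref(), Some("validator0"));
    assert!(config.tracked_accounts.is_empty() && config.tracked_shards.is_empty());
}
```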
## nearcore/src/config_duration_test.rs (4 additions, 0 deletions)
```diff
@@ -1,7 +1,10 @@
+use std::str::FromStr;
+
 use crate::config::Config;
 use near_jsonrpc::RpcConfig;
 use near_network::config_json::{ExperimentalConfig, NetworkConfigOverrides};
 use near_o11y::testonly::init_test_logger;
+use near_primitives::types::AccountId;
 use near_store::StoreConfig;
 use serde::ser::{
     SerializeMap, SerializeSeq, SerializeStruct, SerializeStructVariant, SerializeTuple,
@@ -40,6 +43,7 @@ fn test_config_duration_all_std() {
         rosetta_rpc: Some(Default::default()),
         save_trie_changes: Some(Default::default()),
         split_storage: Some(Default::default()),
+        shadow_tracked: Some(AccountId::from_str("test").unwrap()),
         tracked_shard_schedule: Some(Default::default()),
         transaction_pool_size_limit: Some(Default::default()),
         state_sync: Some(Default::default()),
```
## pytest/tests/sanity/validator_switch_key_quick.py (2 additions, 2 deletions)
```diff
@@ -26,7 +26,7 @@ def test_validator_switch_key_quick(self):
         # that it will be assigned to when becoming a validator.
         config_map = {
             2: {
-                "tracked_shards": [0],
+                "shadow_tracked": "test0",
                 "store.load_mem_tries_for_tracked_shards": True,
             }
         }
@@ -42,7 +42,7 @@
             ["block_producer_kickout_threshold", 10],
             ["chunk_producer_kickout_threshold", 10]],
            config_map)
-        wait_for_blocks(old_validator, count=2)
+        wait_for_blocks(old_validator, count=5)
 
         new_validator.reset_validator_key(other_validator.validator_key)
         other_validator.kill()
```