Skip to content

Commit

Permalink
Patch from tgmichel that removes schema cache task
Browse files Browse the repository at this point in the history
  • Loading branch information
Dinonard committed Jul 6, 2022
1 parent 43845ec commit efb2d22
Show file tree
Hide file tree
Showing 4 changed files with 6 additions and 449 deletions.
117 changes: 1 addition & 116 deletions client/rpc/src/eth/cache/mod.rs
Expand Up @@ -17,7 +17,6 @@
// along with this program. If not, see <https://www.gnu.org/licenses/>.

mod lru_cache;
mod tests;

use std::{
collections::{BTreeMap, HashMap},
Expand All @@ -40,7 +39,7 @@ use sp_api::ProvideRuntimeApi;
use sp_blockchain::HeaderBackend;
use sp_runtime::{
generic::BlockId,
traits::{BlakeTwo256, Block as BlockT, Header as HeaderT, UniqueSaturatedInto, Zero},
traits::{BlakeTwo256, Block as BlockT, Header as HeaderT, UniqueSaturatedInto},
};

use fc_rpc_core::types::*;
Expand Down Expand Up @@ -288,120 +287,6 @@ where
BE: Backend<B> + 'static,
BE::State: StateBackend<BlakeTwo256>,
{
/// Task that caches at which substrate hash a new EthereumStorageSchema was inserted in the Runtime Storage.
///
/// Runs forever: seeds the schema cache at genesis on first start, then follows the
/// client's import notification stream, appending a `(schema, hash)` entry whenever a
/// best block changes the storage schema, and repairing the cache across re-orgs.
pub async fn ethereum_schema_cache_task(client: Arc<C>, backend: Arc<fc_db::Backend<B>>) {
	// `Ok(None)` means no cache entry was ever written to auxiliary storage,
	// so initialize the schema cache at genesis.
	if let Ok(None) = frontier_backend_client::load_cached_schema::<B>(backend.as_ref()) {
		let mut cache: Vec<(EthereumStorageSchema, H256)> = Vec::new();
		let id = BlockId::Number(Zero::zero());
		if let Ok(Some(header)) = client.header(id) {
			let genesis_schema_version = frontier_backend_client::onchain_storage_schema::<
				B,
				C,
				BE,
			>(client.as_ref(), id);
			cache.push((genesis_schema_version, header.hash()));
			let _ = frontier_backend_client::write_cached_schema::<B>(backend.as_ref(), cache)
				.map_err(|err| {
					log::warn!("Error schema cache insert for genesis: {:?}", err);
				});
		} else {
			log::warn!("Error genesis header unreachable");
		}
	}

	// Returns the schema for the given block hash and its parent, or `None`
	// when the header cannot be fetched from the client backend.
	let current_and_parent_schema =
		|hash: B::Hash| -> Option<(EthereumStorageSchema, EthereumStorageSchema)> {
			let id = BlockId::Hash(hash);
			if let Ok(Some(header)) = client.header(id) {
				let new_schema = frontier_backend_client::onchain_storage_schema::<B, C, BE>(
					client.as_ref(),
					id,
				);

				let parent_hash = header.parent_hash();
				let parent_id: BlockId<B> = BlockId::Hash(*parent_hash);
				let parent_schema = frontier_backend_client::onchain_storage_schema::<B, C, BE>(
					client.as_ref(),
					parent_id,
				);
				return Some((new_schema, parent_schema));
			}
			None
		};

	let mut notification_st = client.import_notification_stream();
	while let Some(notification) = notification_st.next().await {
		let imported_hash = notification.hash;
		// Only act when we can resolve both schemas for the imported block AND
		// the persisted cache loads successfully; otherwise skip this notification.
		if let (Some((new_schema, parent_schema)), Ok(Some(old_cache))) = (
			current_and_parent_schema(imported_hash),
			frontier_backend_client::load_cached_schema::<B>(backend.as_ref()),
		) {
			let mut new_cache: Vec<(EthereumStorageSchema, H256)> = old_cache.clone();

			if new_schema != parent_schema && notification.is_new_best {
				// Always update cache on best block if there is a schema change.
				new_cache.push((new_schema, imported_hash));
			}

			// Re-org handling.
			if let Some(tree_route) = notification.tree_route {
				// Imported block belongs to a re-org.
				// First remove the retracted hashes from cache, if any.
				let retracted = tree_route
					.retracted()
					.iter()
					.map(|hash_and_number| hash_and_number.hash)
					.collect::<Vec<_>>();
				// Fix: the previous implementation collected *ascending* indices
				// into `old_cache` and called `new_cache.remove(index)` for each;
				// every removal shifts later elements left, so any removal after
				// the first targeted the wrong entry. `retain` drops all retracted
				// hashes correctly in one in-place O(n) pass.
				new_cache.retain(|(_, hash)| !retracted.contains(hash));
				// Next add if there is a schema change in the branch.
				let to_add = tree_route
					.enacted()
					.iter()
					.filter_map(|hash_and_number| {
						if let Some((new_schema, parent_schema)) =
							current_and_parent_schema(hash_and_number.hash)
						{
							if new_schema != parent_schema {
								return Some((new_schema, hash_and_number.hash));
							}
							return None;
						}
						None
					})
					.collect::<Vec<_>>();
				new_cache.extend(to_add);
			}
			// Persist only when something actually changed, avoiding redundant writes.
			if new_cache != old_cache {
				let _ = frontier_backend_client::write_cached_schema::<B>(
					backend.as_ref(),
					new_cache,
				)
				.map_err(|err| {
					log::warn!("Error schema cache insert: {:?}", err);
				});
			}
		}
	}
}

pub async fn filter_pool_task(
client: Arc<C>,
filter_pool: Arc<Mutex<BTreeMap<U256, FilterPoolItem>>>,
Expand Down

0 comments on commit efb2d22

Please sign in to comment.