Skip to content

Commit

Permalink
1, re-insert the reachability data
Browse files Browse the repository at this point in the history
2, add mutex when dag committing
  • Loading branch information
jackzhhuang committed Jan 7, 2025
1 parent 47078d4 commit a75c045
Show file tree
Hide file tree
Showing 2 changed files with 35 additions and 4 deletions.
15 changes: 12 additions & 3 deletions flexidag/src/blockdag.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ use crate::process_key_already_error;
use crate::prune::pruning_point_manager::PruningPointManagerT;
use crate::reachability::ReachabilityError;
use anyhow::{bail, ensure, Ok};
use parking_lot::Mutex;
use rocksdb::WriteBatch;
use starcoin_config::temp_dir;
use starcoin_crypto::{HashValue as Hash, HashValue};
Expand Down Expand Up @@ -54,6 +55,7 @@ pub struct BlockDAG {
pub storage: FlexiDagStorage,
ghostdag_manager: DbGhostdagManager,
pruning_point_manager: PruningPointManager,
commit_lock: Arc<Mutex<FlexiDagStorage>>,
}

impl BlockDAG {
Expand All @@ -75,11 +77,12 @@ impl BlockDAG {
reachability_service.clone(),
);
let pruning_point_manager = PruningPointManager::new(reachability_service, ghostdag_store);

let commit_lock = Arc::new(Mutex::new(db.clone()));
Self {
ghostdag_manager,
storage: db,
pruning_point_manager,
commit_lock,
}
}

Expand Down Expand Up @@ -327,11 +330,12 @@ impl BlockDAG {
);
}

info!("start to commit via batch, header id: {:?}", header.id());

// Create a DB batch writer
let mut batch = WriteBatch::default();

info!("start to commit via batch, header id: {:?}", header.id());
let lock_guard = self.commit_lock.lock();

// lock the dag data to write in batch
// the cache will be written at the same time
// when the batch is written before flush to the disk and
Expand Down Expand Up @@ -410,6 +414,7 @@ impl BlockDAG {
.write_batch(batch)
.expect("failed to write dag data in batch");

drop(lock_guard);
info!("finish writing the batch, head id: {:?}", header.id());

Ok(())
Expand Down Expand Up @@ -469,6 +474,9 @@ impl BlockDAG {
// Create a DB batch writer
let mut batch = WriteBatch::default();

info!("start to commit via batch, header id: {:?}", header.id());
let lock_guard = self.commit_lock.lock();

// lock the dag data to write in batch, read lock.
// the cache will be written at the same time
// when the batch is written before flush to the disk and
Expand Down Expand Up @@ -548,6 +556,7 @@ impl BlockDAG {
.write_batch(batch)
.expect("failed to write dag data in batch");

drop(lock_guard);
info!("finish writing the batch, head id: {:?}", header.id());

Ok(())
Expand Down
24 changes: 23 additions & 1 deletion flexidag/src/reachability/inquirer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -74,8 +74,30 @@ fn add_dag_block(
mergeset_iterator: HashIterator,
) -> Result<()> {
// Update the future covering set for blocks in the mergeset
let mut insert_future_set_result: Vec<std::result::Result<(), ReachabilityError>> = Vec::new();
for merged_block in mergeset_iterator {
insert_to_future_covering_set(store, merged_block, new_block)?;
let result = insert_to_future_covering_set(store, merged_block, new_block);
if result.is_err() {
match result {
Err(ReachabilityError::DataInconsistency) => {
// This is a data inconsistency error, which means that the block is already in the future covering set
// of the merged block. This is a serious error, and we should propagate it.
insert_future_set_result.push(Err(ReachabilityError::DataInconsistency));
}
Err(ReachabilityError::HashesNotOrdered) => {
// This is a hashes not ordered error, which means that the merged block is not in the future covering set
// of the new block. This is a serious error, and we should propagate it.
return Err(ReachabilityError::HashesNotOrdered);
}
_ => {
// This is an unexpected error, and we should propagate it.
return result;
}
}
}
}
for result in insert_future_set_result.into_iter() {
result?;
}
Ok(())
}
Expand Down

0 comments on commit a75c045

Please sign in to comment.