Merge pull request #2454 from blockstack/fix/2444-rbf-fallbacks
Fix: #2444, RBF fallback logic, and winning_txid fetching
kantai committed Feb 15, 2021
2 parents 176fda1 + bb3171d commit d8dbbca
Showing 6 changed files with 216 additions and 19 deletions.
15 changes: 14 additions & 1 deletion CHANGELOG.md
@@ -5,15 +5,28 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [2.0.6]
## [2.0.6] - 2021-02-15

The database schema has not changed since 2.0.5, so when spinning up a
2.0.6 node from a 2.0.5 chainstate, you do not need to use a fresh
working directory. Earlier versions' chainstate directories are
incompatible, however.

### Fixed

- The miner's RBF logic has two new fallback behaviors. First, if RBF has
already increased fees by more than 50%, the miner does not submit a new
transaction. Second, the "same chainstate hash" fallback check has been fixed.
- Winning block txid lookups in the SortitionDB have been corrected
to use the txid during the lookup.
- The miner will no longer attempt to mine a new Stacks block if it receives a
microblock in a discontinuous microblock stream.

## [2.0.5] - 2021-02-12

The database schema has changed since 2.0.4, so when spinning up a 2.0.5
node from an earlier chainstate, you must use a fresh working directory.

### Added

- Miner heuristic for handling relatively large or computationally
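The 1.5x fee-rate cap described in the 2.0.6 RBF fallback entry above can be sketched as a standalone check. This is a minimal illustration only; the function and parameter names are hypothetical, and the real check appears in bitcoin_regtest_controller.rs further down in this diff.

```rust
/// Illustrative sketch of the RBF fee-cap fallback (hypothetical names, not
/// the node's API): once the ongoing commit's fee rate has been bumped past
/// 1.5x the configured rate, stop RBF'ing instead of submitting another bump.
fn should_keep_rbfing(configured_sats_per_byte: u64, ongoing_fee_rate: u64) -> bool {
    // The integer arithmetic mirrors the `* 150 / 100` comparison in the diff.
    ongoing_fee_rate <= configured_sats_per_byte * 150 / 100
}

fn main() {
    assert!(should_keep_rbfing(50, 70)); // 70 <= 75: still under the cap
    assert!(!should_keep_rbfing(50, 80)); // 80 > 75: past the cap, do not resubmit
}
```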
11 changes: 7 additions & 4 deletions src/chainstate/burn/db/sortdb.rs
@@ -3144,21 +3144,24 @@ impl SortitionDB {
consensus_hash: &ConsensusHash,
block_hash: &BlockHeaderHash,
) -> Result<Option<LeaderBlockCommitOp>, db_error> {
let sortition_id = match SortitionDB::get_block_snapshot_consensus(conn, consensus_hash)? {
let (sortition_id, winning_txid) = match SortitionDB::get_block_snapshot_consensus(
conn,
consensus_hash,
)? {
Some(sn) => {
if !sn.pox_valid {
warn!("Consensus hash {:?} corresponds to a sortition that is not on the canonical PoX fork", consensus_hash);
return Err(db_error::InvalidPoxSortition);
}
sn.sortition_id
(sn.sortition_id, sn.winning_block_txid)
}
None => {
return Ok(None);
}
};

let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 AND block_header_hash = ?2";
let args: [&dyn ToSql; 2] = [&sortition_id, &block_hash];
let qry = "SELECT * FROM block_commits WHERE sortition_id = ?1 AND block_header_hash = ?2 AND txid = ?3";
let args: [&dyn ToSql; 3] = [&sortition_id, &block_hash, &winning_txid];
query_row_panic(conn, qry, &args, || {
format!("FATAL: multiple block commits for {}", &block_hash)
})
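To see why the added `AND txid = ?3` constraint matters, here is a minimal sketch against a simplified `block_commits` table (assuming the `rusqlite` crate; this is not the node's actual schema or its `query_row_panic` helper). Several miners can commit to the same block hash in the same sortition, and only the winning txid disambiguates them.

```rust
use rusqlite::{params, Connection};

fn main() -> rusqlite::Result<()> {
    // Simplified stand-in for the block_commits table.
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE block_commits (
             txid TEXT PRIMARY KEY,
             sortition_id TEXT,
             block_header_hash TEXT
         );
         INSERT INTO block_commits VALUES ('tx-winner', 's1', 'bh1');
         INSERT INTO block_commits VALUES ('tx-loser',  's1', 'bh1');",
    )?;

    // Without the txid constraint this query would match two rows; with it,
    // the lookup returns exactly the winning commit.
    let winner: String = conn.query_row(
        "SELECT txid FROM block_commits
         WHERE sortition_id = ?1 AND block_header_hash = ?2 AND txid = ?3",
        params!["s1", "bh1", "tx-winner"],
        |row| row.get(0),
    )?;
    assert_eq!(winner, "tx-winner");
    Ok(())
}
```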
2 changes: 1 addition & 1 deletion src/chainstate/stacks/db/blocks.rs
@@ -1632,7 +1632,7 @@ impl StacksChainState {
/// order, this method does not check that.
/// The consensus_hash and anchored_block_hash correspond to the _parent_ Stacks block.
/// Microblocks ought to only be stored if they are first confirmed to have been signed.
fn store_staging_microblock<'a>(
pub fn store_staging_microblock<'a>(
tx: &mut DBTx<'a>,
parent_consensus_hash: &ConsensusHash,
parent_anchored_block_hash: &BlockHeaderHash,
43 changes: 31 additions & 12 deletions testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs
@@ -56,6 +56,12 @@ use stacks::monitoring::{increment_btc_blocks_received_counter, increment_btc_op
#[cfg(test)]
use stacks::chainstate::burn::Opcodes;

/// The number of bitcoin blocks that can have
/// passed since the UTXO cache was last refreshed before
/// the cache is force-reset.
const UTXO_CACHE_STALENESS_LIMIT: u64 = 6;
const DUST_UTXO_LIMIT: u64 = 5500;

pub struct BitcoinRegtestController {
config: Config,
indexer_config: BitcoinIndexerConfig,
@@ -179,8 +185,6 @@ impl LeaderBlockCommitFees {
}
}

const DUST_UTXO_LIMIT: u64 = 5500;

impl BitcoinRegtestController {
pub fn new(config: Config, coordinator_channel: Option<CoordinatorChannels>) -> Self {
BitcoinRegtestController::with_burnchain(config, coordinator_channel, None)
@@ -1005,20 +1009,34 @@ impl BitcoinRegtestController {
}
}

// Stop as soon as the fee_rate is 1.25 higher, stop RBF
// Stop RBF as soon as the fee_rate exceeds 1.50x the configured rate
if ongoing_op.fees.fee_rate > (self.config.burnchain.satoshis_per_byte * 150 / 100) {
let res = self.send_block_commit_operation(payload, signer, None, None, None, &vec![]);
return res;
warn!("RBF'd block commits reached 1.5x satoshi per byte fee rate, not resubmitting");
self.ongoing_block_commit = Some(ongoing_op);
return None;
}

// Did a re-org occurred since we fetched our UTXOs
let sortdb = self.sortdb_mut();
let sort_id = SortitionId::stubbed(&ongoing_op.utxos.bhh);
let ic = sortdb.index_conn();
if let Err(e) = SortitionDB::get_block_snapshot(&ic, &sort_id) {
// Did a re-org occur since we fetched our UTXOs, or are the UTXOs so stale that they should be abandoned?
let mut traversal_depth = 0;
let mut burn_chain_tip = burnchain_db.get_canonical_chain_tip().ok()?;
let mut found_last_mined_at = false;
while traversal_depth < UTXO_CACHE_STALENESS_LIMIT {
if &burn_chain_tip.block_hash == &ongoing_op.utxos.bhh {
found_last_mined_at = true;
break;
}

let parent = burnchain_db
.get_burnchain_block(&burn_chain_tip.parent_block_hash)
.ok()?;
burn_chain_tip = parent.header;
traversal_depth += 1;
}

if !found_last_mined_at {
info!(
"Possible presence of fork, invalidating cached set of UTXOs - {:?}",
e
"Possible presence of fork or stale UTXO cache, invalidating cached set of UTXOs.";
"cached_burn_block_hash" => %ongoing_op.utxos.bhh,
);
let res = self.send_block_commit_operation(payload, signer, None, None, None, &vec![]);
return res;
@@ -1035,6 +1053,7 @@
// Let's start by early returning 1)
if payload == ongoing_op.payload {
info!("Abort attempt to re-submit identical LeaderBlockCommit");
self.ongoing_block_commit = Some(ongoing_op);
return None;
}

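The staleness check added in this file walks backwards from the canonical burnchain tip for at most `UTXO_CACHE_STALENESS_LIMIT` blocks, looking for the block at which the cached UTXOs were fetched. Below is a self-contained sketch of that walk-back using a plain header map as a stand-in for the node's `BurnchainDB` API; the types and names are illustrative only.

```rust
use std::collections::HashMap;

#[derive(Clone)]
struct Header {
    block_hash: String,
    parent_block_hash: String,
}

/// Walk parent links from the canonical tip for at most `limit` blocks,
/// looking for the block hash the cached UTXOs were fetched at.
fn cached_utxos_usable(
    headers: &HashMap<String, Header>,
    tip: &Header,
    cached_at: &str,
    limit: u64,
) -> bool {
    let mut cursor = tip.clone();
    for _ in 0..limit {
        if cursor.block_hash == cached_at {
            return true; // cache block is still canonical and recent enough
        }
        match headers.get(&cursor.parent_block_hash) {
            Some(parent) => cursor = parent.clone(),
            None => return false, // ran out of ancestors
        }
    }
    false // re-org'd away or older than `limit` blocks: invalidate the cache
}

fn main() {
    let a = Header { block_hash: "a".into(), parent_block_hash: "genesis".into() };
    let b = Header { block_hash: "b".into(), parent_block_hash: "a".into() };
    let headers: HashMap<String, Header> =
        [("a".to_string(), a.clone()), ("b".to_string(), b.clone())].into();
    assert!(cached_utxos_usable(&headers, &b, "a", 6)); // still reachable: keep cache
    assert!(!cached_utxos_usable(&headers, &b, "orphan", 6)); // not found: re-fetch
}
```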
10 changes: 10 additions & 0 deletions testnet/stacks-node/src/neon_node.rs
@@ -130,6 +130,16 @@ fn inner_process_tenure(
) -> Result<bool, ChainstateError> {
let stacks_blocks_processed = coord_comms.get_stacks_blocks_processed();

if StacksChainState::has_stored_block(
&chain_state.db(),
&chain_state.blocks_path,
consensus_hash,
&anchored_block.block_hash(),
)? {
// already processed my tenure
return Ok(true);
}

let ic = burn_db.index_conn();

// Preprocess the anchored block
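The `has_stored_block` guard added above makes `inner_process_tenure` idempotent: if the anchored block for this tenure was already stored, the function returns early instead of preprocessing it again. A minimal sketch of the same pattern, with a plain set standing in for chainstate storage (hypothetical names, not the node's API):

```rust
use std::collections::HashSet;

/// Stand-in for the chainstate: remembers which (consensus hash, block hash)
/// pairs have already been processed and skips repeat work.
fn process_tenure(
    processed: &mut HashSet<(String, String)>,
    consensus_hash: &str,
    block_hash: &str,
) -> bool {
    let key = (consensus_hash.to_string(), block_hash.to_string());
    if processed.contains(&key) {
        return true; // already processed this tenure's block
    }
    // ... preprocess and store the anchored block here ...
    processed.insert(key);
    true
}

fn main() {
    let mut processed = HashSet::new();
    assert!(process_tenure(&mut processed, "ch", "bh")); // first call does the work
    assert!(process_tenure(&mut processed, "ch", "bh")); // second call returns early
}
```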
154 changes: 153 additions & 1 deletion testnet/stacks-node/src/tests/neon_integrations.rs
@@ -2,7 +2,6 @@ use super::{
make_contract_call, make_contract_publish, make_contract_publish_microblock_only,
make_microblock, make_stacks_transfer_mblock_only, to_addr, ADDR_4, SK_1, SK_2,
};
use stacks::chainstate::burn::ConsensusHash;
use stacks::chainstate::stacks::{
db::StacksChainState, StacksAddress, StacksBlock, StacksBlockHeader, StacksPrivateKey,
StacksPublicKey, StacksTransaction, TransactionPayload,
@@ -15,6 +14,13 @@ use stacks::vm::database::ClarityDeserializable;
use stacks::vm::execute;
use stacks::vm::types::PrincipalData;
use stacks::vm::Value;
use stacks::{
burnchains::db::BurnchainDB,
chainstate::{
burn::{db::sortdb::SortitionDB, BlockHeaderHash, ConsensusHash},
stacks::{StacksBlockId, StacksMicroblock},
},
};
use stacks::{
burnchains::{Address, Burnchain, PoxConstants},
vm::costs::ExecutionCost,
Expand Down Expand Up @@ -376,6 +382,27 @@ fn get_account<F: std::fmt::Display>(http_origin: &str, account: &F) -> Account
}
}

fn get_chain_tip(http_origin: &str) -> (ConsensusHash, BlockHeaderHash) {
let client = reqwest::blocking::Client::new();
let path = format!("{}/v2/info", http_origin);
let res = client
.get(&path)
.send()
.unwrap()
.json::<serde_json::Value>()
.unwrap();
(
ConsensusHash::from_hex(
res.get("stacks_tip_consensus_hash")
.unwrap()
.as_str()
.unwrap(),
)
.unwrap(),
BlockHeaderHash::from_hex(res.get("stacks_tip").unwrap().as_str().unwrap()).unwrap(),
)
}

#[test]
#[ignore]
fn liquid_ustx_integration() {
Expand Down Expand Up @@ -805,6 +832,131 @@ fn stx_transfer_btc_integration_test() {
channel.stop_chains_coordinator();
}

#[test]
#[ignore]
fn bitcoind_resubmission_test() {
if env::var("BITCOIND_TEST") != Ok("1".into()) {
return;
}

let (mut conf, _miner_account) = neon_integration_test_conf();

let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap();
let spender_addr: PrincipalData = to_addr(&spender_sk).into();

conf.initial_balances.push(InitialBalance {
address: spender_addr.clone(),
amount: 100300,
});

let mut btcd_controller = BitcoinCoreController::new(conf.clone());
btcd_controller
.start_bitcoind()
.map_err(|_e| ())
.expect("Failed starting bitcoind");

let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None);
let http_origin = format!("http://{}", &conf.node.rpc_bind);

btc_regtest_controller.bootstrap_chain(201);

eprintln!("Chain bootstrapped...");

let mut run_loop = neon::RunLoop::new(conf.clone());
let blocks_processed = run_loop.get_blocks_processed_arc();

let channel = run_loop.get_coordinator_channel().unwrap();

thread::spawn(move || run_loop.start(None, 0));

// give the run loop some time to start up!
wait_for_runloop(&blocks_processed);

// first block wakes up the run loop
next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

// first block will hold our VRF registration
next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

// next block, issue a commit
next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

// let's figure out the current chain tip
let chain_tip = get_chain_tip(&http_origin);

// HACK: this test relies on manually inserting a bad microblock into the chain state.
// this behavior is not guaranteed to continue to work like this, so at some point this
// test will need to be updated to handle that.
{
let (mut chainstate, _) =
StacksChainState::open(false, conf.burnchain.chain_id, &conf.get_chainstate_path())
.unwrap();
let mut tx = chainstate.db_tx_begin().unwrap();

let (consensus_hash, stacks_block) = get_tip_anchored_block(&conf);

// let tip_hash = StacksBlockId::new(&consensus_hash, &stacks_block.header.block_hash());

let ublock_privk =
find_microblock_privkey(&conf, &stacks_block.header.microblock_pubkey_hash, 1024)
.unwrap();

let garbage_tx = make_stacks_transfer_mblock_only(
&spender_sk,
0,
100,
&PrincipalData::from(StacksAddress::burn_address(false)),
1000,
);
let mut garbage_block = StacksMicroblock::first_unsigned(
&chain_tip.1,
vec![StacksTransaction::consensus_deserialize(&mut garbage_tx.as_slice()).unwrap()],
);
garbage_block.header.prev_block = BlockHeaderHash([3; 32]);
garbage_block.header.sequence = 1;
garbage_block.sign(&ublock_privk).unwrap();

eprintln!("Minting microblock at {}/{}", &chain_tip.0, &chain_tip.1);
StacksChainState::store_staging_microblock(
&mut tx,
&consensus_hash,
&stacks_block.header.block_hash(),
&garbage_block,
)
.unwrap();
tx.commit().unwrap();
}

thread::sleep(Duration::from_secs(30));

next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);

let burnchain_db = BurnchainDB::open(
&btc_regtest_controller
.get_burnchain()
.get_burnchaindb_path(),
false,
)
.unwrap();

let burn_tip = burnchain_db.get_canonical_chain_tip().unwrap();
let last_burn_block = burnchain_db
.get_burnchain_block(&burn_tip.block_hash)
.unwrap();

assert_eq!(
last_burn_block.ops.len(),
1,
"Should only have ONE operation in the last burn block"
);

channel.stop_chains_coordinator();
}

#[test]
#[ignore]
fn bitcoind_forking_test() {