ret
}
- fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate> {
- return self.simple_monitor.fetch_pending_htlc_updated();
+ fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate> {
+ return self.simple_monitor.get_and_clear_pending_htlcs_updated();
}
}
// restart. This is doubly true for the fail/fulfill-backs from monitor events!
{
//TODO: This behavior should be documented.
- for htlc_update in self.monitor.fetch_pending_htlc_updated() {
+ for htlc_update in self.monitor.get_and_clear_pending_htlcs_updated() {
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(self, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
// restart. This is doubly true for the fail/fulfill-backs from monitor events!
{
//TODO: This behavior should be documented.
- for htlc_update in self.monitor.fetch_pending_htlc_updated() {
+ for htlc_update in self.monitor.get_and_clear_pending_htlcs_updated() {
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(self, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
/// Simple structure send back by ManyChannelMonitor in case of HTLC detected onchain from a
/// forward channel and from which info are needed to update HTLC in a backward channel.
+#[derive(Clone, PartialEq)]
pub struct HTLCUpdate {
pub(super) payment_hash: PaymentHash,
pub(super) payment_preimage: Option<PaymentPreimage>,
pub(super) source: HTLCSource
}
+impl_writeable!(HTLCUpdate, 0, { payment_hash, payment_preimage, source });
/// Simple trait indicating ability to track a set of ChannelMonitors and multiplex events between
/// them. Generally should be implemented by keeping a local SimpleManyChannelMonitor and passing
fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChanSigner>) -> Result<(), ChannelMonitorUpdateErr>;
/// Used by ChannelManager to get list of HTLC resolved onchain and which needed to be updated
- /// with success or failure backward
- fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate>;
+ /// with success or failure.
+ ///
+ /// You should probably just call through to
+ /// ChannelMonitor::get_and_clear_pending_htlcs_updated() for each ChannelMonitor and return
+ /// the full list.
+ fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate>;
}
/// A simple implementation of a ManyChannelMonitor and ChainListener. Can be used to create a
chain_monitor: Arc<ChainWatchInterface>,
broadcaster: Arc<BroadcasterInterface>,
pending_events: Mutex<Vec<events::Event>>,
- pending_htlc_updated: Mutex<HashMap<PaymentHash, Vec<(HTLCSource, Option<PaymentPreimage>)>>>,
logger: Arc<Logger>,
fee_estimator: Arc<FeeEstimator>
}
fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], _indexes_of_txn_matched: &[u32]) {
let block_hash = header.bitcoin_hash();
let mut new_events: Vec<events::Event> = Vec::with_capacity(0);
- let mut htlc_updated_infos = Vec::new();
{
let mut monitors = self.monitors.lock().unwrap();
for monitor in monitors.values_mut() {
- let (txn_outputs, spendable_outputs, mut htlc_updated) = monitor.block_connected(txn_matched, height, &block_hash, &*self.broadcaster, &*self.fee_estimator);
+ let (txn_outputs, spendable_outputs) = monitor.block_connected(txn_matched, height, &block_hash, &*self.broadcaster, &*self.fee_estimator);
if spendable_outputs.len() > 0 {
new_events.push(events::Event::SpendableOutputs {
outputs: spendable_outputs,
self.chain_monitor.install_watch_outpoint((txid.clone(), idx as u32), &output.script_pubkey);
}
}
- htlc_updated_infos.append(&mut htlc_updated);
- }
- }
- {
- // ChannelManager will just need to fetch pending_htlc_updated and pass state backward
- let mut pending_htlc_updated = self.pending_htlc_updated.lock().unwrap();
- for htlc in htlc_updated_infos.drain(..) {
- match pending_htlc_updated.entry(htlc.2) {
- hash_map::Entry::Occupied(mut e) => {
- // In case of reorg we may have htlc outputs solved in a different way so
- // we prefer to keep claims but don't store duplicate updates for a given
- // (payment_hash, HTLCSource) pair.
- let mut existing_claim = false;
- e.get_mut().retain(|htlc_data| {
- if htlc.0 == htlc_data.0 {
- if htlc_data.1.is_some() {
- existing_claim = true;
- true
- } else { false }
- } else { true }
- });
- if !existing_claim {
- e.get_mut().push((htlc.0, htlc.1));
- }
- }
- hash_map::Entry::Vacant(e) => {
- e.insert(vec![(htlc.0, htlc.1)]);
- }
- }
}
}
let mut pending_events = self.pending_events.lock().unwrap();
chain_monitor,
broadcaster,
pending_events: Mutex::new(Vec::new()),
- pending_htlc_updated: Mutex::new(HashMap::new()),
logger,
fee_estimator: feeest,
};
}
}
- fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate> {
- let mut updated = self.pending_htlc_updated.lock().unwrap();
- let mut pending_htlcs_updated = Vec::with_capacity(updated.len());
- for (k, v) in updated.drain() {
- for htlc_data in v {
- pending_htlcs_updated.push(HTLCUpdate {
- payment_hash: k,
- payment_preimage: htlc_data.1,
- source: htlc_data.0,
- });
- }
+	fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate> {
+		// Drain the queued HTLC updates from each individual ChannelMonitor and
+		// return them as one flat list (each monitor's queue is emptied by the call).
+		let mut pending_htlcs_updated = Vec::new();
+		for chan in self.monitors.lock().unwrap().values_mut() {
+			pending_htlcs_updated.append(&mut chan.get_and_clear_pending_htlcs_updated());
		}
		pending_htlcs_updated
	}
payment_preimages: HashMap<PaymentHash, PaymentPreimage>,
+ pending_htlcs_updated: Vec<HTLCUpdate>,
+
destination_script: Script,
// Thanks to data loss protection, we may be able to claim our non-htlc funds
// back, this is the script we have to spend from but we need to
self.current_remote_commitment_number != other.current_remote_commitment_number ||
self.current_local_signed_commitment_tx != other.current_local_signed_commitment_tx ||
self.payment_preimages != other.payment_preimages ||
+ self.pending_htlcs_updated != other.pending_htlcs_updated ||
self.destination_script != other.destination_script ||
self.to_remote_rescue != other.to_remote_rescue ||
self.pending_claim_requests != other.pending_claim_requests ||
writer.write_all(&payment_preimage.0[..])?;
}
+ writer.write_all(&byte_utils::be64_to_array(self.pending_htlcs_updated.len() as u64))?;
+ for data in self.pending_htlcs_updated.iter() {
+ data.write(writer)?;
+ }
+
self.last_block_hash.write(writer)?;
self.destination_script.write(writer)?;
if let Some((ref to_remote_script, ref local_key)) = self.to_remote_rescue {
current_remote_commitment_number: 1 << 48,
payment_preimages: HashMap::new(),
+ pending_htlcs_updated: Vec::new(),
+
destination_script: destination_script,
to_remote_rescue: None,
res
}
+	/// Get the list of HTLCs whose status has been updated on chain. This should be called by
+	/// ChannelManager via ManyChannelMonitor::get_and_clear_pending_htlcs_updated().
+	pub fn get_and_clear_pending_htlcs_updated(&mut self) -> Vec<HTLCUpdate> {
+		// Swap the pending list out for a fresh empty Vec, returning the accumulated
+		// updates without cloning them.
+		let mut ret = Vec::new();
+		mem::swap(&mut ret, &mut self.pending_htlcs_updated);
+		ret
+	}
+
/// Can only fail if idx is < get_min_seen_secret
pub(super) fn get_secret(&self, idx: u64) -> Option<[u8; 32]> {
for i in 0..self.old_secrets.len() {
/// Eventually this should be pub and, roughly, implement ChainListener, however this requires
/// &mut self, as well as returns new spendable outputs and outpoints to watch for spending of
/// on-chain.
- fn block_connected(&mut self, txn_matched: &[&Transaction], height: u32, block_hash: &Sha256dHash, broadcaster: &BroadcasterInterface, fee_estimator: &FeeEstimator)-> (Vec<(Sha256dHash, Vec<TxOut>)>, Vec<SpendableOutputDescriptor>, Vec<(HTLCSource, Option<PaymentPreimage>, PaymentHash)>) {
+ fn block_connected(&mut self, txn_matched: &[&Transaction], height: u32, block_hash: &Sha256dHash, broadcaster: &BroadcasterInterface, fee_estimator: &FeeEstimator)-> (Vec<(Sha256dHash, Vec<TxOut>)>, Vec<SpendableOutputDescriptor>) {
for tx in txn_matched {
let mut output_val = 0;
for out in tx.output.iter() {
log_trace!(self, "Block {} at height {} connected with {} txn matched", block_hash, height, txn_matched.len());
let mut watch_outputs = Vec::new();
let mut spendable_outputs = Vec::new();
- let mut htlc_updated = Vec::new();
let mut bump_candidates = HashSet::new();
for tx in txn_matched {
if tx.input.len() == 1 {
// While all commitment/HTLC-Success/HTLC-Timeout transactions have one input, HTLCs
// can also be resolved in a few other ways which can have more than one output. Thus,
// we call is_resolving_htlc_output here outside of the tx.input.len() == 1 check.
- let mut updated = self.is_resolving_htlc_output(&tx, height);
- if updated.len() > 0 {
- htlc_updated.append(&mut updated);
- }
+ self.is_resolving_htlc_output(&tx, height);
// Scan all input to verify is one of the outpoint spent is of interest for us
let mut claimed_outputs_material = Vec::new();
},
OnchainEvent::HTLCUpdate { htlc_update } => {
log_trace!(self, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!((htlc_update.1).0));
- htlc_updated.push((htlc_update.0, None, htlc_update.1));
+ self.pending_htlcs_updated.push(HTLCUpdate {
+ payment_hash: htlc_update.1,
+ payment_preimage: None,
+ source: htlc_update.0,
+ });
},
OnchainEvent::ContentiousOutpoint { outpoint, .. } => {
self.claimable_outpoints.remove(&outpoint);
for &(ref txid, ref output_scripts) in watch_outputs.iter() {
self.outputs_to_watch.insert(txid.clone(), output_scripts.iter().map(|o| o.script_pubkey.clone()).collect());
}
- (watch_outputs, spendable_outputs, htlc_updated)
+ (watch_outputs, spendable_outputs)
}
fn block_disconnected(&mut self, height: u32, block_hash: &Sha256dHash, broadcaster: &BroadcasterInterface, fee_estimator: &FeeEstimator) {
/// Check if any transaction broadcasted is resolving HTLC output by a success or timeout on a local
/// or remote commitment tx, if so send back the source, preimage if found and payment_hash of resolved HTLC
- fn is_resolving_htlc_output(&mut self, tx: &Transaction, height: u32) -> Vec<(HTLCSource, Option<PaymentPreimage>, PaymentHash)> {
- let mut htlc_updated = Vec::new();
-
+ fn is_resolving_htlc_output(&mut self, tx: &Transaction, height: u32) {
'outer_loop: for input in &tx.input {
let mut payment_data = None;
let revocation_sig_claim = (input.witness.len() == 3 && HTLCType::scriptlen_to_htlctype(input.witness[2].len()) == Some(HTLCType::OfferedHTLC) && input.witness[1].len() == 33)
let mut payment_preimage = PaymentPreimage([0; 32]);
if accepted_preimage_claim {
payment_preimage.0.copy_from_slice(&input.witness[3]);
- htlc_updated.push((source, Some(payment_preimage), payment_hash));
+ self.pending_htlcs_updated.push(HTLCUpdate {
+ source,
+ payment_preimage: Some(payment_preimage),
+ payment_hash
+ });
} else if offered_preimage_claim {
payment_preimage.0.copy_from_slice(&input.witness[1]);
- htlc_updated.push((source, Some(payment_preimage), payment_hash));
+ self.pending_htlcs_updated.push(HTLCUpdate {
+ source,
+ payment_preimage: Some(payment_preimage),
+ payment_hash
+ });
} else {
log_info!(self, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height{})", log_bytes!(payment_hash.0), height + ANTI_REORG_DELAY - 1);
match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
}
}
}
- htlc_updated
}
/// Lightning security model (i.e being able to redeem/timeout HTLC or penalize coutnerparty onchain) lays on the assumption of claim transactions getting confirmed before timelock expiration
}
}
+ let pending_htlcs_updated_len: u64 = Readable::read(reader)?;
+ let mut pending_htlcs_updated = Vec::with_capacity(cmp::min(pending_htlcs_updated_len as usize, MAX_ALLOC_SIZE / (32 + 8*3)));
+ for _ in 0..pending_htlcs_updated_len {
+ pending_htlcs_updated.push(Readable::read(reader)?);
+ }
+
let last_block_hash: Sha256dHash = Readable::read(reader)?;
let destination_script = Readable::read(reader)?;
let to_remote_rescue = match <u8 as Readable<R>>::read(reader)? {
current_remote_commitment_number,
payment_preimages,
+ pending_htlcs_updated,
destination_script,
to_remote_rescue,
}
}
+/// Asserts that the node has exactly one pending event, that it is a PaymentFailed event,
+/// and that its payment_hash and rejected_by_dest match the expected values.
+macro_rules! expect_payment_failed {
+	($node: expr, $expected_payment_hash: expr, $rejected_by_dest: expr) => {
+		let events = $node.node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			Event::PaymentFailed { ref payment_hash, rejected_by_dest, .. } => {
+				assert_eq!(*payment_hash, $expected_payment_hash);
+				assert_eq!(rejected_by_dest, $rejected_by_dest);
+			},
+			_ => panic!("Unexpected event"),
+		}
+	}
+}
+
pub fn send_along_route_with_hash<'a, 'b>(origin_node: &Node<'a, 'b>, route: Route, expected_route: &[&Node<'a, 'b>], recv_value: u64, our_payment_hash: PaymentHash) {
let mut payment_event = {
origin_node.node.send_payment(route, our_payment_hash).unwrap();
mod functional_tests;
#[cfg(test)]
mod chanmon_update_fail_tests;
+#[cfg(test)]
+mod reorg_tests;
--- /dev/null
+//! Further functional tests which test blockchain reorganizations.
+
+use ln::channelmonitor::ANTI_REORG_DELAY;
+use ln::features::InitFeatures;
+use ln::msgs::{ChannelMessageHandler, ErrorAction, HTLCFailChannelUpdate};
+use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+
+use bitcoin::util::hash::BitcoinHash;
+use bitcoin::blockdata::block::{Block, BlockHeader};
+
+use std::default::Default;
+
+use ln::functional_test_utils::*;
+
+fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
+	// Our on-chain HTLC-claim learning has a few properties worth testing:
+	// * If an upstream HTLC is claimed with a preimage (both against our own commitment
+	// transaction and our counterparty's), we claim it backwards immediately.
+	// * If an upstream HTLC is claimed with a timeout, we delay ANTI_REORG_DELAY before failing
+	// it backwards to ensure our counterparty can't claim with a preimage in a reorg.
+	//
+	// Here we test both properties in any combination based on the two bools passed in as
+	// arguments.
+	//
+	// If local_commitment is set, we first broadcast a local commitment containing an offered HTLC
+	// and an HTLC-Timeout tx, otherwise we broadcast a remote commitment containing a received
+	// HTLC and a local HTLC-Timeout tx spending it.
+	//
+	// We then either allow these transactions to confirm (if !claim) or we wait until one block
+	// before they otherwise would and reorg them out, confirming an HTLC-Success tx instead.
+	let node_cfgs = create_node_cfgs(3);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+	let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported());
+
+	// Route an HTLC 0 -> 1 -> 2 which we will later resolve on chain.
+	let (our_payment_preimage, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+
+	// Provide preimage to node 2 by claiming payment
+	nodes[2].node.claim_funds(our_payment_preimage, 1000000);
+	check_added_monitors!(nodes[2], 1);
+	get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+
+	let mut headers = Vec::new();
+	let mut header = BlockHeader { version: 0x2000_0000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+	let claim_txn = if local_commitment {
+		// Broadcast node 1 commitment txn to broadcast the HTLC-Timeout
+		let node_1_commitment_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get_mut(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+		assert_eq!(node_1_commitment_txn.len(), 2); // 1 local commitment tx, 1 Outbound HTLC-Timeout
+		assert_eq!(node_1_commitment_txn[0].output.len(), 2); // to-self and Offered HTLC (to-remote/to-node-3 is dust)
+		check_spends!(node_1_commitment_txn[0], chan_2.3);
+		check_spends!(node_1_commitment_txn[1], node_1_commitment_txn[0].clone());
+
+		// Give node 2 node 1's transactions and get its response (claiming the HTLC instead).
+		nodes[2].block_notifier.block_connected(&Block { header, txdata: node_1_commitment_txn.clone() }, CHAN_CONFIRM_DEPTH + 1);
+		check_closed_broadcast!(nodes[2], false); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
+		let node_2_commitment_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(node_2_commitment_txn.len(), 3); // ChannelMonitor: 1 offered HTLC-Claim, ChannelManager: 1 local commitment tx, 1 Received HTLC-Claim
+		assert_eq!(node_2_commitment_txn[1].output.len(), 2); // to-remote and Received HTLC (to-self is dust)
+		check_spends!(node_2_commitment_txn[1], chan_2.3);
+		check_spends!(node_2_commitment_txn[2], node_2_commitment_txn[1].clone());
+		check_spends!(node_2_commitment_txn[0], node_1_commitment_txn[0]);
+
+		// Confirm node 1's commitment txn (and HTLC-Timeout) on node 1
+		nodes[1].block_notifier.block_connected(&Block { header, txdata: node_1_commitment_txn.clone() }, CHAN_CONFIRM_DEPTH + 1);
+
+		// ...but return node 1's commitment tx in case claim is set and we're preparing to reorg
+		vec![node_1_commitment_txn[0].clone(), node_2_commitment_txn[0].clone()]
+	} else {
+		// Broadcast node 2 commitment txn
+		let node_2_commitment_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get_mut(&chan_2.2).unwrap().channel_monitor().get_latest_local_commitment_txn();
+		assert_eq!(node_2_commitment_txn.len(), 2); // 1 local commitment tx, 1 Received HTLC-Claim
+		assert_eq!(node_2_commitment_txn[0].output.len(), 2); // to-remote and Received HTLC (to-self is dust)
+		check_spends!(node_2_commitment_txn[0], chan_2.3);
+		check_spends!(node_2_commitment_txn[1], node_2_commitment_txn[0].clone());
+
+		// Give node 1 node 2's commitment transaction and get its response (timing the HTLC out)
+		nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_2_commitment_txn[0].clone()] }, CHAN_CONFIRM_DEPTH + 1);
+		let node_1_commitment_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		assert_eq!(node_1_commitment_txn.len(), 3); // ChannelMonitor: 1 offered HTLC-Timeout, ChannelManager: 1 local commitment tx, 1 Offered HTLC-Timeout
+		assert_eq!(node_1_commitment_txn[1].output.len(), 2); // to-local and Offered HTLC (to-remote is dust)
+		check_spends!(node_1_commitment_txn[1], chan_2.3);
+		check_spends!(node_1_commitment_txn[2], node_1_commitment_txn[1].clone());
+		check_spends!(node_1_commitment_txn[0], node_2_commitment_txn[0]);
+
+		// Confirm node 2's commitment txn (and node 1's HTLC-Timeout) on node 1
+		nodes[1].block_notifier.block_connected(&Block { header, txdata: vec![node_2_commitment_txn[0].clone(), node_1_commitment_txn[0].clone()] }, CHAN_CONFIRM_DEPTH + 1);
+		// ...but return node 2's commitment tx (and claim) in case claim is set and we're preparing to reorg
+		node_2_commitment_txn
+	};
+	check_closed_broadcast!(nodes[1], false); // We should get a BroadcastChannelUpdate (and *only* a BroadcastChannelUpdate)
+	headers.push(header.clone());
+	// At CHAN_CONFIRM_DEPTH + 1 we have a confirmation count of 1, so CHAN_CONFIRM_DEPTH +
+	// ANTI_REORG_DELAY - 1 will give us a confirmation count of ANTI_REORG_DELAY - 1.
+	for i in CHAN_CONFIRM_DEPTH + 2..CHAN_CONFIRM_DEPTH + ANTI_REORG_DELAY - 1 {
+		header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+		nodes[1].block_notifier.block_connected_checked(&header, i, &vec![], &[0; 0]);
+		headers.push(header.clone());
+	}
+	check_added_monitors!(nodes[1], 0);
+	assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0);
+
+	if claim {
+		// Now reorg back to CHAN_CONFIRM_DEPTH and confirm node 2's broadcasted transactions:
+		for (height, header) in (CHAN_CONFIRM_DEPTH + 1..CHAN_CONFIRM_DEPTH + ANTI_REORG_DELAY - 1).zip(headers.iter()).rev() {
+			nodes[1].block_notifier.block_disconnected(&header, height);
+		}
+
+		header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+		nodes[1].block_notifier.block_connected(&Block { header, txdata: claim_txn }, CHAN_CONFIRM_DEPTH + 1);
+
+		// ChannelManager only polls ManyChannelMonitor::get_and_clear_pending_htlcs_updated when we
+		// probe it for events, so we probe non-message events here (which should still end up empty):
+		assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0);
+	} else {
+		// Confirm the timeout tx and check that we fail the HTLC backwards
+		header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+		nodes[1].block_notifier.block_connected_checked(&header, CHAN_CONFIRM_DEPTH + ANTI_REORG_DELAY, &vec![], &[0; 0]);
+		expect_pending_htlcs_forwardable!(nodes[1]);
+	}
+
+	check_added_monitors!(nodes[1], 1);
+	// Which should result in an immediate claim/fail of the HTLC:
+	let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	if claim {
+		assert_eq!(htlc_updates.update_fulfill_htlcs.len(), 1);
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fulfill_htlcs[0]);
+	} else {
+		assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]);
+	}
+	commitment_signed_dance!(nodes[0], nodes[1], htlc_updates.commitment_signed, false, true);
+	if claim {
+		expect_payment_sent!(nodes[0], our_payment_preimage);
+	} else {
+		let events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		if let MessageSendEvent::PaymentFailureNetworkUpdate { update: HTLCFailChannelUpdate::ChannelClosed { ref is_permanent, .. } } = events[0] {
+			assert!(is_permanent);
+		} else { panic!("Unexpected event!"); }
+		expect_payment_failed!(nodes[0], our_payment_hash, false);
+	}
+}
+
+// The four permutations of do_test_onchain_htlc_reorg(local_commitment, claim):
+// which side's commitment transaction confirms, and whether the HTLC is reorg-claimed
+// with a preimage or allowed to time out.
+#[test]
+fn test_onchain_htlc_claim_reorg_local_commitment() {
+	do_test_onchain_htlc_reorg(true, true);
+}
+#[test]
+fn test_onchain_htlc_timeout_delay_local_commitment() {
+	do_test_onchain_htlc_reorg(true, false);
+}
+#[test]
+fn test_onchain_htlc_claim_reorg_remote_commitment() {
+	do_test_onchain_htlc_reorg(false, true);
+}
+#[test]
+fn test_onchain_htlc_timeout_delay_remote_commitment() {
+	do_test_onchain_htlc_reorg(false, false);
+}
self.update_ret.lock().unwrap().clone()
}
- fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate> {
- return self.simple_monitor.fetch_pending_htlc_updated();
+ fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate> {
+ return self.simple_monitor.get_and_clear_pending_htlcs_updated();
}
}