X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fchannelmanager.rs;h=52819526b727232907178c653d17b0c7ba542ea4;hb=96d17ee7370af557330ccd8618648e82a6899ac0;hp=44dee8d4bb4c65af5804ac3a9c3ea46ac7a8c628;hpb=aa0a37cc4bffacddd1ca75c44876b628dd915e44;p=rust-lightning

diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index 44dee8d4..52819526 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -90,7 +90,7 @@ mod channel_held_info {
 	}
 
 	/// Tracks the inbound corresponding to an outbound HTLC
-	#[derive(Clone)]
+	#[derive(Clone, PartialEq)]
 	pub struct HTLCPreviousHopData {
 		pub(super) short_channel_id: u64,
 		pub(super) htlc_id: u64,
@@ -98,7 +98,7 @@ mod channel_held_info {
 	}
 
 	/// Tracks the inbound corresponding to an outbound HTLC
-	#[derive(Clone)]
+	#[derive(Clone, PartialEq)]
 	pub enum HTLCSource {
 		PreviousHopData(HTLCPreviousHopData),
 		OutboundRoute {
@@ -679,13 +679,6 @@ impl ChannelManager {
 		for tx in local_txn {
 			self.tx_broadcaster.broadcast_transaction(&tx);
 		}
-		//TODO: We need to have a way where outbound HTLC claims can result in us claiming the
-		//now-on-chain HTLC output for ourselves (and, thereafter, passing the HTLC backwards).
-		//TODO: We need to handle monitoring of pending offered HTLCs which just hit the chain and
-		//may be claimed, resulting in us claiming the inbound HTLCs (and back-failing after
-		//timeouts are hit and our claims confirm).
-		//TODO: In any case, we need to make sure we remove any pending htlc tracking (via
-		//fail_backwards or claim_funds) eventually for all HTLCs that were in the channel
 	}
 
 	/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
@@ -2718,6 +2711,13 @@ impl ChainListener for ChannelManager {
 		for failure in failed_channels.drain(..) {
 			self.finish_force_close_channel(failure);
 		}
+		{
+			for htlc_update in self.monitor.fetch_pending_htlc_updated() {
+				if let Some(preimage) = htlc_update.payment_preimage {
+					self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+				}
+			}
+		}
 		self.latest_block_height.store(height as usize, Ordering::Release);
 		*self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.bitcoin_hash();
 	}
@@ -5001,52 +5001,6 @@ mod tests {
 		assert!(nodes[2].node.list_channels().is_empty());
 	}
 
-	#[test]
-	fn update_fee_async_shutdown() {
-		// Test update_fee works after shutdown start if messages are delivered out-of-order
-		let nodes = create_network(2);
-		let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
-
-		let starting_feerate = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().get_feerate();
-		nodes[0].node.update_fee(chan_1.2.clone(), starting_feerate + 20).unwrap();
-		check_added_monitors!(nodes[0], 1);
-		let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
-		assert!(updates.update_add_htlcs.is_empty());
-		assert!(updates.update_fulfill_htlcs.is_empty());
-		assert!(updates.update_fail_htlcs.is_empty());
-		assert!(updates.update_fail_malformed_htlcs.is_empty());
-		assert!(updates.update_fee.is_some());
-
-		nodes[1].node.close_channel(&chan_1.2).unwrap();
-		let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
-		nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
-		// Note that we don't actually test normative behavior here. The spec indicates we could
-		// actually send a closing_signed here, but is kinda unclear and could possibly be amended
-		// to require waiting on the full commitment dance before doing so (see
-		// https://github.com/lightningnetwork/lightning-rfc/issues/499). In any case, to avoid
-		// ambiguity, we should wait until after the full commitment dance to send closing_signed.
-		let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
-
-		nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &updates.update_fee.unwrap()).unwrap();
-		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap();
-		check_added_monitors!(nodes[1], 1);
-		nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
-		let node_0_closing_signed = commitment_signed_dance!(nodes[1], nodes[0], (), false, true, true);
-
-		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-		nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), match node_0_closing_signed.unwrap() {
-			MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
-				assert_eq!(*node_id, nodes[1].node.get_our_node_id());
-				msg
-			},
-			_ => panic!("Unexpected event"),
-		}).unwrap();
-		let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
-		nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
-		let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
-		assert!(node_0_none.is_none());
-	}
-
 	fn do_test_shutdown_rebroadcast(recv_count: u8) {
 		// Test that shutdown/closing_signed is re-sent on reconnect with a variable number of
 		// messages delivered prior to disconnect
@@ -5390,7 +5344,10 @@ mod tests {
 					false
 				} else { true }
 			});
-			assert_eq!(res.len(), 2);
+			assert!(res.len() == 2 || res.len() == 3);
+			if res.len() == 3 {
+				assert_eq!(res[1], res[2]);
+			}
 		}
 
 		assert!(node_txn.is_empty());
@@ -8475,10 +8432,12 @@ mod tests {
 			_ => panic!("Unexpected event"),
 		}
 		let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		assert_eq!(revoked_htlc_txn.len(), 2);
+		assert_eq!(revoked_htlc_txn.len(), 3);
+		assert_eq!(revoked_htlc_txn[0], revoked_htlc_txn[2]);
 		assert_eq!(revoked_htlc_txn[0].input.len(), 1);
 		assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), 133);
 		check_spends!(revoked_htlc_txn[0], revoked_local_txn[0].clone());
+		check_spends!(revoked_htlc_txn[1], chan_1.3.clone());
 
 		// B will generate justice tx from A's revoked commitment/HTLC tx
 		nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1);
@@ -8525,7 +8484,8 @@ mod tests {
 		}
 
 		let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		assert_eq!(revoked_htlc_txn.len(), 2);
+		assert_eq!(revoked_htlc_txn.len(), 3);
+		assert_eq!(revoked_htlc_txn[0], revoked_htlc_txn[2]);
 		assert_eq!(revoked_htlc_txn[0].input.len(), 1);
 		assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), 138);
 		check_spends!(revoked_htlc_txn[0], revoked_local_txn[0].clone());
@@ -8586,8 +8546,9 @@ mod tests {
 
 		// Verify that B is able to spend its own HTLC-Success tx thanks to spendable output event given back by its ChannelMonitor
 		let spend_txn = check_spendable_outputs!(nodes[1], 1);
-		assert_eq!(spend_txn.len(), 1);
+		assert_eq!(spend_txn.len(), 2);
 		check_spends!(spend_txn[0], node_txn[0].clone());
+		check_spends!(spend_txn[1], node_txn[2].clone());
 	}
 
 	#[test]
@@ -8617,9 +8578,13 @@ mod tests {
 
 		// Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor
 		let spend_txn = check_spendable_outputs!(nodes[0], 1);
-		assert_eq!(spend_txn.len(), 4);
+		assert_eq!(spend_txn.len(), 8);
 		assert_eq!(spend_txn[0], spend_txn[2]);
+		assert_eq!(spend_txn[0], spend_txn[4]);
+		assert_eq!(spend_txn[0], spend_txn[6]);
 		assert_eq!(spend_txn[1], spend_txn[3]);
+		assert_eq!(spend_txn[1], spend_txn[5]);
+		assert_eq!(spend_txn[1], spend_txn[7]);
 		check_spends!(spend_txn[0], local_txn[0].clone());
 		check_spends!(spend_txn[1], node_txn[0].clone());
 	}
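
Reading note on the block_connected hunk above: the sketch below is a minimal, self-contained illustration of the control flow it introduces, not rust-lightning code. HtlcUpdate, claim_funds_backwards, and fail_htlc_backwards are hypothetical stand-ins; in the patch itself only the preimage case is wired up (via fetch_pending_htlc_updated and claim_funds_internal), while the timeout/fail-backwards case is the follow-up work described by the removed TODO comments.

// Minimal sketch (hypothetical names, not rust-lightning API) of the flow the
// new block_connected hunk introduces: after force-close handling, pending
// HTLC resolutions observed on-chain by the monitors are drained, and any
// resolution that revealed a payment preimage is claimed backwards.

struct HtlcUpdate {
    source_htlc_id: u64,                // stand-in for the HTLCSource being resolved
    payment_preimage: Option<[u8; 32]>, // Some(..) if a counterparty claim revealed the preimage
}

fn claim_funds_backwards(source_htlc_id: u64, _preimage: [u8; 32]) {
    // In the patch this role is played by ChannelManager::claim_funds_internal.
    println!("claiming inbound HTLC {} using the preimage learned on-chain", source_htlc_id);
}

fn fail_htlc_backwards(source_htlc_id: u64) {
    // Not part of this patch: the removed TODOs describe back-failing after
    // on-chain timeouts; shown here only to round out the flow.
    println!("failing inbound HTLC {} after the outbound HTLC timed out on-chain", source_htlc_id);
}

fn process_pending_htlc_updates(updates: Vec<HtlcUpdate>) {
    for update in updates {
        match update.payment_preimage {
            Some(preimage) => claim_funds_backwards(update.source_htlc_id, preimage),
            None => fail_htlc_backwards(update.source_htlc_id),
        }
    }
}

fn main() {
    process_pending_htlc_updates(vec![
        HtlcUpdate { source_htlc_id: 1, payment_preimage: Some([0x42; 32]) },
        HtlcUpdate { source_htlc_id: 2, payment_preimage: None },
    ]);
}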