X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=1e6a865e2d2a9faa9dc3708578c5b47c36bb1cf8;hb=8ec7cebc184ca706ccd34ad51518fec0c818b56e;hp=dcc80382c33a9ea9728ba4eb4a3a3c96d95472d6;hpb=0dd987fc41240d96a13f2011cb0679eb6abdb3b1;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index dcc80382..1e6a865e 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -52,7 +52,7 @@ use ln::onion_utils;
 use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
 use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner};
 use util::config::UserConfig;
-use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureDescriptor};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
 use util::{byte_utils, events};
 use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
 use util::chacha20::{ChaCha20, ChaChaReader};
@@ -258,7 +258,7 @@ impl MsgHandleErrInternal {
 				},
 			},
 		},
-			chan_id: Some(channel_id),
+			chan_id: None,
 			shutdown_finish: None,
 		}
 	}
@@ -324,7 +324,7 @@ impl MsgHandleErrInternal {
 				},
 			},
 		},
-			chan_id: Some(channel_id),
+			chan_id: None,
 			shutdown_finish: None,
 		}
 	}
@@ -824,6 +824,7 @@ macro_rules! handle_error {
 				// In testing, ensure there are no deadlocks where the lock is already held upon
 				// entering the macro.
 				assert!($self.channel_state.try_lock().is_ok());
+				assert!($self.pending_events.try_lock().is_ok());
 			}

 			let mut msg_events = Vec::with_capacity(2);
@@ -836,7 +837,7 @@ macro_rules! handle_error {
 					});
 				}
 				if let Some(channel_id) = chan_id {
-					$self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id, err: ClosureDescriptor::ProcessingError });
+					$self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id, reason: ClosureReason::ProcessingError { err: err.err.clone() } });
 				}
 			}
@@ -1446,7 +1447,9 @@ impl ChannelMana
 		}
 	}

-	fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<PublicKey, APIError> {
+	/// `peer_node_id` should be set when we receive a message from a peer, but not set when the user
+	/// closes; whether it is set (together with `peer_msg`) determines the `ChannelClosed` reason.
+	fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
 		let mut chan = {
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_state_lock;
@@ -1459,6 +1462,14 @@
 				if let Some(short_id) = chan.get().get_short_channel_id() {
 					channel_state.short_to_id.remove(&short_id);
 				}
+				let mut pending_events_lock = self.pending_events.lock().unwrap();
+				if peer_node_id.is_some() {
+					if let Some(peer_msg) = peer_msg {
+						pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() } });
+					}
+				} else {
+					pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::HolderForceClosed });
+				}
 				chan.remove_entry().1
 			} else {
 				return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
@@ -1472,7 +1483,6 @@
 				msg: update
 			});
 		}
-		self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: *channel_id, err: ClosureDescriptor::ForceClosed });
 		Ok(chan.get_counterparty_node_id())
 	}

@@ -1481,7 +1491,7 @@ impl ChannelMana
 	/// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
 	pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-		match self.force_close_channel_with_peer(channel_id, None) {
+		match self.force_close_channel_with_peer(channel_id, None, None) {
 			Ok(counterparty_node_id) => {
 				self.channel_state.lock().unwrap().pending_msg_events.push(
 					events::MessageSendEvent::HandleError {
@@ -2425,7 +2435,7 @@ impl ChannelMana
 				if let Some(short_id) = channel.get_short_channel_id() {
 					channel_state.short_to_id.remove(&short_id);
 				}
-				// ChannelClosed event is generated by handle_errors for us.
+				// ChannelClosed event is generated by handle_error for us.
 				Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
 			},
 			ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
@@ -3555,7 +3565,7 @@ impl ChannelMana
 					msg: update
 				});
 			}
-			self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: msg.channel_id, err: ClosureDescriptor::CooperativeClosure });
+			self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: msg.channel_id, reason: ClosureReason::CooperativeClosure });
 		}
 		Ok(())
 	}
@@ -3967,7 +3977,7 @@ impl ChannelMana
 						msg: update
 					});
 				}
-				self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), err: ClosureDescriptor::UnknownOnchainCommitment });
+				self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::CommitmentTxBroadcasted });
 				pending_msg_events.push(events::MessageSendEvent::HandleError {
 					node_id: chan.get_counterparty_node_id(),
 					action: msgs::ErrorAction::SendErrorMessage {
@@ -4503,7 +4513,7 @@ where
 					msg: update
 				});
 			}
-			self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: channel.channel_id(), err: ClosureDescriptor::UnknownOnchainCommitment });
+			self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: channel.channel_id(), reason: ClosureReason::CommitmentTxBroadcasted });
 			pending_msg_events.push(events::MessageSendEvent::HandleError {
 				node_id: channel.get_counterparty_node_id(),
 				action: msgs::ErrorAction::SendErrorMessage { msg: e },
@@ -4694,7 +4704,7 @@ impl
 					msg: update
 				});
 			}
-			self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), err: ClosureDescriptor::DisconnectedPeer });
+			self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer });
 			false
 		} else {
 			true
@@ -4709,7 +4719,7 @@ impl
 			if let Some(short_id) = chan.get_short_channel_id() {
 				short_to_id.remove(&short_id);
 			}
-			self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), err: ClosureDescriptor::ProcessingError });
+			self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer });
 			return false;
 		} else {
 			no_channels_remain = false;
@@ -4800,12 +4810,12 @@ impl
 			for chan in self.list_channels() {
 				if chan.counterparty.node_id == *counterparty_node_id {
 					// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
-					let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id));
+					let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id), Some(&msg.data));
 				}
 			}
 		} else {
 			// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
-			let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id));
+			let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id), Some(&msg.data));
 		}
 	}
@@ -5668,10 +5678,8 @@ mod tests {
 		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 		check_added_monitors!(nodes[1], 0);
 		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-		let events = nodes[1].node.get_and_clear_pending_events();
-		expect_pending_htlcs_forwardable!(nodes[1], events);
-		let events = nodes[1].node.get_and_clear_pending_events();
-		expect_pending_htlcs_forwardable!(nodes[1], events);
+		expect_pending_htlcs_forwardable!(nodes[1]);
+		expect_pending_htlcs_forwardable!(nodes[1]);
 		check_added_monitors!(nodes[1], 1);
 		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 		assert!(updates.update_add_htlcs.is_empty());
@@ -5736,6 +5744,7 @@ mod tests {
 	#[test]
 	fn test_keysend_dup_payment_hash() {
+
 		// (1): Test that a keysend payment with a duplicate payment hash to an existing pending
 		// outbound regular payment fails as expected.
 		// (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
 		// fails as expected.
@@ -5762,10 +5771,8 @@ mod tests {
 		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 		check_added_monitors!(nodes[1], 0);
 		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-		let events = nodes[1].node.get_and_clear_pending_events();
-		expect_pending_htlcs_forwardable!(nodes[1], events);
-		let events = nodes[1].node.get_and_clear_pending_events();
-		expect_pending_htlcs_forwardable!(nodes[1], events);
+		expect_pending_htlcs_forwardable!(nodes[1]);
+		expect_pending_htlcs_forwardable!(nodes[1]);
 		check_added_monitors!(nodes[1], 1);
 		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 		assert!(updates.update_add_htlcs.is_empty());
@@ -5803,10 +5810,8 @@ mod tests {
 		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 		check_added_monitors!(nodes[1], 0);
 		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-		let events = nodes[1].node.get_and_clear_pending_events();
-		expect_pending_htlcs_forwardable!(nodes[1], events);
-		let events = nodes[1].node.get_and_clear_pending_events();
-		expect_pending_htlcs_forwardable!(nodes[1], events);
+		expect_pending_htlcs_forwardable!(nodes[1]);
+		expect_pending_htlcs_forwardable!(nodes[1]);
 		check_added_monitors!(nodes[1], 1);
 		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 		assert!(updates.update_add_htlcs.is_empty());
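
For context when reviewing this patch: `ClosureReason` replaces the old `ClosureDescriptor` and, unlike it, carries data on two variants (`ProcessingError { err }` in `handle_error!` and `CounterpartyForceClosed { peer_msg }` in `force_close_channel_with_peer` above), so event consumers learn why a channel closed without correlating separate logs. Below is a minimal, self-contained sketch of how a consumer might branch on the new `reason` field. It is illustrative only, not code from this patch or the crate: the enum is a simplified stand-in for the variants constructed above, and `on_channel_closed`, `hex`, and the sample message strings are hypothetical.

    // Simplified stand-in for the `ClosureReason` this patch introduces; the real
    // definition lives in util::events and may carry additional detail.
    #[allow(dead_code)]
    #[derive(Debug)]
    enum ClosureReason {
        CounterpartyForceClosed { peer_msg: String }, // peer sent us an error message
        HolderForceClosed,                            // user called force_close_channel
        CooperativeClosure,                           // negotiated close completed
        CommitmentTxBroadcasted,                      // our commitment tx was broadcast to force-close
        ProcessingError { err: String },              // an internal error forced the close
        DisconnectedPeer,                             // peer disconnected before the channel was usable
    }

    // Hypothetical consumer: route each kind of close to an appropriate severity.
    fn on_channel_closed(channel_id: [u8; 32], reason: ClosureReason) {
        let id = hex(&channel_id);
        match reason {
            // Counterparty-initiated: surface the peer's error text verbatim.
            ClosureReason::CounterpartyForceClosed { peer_msg } =>
                eprintln!("channel {} force-closed by peer: {}", id, peer_msg),
            // Our own error path: the error string now rides along in the event.
            ClosureReason::ProcessingError { err } =>
                eprintln!("channel {} closed after internal error: {}", id, err),
            // The remaining variants are expected lifecycle outcomes.
            other => println!("channel {} closed: {:?}", id, other),
        }
    }

    fn hex(bytes: &[u8]) -> String {
        bytes.iter().map(|b| format!("{:02x}", b)).collect()
    }

    fn main() {
        on_channel_closed([0; 32], ClosureReason::HolderForceClosed);
        on_channel_closed([1; 32], ClosureReason::CounterpartyForceClosed {
            peer_msg: "example error from peer".to_owned(),
        });
    }

Note how the patch itself decides between the two force-close reasons: in `force_close_channel_with_peer`, a set `peer_node_id` (a close triggered by a peer's error message) yields `CounterpartyForceClosed` carrying `msg.data`, while a user-initiated call with `None` yields `HolderForceClosed`.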