X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fchannelmanager.rs;h=912e22abb63d06eccf3f95c8d93a98f5e8ddcb2f;hb=3117e1812b2e448ac1386d7c37b673c99d4ef723;hp=766571cd8a3c74c2dcdae8a9681feae688583325;hpb=70d06b46106a49d1434ac168175859278db92620;p=rust-lightning

diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index 766571cd..912e22ab 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -20,7 +20,7 @@ use bitcoin_hashes::sha256::Hash as Sha256;
 use bitcoin_hashes::cmp::fixed_time_eq;
 
 use secp256k1::key::{SecretKey,PublicKey};
-use secp256k1::{Secp256k1,Message};
+use secp256k1::Secp256k1;
 use secp256k1::ecdh::SharedSecret;
 use secp256k1;
 
@@ -108,7 +108,7 @@ impl HTLCSource {
     pub fn dummy() -> Self {
         HTLCSource::OutboundRoute {
             route: Route { hops: Vec::new() },
-            session_priv: SecretKey::from_slice(&::secp256k1::Secp256k1::without_caps(), &[1; 32]).unwrap(),
+            session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
             first_hop_htlc_msat: 0,
         }
     }
@@ -161,6 +161,16 @@ impl MsgHandleErrInternal {
         }
     }
     #[inline]
+    fn ignore_no_close(err: &'static str) -> Self {
+        Self {
+            err: HandleError {
+                err,
+                action: Some(msgs::ErrorAction::IgnoreError),
+            },
+            shutdown_finish: None,
+        }
+    }
+    #[inline]
     fn from_no_close(err: msgs::HandleError) -> Self {
         Self { err, shutdown_finish: None }
     }
@@ -239,7 +249,7 @@ pub(super) struct ChannelHolder {
     pub(super) next_forward: Instant,
     /// short channel id -> forward infos. Key of 0 means payments received
     /// Note that while this is held in the same mutex as the channels themselves, no consistency
-    /// guarantees are made about there existing a channel with the short id here, nor the short
+    /// guarantees are made about the existence of a channel with the short id here, nor the short
     /// ids in the PendingForwardHTLCInfo!
     pub(super) forward_htlcs: HashMap>,
     /// Note that while this is held in the same mutex as the channels themselves, no consistency
@@ -334,7 +344,7 @@ pub struct ChannelManager {
 /// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
 /// ie the node we forwarded the payment on to should always have enough room to reliably time out
 /// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
-/// CLTV_CLAIM_BUFFER point (we static assert that its at least 3 blocks more).
+/// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
 const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
 pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
 
@@ -381,7 +391,7 @@ pub struct ChannelDetails {
 }
 
 macro_rules! handle_error {
-    ($self: ident, $internal: expr, $their_node_id: expr) => {
+    ($self: ident, $internal: expr) => {
         match $internal {
             Ok(msg) => Ok(msg),
             Err(MsgHandleErrInternal { err, shutdown_finish }) => {
@@ -439,17 +449,10 @@ macro_rules! try_chan_entry {
 }
 
 macro_rules! return_monitor_err {
-    ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => {
-        return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new())
-    };
-    ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $raa_first_dropped_cs: expr) => {
-        if $action_type != RAACommitmentOrder::RevokeAndACKFirst { panic!("Bad return_monitor_err call!"); }
-        return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new(), $raa_first_dropped_cs)
-    };
-    ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr) => {
-        return_monitor_err!($self, $err, $channel_state, $entry, $action_type, $failed_forwards, $failed_fails, false)
+    ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
+        return_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
     };
-    ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr, $raa_first_dropped_cs: expr) => {
+    ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
         match $err {
             ChannelMonitorUpdateErr::PermanentFailure => {
                 let (channel_id, mut chan) = $entry.remove_entry();
@@ -468,7 +471,13 @@ macro_rules! return_monitor_err {
                 return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
             },
             ChannelMonitorUpdateErr::TemporaryFailure => {
-                $entry.get_mut().monitor_update_failed($action_type, $failed_forwards, $failed_fails, $raa_first_dropped_cs);
+                if !$resend_commitment {
+                    debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
+                }
+                if !$resend_raa {
+                    debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
+                }
+                $entry.get_mut().monitor_update_failed($action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
                 return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()));
             },
         }
@@ -477,7 +486,7 @@ macro_rules! return_monitor_err {
 
 // Does not break in case of TemporaryFailure!
 macro_rules! maybe_break_monitor_err {
-    ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => {
+    ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
         match $err {
             ChannelMonitorUpdateErr::PermanentFailure => {
                 let (channel_id, mut chan) = $entry.remove_entry();
@@ -487,7 +496,7 @@ macro_rules! maybe_break_monitor_err {
                 break Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
             },
             ChannelMonitorUpdateErr::TemporaryFailure => {
-                $entry.get_mut().monitor_update_failed($action_type, Vec::new(), Vec::new(), false);
+                $entry.get_mut().monitor_update_failed($action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new());
             },
         }
     }
@@ -733,7 +742,7 @@ impl ChannelManager {
 
         let shared_secret = {
             let mut arr = [0; 32];
-            arr.copy_from_slice(&SharedSecret::new(&self.secp_ctx, &msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key)[..]);
+            arr.copy_from_slice(&SharedSecret::new(&msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key)[..]);
             arr
         };
         let (rho, mu) = onion_utils::gen_rho_mu_from_shared_secret(&shared_secret);
@@ -741,7 +750,7 @@ impl ChannelManager {
         if msg.onion_routing_packet.version != 0 {
             //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
             //sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
-            //the hash doesn't really serve any purpuse - in the case of hashing all data, the
+            //the hash doesn't really serve any purpose - in the case of hashing all data, the
             //receiving node would have to brute force to figure out which version was put in the
             //packet by the node that send us the message, in the case of hashing the hop_data, the
             //node knows the HMAC matched, so they already know what is there...
@@ -827,10 +836,10 @@ impl ChannelManager {
                     let mut sha = Sha256::engine();
                     sha.input(&new_pubkey.serialize()[..]);
                     sha.input(&shared_secret);
-                    SecretKey::from_slice(&self.secp_ctx, &Sha256::from_engine(sha).into_inner()).expect("SHA-256 is broken?")
+                    Sha256::from_engine(sha).into_inner()
                 };
 
-                let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor) {
+                let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
                     Err(e)
                 } else { Ok(new_pubkey) };
@@ -937,7 +946,7 @@ impl ChannelManager {
         };
 
         let msg_hash = Sha256dHash::from_data(&unsigned.encode()[..]);
-        let sig = self.secp_ctx.sign(&Message::from_slice(&msg_hash[..]).unwrap(), &self.our_network_key);
+        let sig = self.secp_ctx.sign(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
 
         Ok(msgs::ChannelUpdate {
             signature: sig,
@@ -1018,7 +1027,7 @@ impl ChannelManager {
             } {
                 Some((update_add, commitment_signed, chan_monitor)) => {
                     if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-                        maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst);
+                        maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
                         // Note that MonitorUpdateFailed here indicates (per function docs)
                         // that we will resent the commitment update once we unfree monitor
                         // updating, so we have to take special care that we don't return
@@ -1044,7 +1053,7 @@ impl ChannelManager {
             return Ok(());
         };
 
-        match handle_error!(self, err, route.hops.first().unwrap().pubkey) {
+        match handle_error!(self, err) {
             Ok(_) => unreachable!(),
             Err(e) => {
                 if let Some(msgs::ErrorAction::IgnoreError) = e.action {
@@ -1087,7 +1096,7 @@ impl ChannelManager {
                 None => return
             }
         };
-        match handle_error!(self, res, chan.get_their_node_id()) {
+        match handle_error!(self, res) {
            Ok(funding_msg) => {
                 (chan, funding_msg.0, funding_msg.1)
             },
@@ -1130,7 +1139,7 @@ impl ChannelManager {
             Ok(res) => res,
             Err(_) => return None, // Only in case of state precondition violations eg channel is closing
         };
-        let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
+        let msghash = hash_to_message!(&Sha256dHash::from_data(&announcement.encode()[..])[..]);
         let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
 
         Some(msgs::AnnouncementSignatures {
@@ -1143,7 +1152,7 @@ impl ChannelManager {
 
     /// Processes HTLCs which are pending waiting on random forward delay.
     ///
-    /// Should only really ever be called in response to an PendingHTLCsForwardable event.
+    /// Should only really ever be called in response to a PendingHTLCsForwardable event.
     /// Will likely generate further events.
     pub fn process_pending_htlc_forwards(&self) {
         let _ = self.total_consistency_lock.read().unwrap();
@@ -1248,7 +1257,7 @@ impl ChannelManager {
                                     // messages when we can.
                                     // We don't need any kind of timer here as they should fail
                                     // the channel onto the chain if they can't get our
-                                    // update_fail_htlc in time, its not our problem.
+                                    // update_fail_htlc in time, it's not our problem.
                                 }
                             }
                         },
@@ -1477,7 +1486,7 @@ impl ChannelManager {
                     None => {
                         // TODO: There is probably a channel manager somewhere that needs to
                         // learn the preimage as the channel already hit the chain and that's
-                        // why its missing.
+                        // why it's missing.
                         return
                     }
                 };
@@ -1487,7 +1496,7 @@ impl ChannelManager {
             Ok((msgs, monitor_option)) => {
                 if let Some(chan_monitor) = monitor_option {
                     if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-                        unimplemented!();// but def dont push the event...
+                        unimplemented!();// but def don't push the event...
                     }
                 }
                 if let Some((msg, commitment_signed)) = msgs {
@@ -1545,7 +1554,7 @@ impl ChannelManager {
                 // knowledge of those gets moved into the appropriate in-memory
                 // ChannelMonitor and they get failed backwards once we get
                 // on-chain confirmations.
-                // Note I think #198 addresses this, so once its merged a test
+                // Note I think #198 addresses this, so once it's merged a test
                 // should be written.
                 if let Some(short_id) = channel.get_short_channel_id() {
                     short_to_id.remove(&short_id);
@@ -1845,7 +1854,7 @@ impl ChannelManager {
         //
         //TODO: There exists a further attack where a node may garble the onion data, forward it to
         //us repeatedly garbled in different ways, and compare our error messages, which are
-        //encrypted with the same key. Its not immediately obvious how to usefully exploit that,
+        //encrypted with the same key. It's not immediately obvious how to usefully exploit that,
        //but we should prevent it anyway.
 
         let (mut pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
@@ -1962,7 +1971,7 @@ impl ChannelManager {
                 let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) =
                     try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &*self.fee_estimator), channel_state, chan);
                 if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-                    return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, commitment_signed.is_some());
+                    return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
                     //TODO: Rebroadcast closing_signed if present on monitor update restoration
                 }
                 channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
@@ -2037,10 +2046,16 @@ impl ChannelManager {
                     //TODO: here and below MsgHandleErrInternal, #153 case
                     return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                 }
+                let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
                 let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) =
                     try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &*self.fee_estimator), channel_state, chan);
                 if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-                    return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, pending_forwards, pending_failures);
+                    if was_frozen_for_monitor {
+                        assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
+                        return Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA"));
+                    } else {
+                        return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures);
+                    }
                 }
                 if let Some(updates) = commitment_update {
                     channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
@@ -2101,7 +2116,7 @@ impl ChannelManager {
                     try_chan_entry!(self, chan.get_mut().get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()), channel_state, chan);
 
                 let were_node_one = announcement.node_id_1 == our_node_id;
-                let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
+                let msghash = hash_to_message!(&Sha256dHash::from_data(&announcement.encode()[..])[..]);
                 if self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }).is_err() ||
                         self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }).is_err() {
                     try_chan_entry!(self, Err(ChannelError::Close("Bad announcement_signatures node_signature")), channel_state, chan);
@@ -2147,7 +2162,7 @@ impl ChannelManager {
                     if commitment_update.is_none() {
                         order = RAACommitmentOrder::RevokeAndACKFirst;
                     }
-                    return_monitor_err!(self, e, channel_state, chan, order);
+                    return_monitor_err!(self, e, channel_state, chan, order, revoke_and_ack.is_some(), commitment_update.is_some());
                     //TODO: Resend the funding_locked if needed once we get the monitor running again
                 }
             }
@@ -2243,7 +2258,7 @@ impl ChannelManager {
             return Ok(())
         };
 
-        match handle_error!(self, err, their_node_id) {
+        match handle_error!(self, err) {
             Ok(_) => unreachable!(),
             Err(e) => {
                 if let Some(msgs::ErrorAction::IgnoreError) = e.action {
@@ -2263,7 +2278,7 @@ impl ChannelManager {
 
 impl events::MessageSendEventsProvider for ChannelManager {
     fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
-        // TODO: Event release to users and serialization is currently race-y: its very easy for a
+        // TODO: Event release to users and serialization is currently race-y: it's very easy for a
         // user to serialize a ChannelManager with pending events in it and lose those events on
         // restart. This is doubly true for the fail/fulfill-backs from monitor events!
         {
@@ -2288,7 +2303,7 @@ impl events::MessageSendEventsProvider for ChannelManager {
 
 impl events::EventsProvider for ChannelManager {
     fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
-        // TODO: Event release to users and serialization is currently race-y: its very easy for a
+        // TODO: Event release to users and serialization is currently race-y: it's very easy for a
         // user to serialize a ChannelManager with pending events in it and lose those events on
         // restart. This is doubly true for the fail/fulfill-backs from monitor events!
         {
@@ -2429,82 +2444,82 @@ impl ChannelMessageHandler for ChannelManager {
     //TODO: Handle errors and close channel (or so)
     fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), HandleError> {
         let _ = self.total_consistency_lock.read().unwrap();
-        handle_error!(self, self.internal_open_channel(their_node_id, msg), their_node_id)
+        handle_error!(self, self.internal_open_channel(their_node_id, msg))
     }
 
     fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
         let _ = self.total_consistency_lock.read().unwrap();
-        handle_error!(self, self.internal_accept_channel(their_node_id, msg), their_node_id)
+        handle_error!(self, self.internal_accept_channel(their_node_id, msg))
     }
 
     fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), HandleError> {
         let _ = self.total_consistency_lock.read().unwrap();
-        handle_error!(self, self.internal_funding_created(their_node_id, msg), their_node_id)
+        handle_error!(self, self.internal_funding_created(their_node_id, msg))
     }
 
     fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), HandleError> {
         let _ = self.total_consistency_lock.read().unwrap();
-        handle_error!(self, self.internal_funding_signed(their_node_id, msg), their_node_id)
+        handle_error!(self, self.internal_funding_signed(their_node_id, msg))
    }
 
     fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), HandleError> {
         let _ = self.total_consistency_lock.read().unwrap();
-        handle_error!(self, self.internal_funding_locked(their_node_id, msg), their_node_id)
+        handle_error!(self, self.internal_funding_locked(their_node_id, msg))
     }
 
     fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), HandleError> {
         let _ = self.total_consistency_lock.read().unwrap();
-        handle_error!(self, self.internal_shutdown(their_node_id, msg), their_node_id)
+        handle_error!(self, self.internal_shutdown(their_node_id, msg))
     }
 
     fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), HandleError> {
         let _ = self.total_consistency_lock.read().unwrap();
-        handle_error!(self, self.internal_closing_signed(their_node_id, msg), their_node_id)
+        handle_error!(self, self.internal_closing_signed(their_node_id, msg))
     }
 
     fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), msgs::HandleError> {
         let _ = self.total_consistency_lock.read().unwrap();
-        handle_error!(self, self.internal_update_add_htlc(their_node_id, msg), their_node_id)
+        handle_error!(self, self.internal_update_add_htlc(their_node_id, msg))
     }
 
     fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
         let _ = self.total_consistency_lock.read().unwrap();
-        handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), their_node_id)
+        handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg))
     }
 
     fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> {
         let _ = self.total_consistency_lock.read().unwrap();
-        handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), their_node_id)
+        handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg))
     }
 
     fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> {
         let _ = self.total_consistency_lock.read().unwrap();
-        handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg), their_node_id)
+        handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg))
     }
 
     fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), HandleError> {
         let _ = self.total_consistency_lock.read().unwrap();
-        handle_error!(self, self.internal_commitment_signed(their_node_id, msg), their_node_id)
+        handle_error!(self, self.internal_commitment_signed(their_node_id, msg))
     }
 
     fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), HandleError> {
         let _ = self.total_consistency_lock.read().unwrap();
-        handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg), their_node_id)
+        handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg))
    }
 
     fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> {
         let _ = self.total_consistency_lock.read().unwrap();
-        handle_error!(self, self.internal_update_fee(their_node_id, msg), their_node_id)
+        handle_error!(self, self.internal_update_fee(their_node_id, msg))
     }
 
     fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> {
         let _ = self.total_consistency_lock.read().unwrap();
-        handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), their_node_id)
+        handle_error!(self, self.internal_announcement_signatures(their_node_id, msg))
     }
 
     fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), HandleError> {
         let _ = self.total_consistency_lock.read().unwrap();
-        handle_error!(self, self.internal_channel_reestablish(their_node_id, msg), their_node_id)
+        handle_error!(self, self.internal_channel_reestablish(their_node_id, msg))
     }
 
     fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) {
@@ -2888,7 +2903,7 @@ pub struct ChannelManagerReadArgs<'a> {
     /// value.get_funding_txo() should be the key).
     ///
    /// If a monitor is inconsistent with the channel state during deserialization the channel will
-    /// be force-closed using the data in the channelmonitor and the Channel will be dropped. This
+    /// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This
    /// is true for missing channels as well. If there is a monitor missing for which we find
    /// channel data Err(DecodeError::InvalidValue) will be returned.
    ///
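
Note on the behavioural change above: the patch replaces return_monitor_err!'s single $raa_first_dropped_cs flag with explicit $resend_raa/$resend_commitment arguments that are passed straight through to Channel::monitor_update_failed, and the revoke_and_ack path now short-circuits with the new ignore_no_close error when the channel was already frozen by an earlier monitor failure (in which case all of its response values are guaranteed empty). The sketch below is a hypothetical, self-contained restatement of that flag logic, for illustration only; RAACommitmentOrder and monitor_update_failed are names taken from the patch, but the standalone enum and helper function here are simplified stand-ins, not rust-lightning code.

// Illustrative sketch (assumed types, not part of the patch): models the
// (order, resend_raa, resend_commitment) tuple that return_monitor_err! now
// forwards to Channel::monitor_update_failed on a temporary monitor failure.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum RAACommitmentOrder { RevokeAndACKFirst, CommitmentFirst }

fn monitor_update_failed_flags(order: RAACommitmentOrder, resend_raa: bool, resend_commitment: bool)
		-> (RAACommitmentOrder, bool, bool) {
	// Same consistency checks the patch adds as debug_assert!s: if only one of
	// the two messages will be resent, it must be the one the ordering puts first.
	if !resend_commitment {
		debug_assert!(order == RAACommitmentOrder::RevokeAndACKFirst || !resend_raa);
	}
	if !resend_raa {
		debug_assert!(order == RAACommitmentOrder::CommitmentFirst || !resend_commitment);
	}
	(order, resend_raa, resend_commitment)
}

fn main() {
	// Call sites in the patch, expressed with this helper:
	//  - outbound HTLC send:        (CommitmentFirst,   false, true)
	//  - commitment_signed handler: (RevokeAndACKFirst, true,  commitment_signed.is_some())
	//  - revoke_and_ack handler:    (CommitmentFirst,   false, commitment_update.is_some())
	//  - channel_reestablish:       (order, revoke_and_ack.is_some(), commitment_update.is_some())
	let flags = monitor_update_failed_flags(RAACommitmentOrder::CommitmentFirst, false, true);
	println!("{:?}", flags);
}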