diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index 03d7e086..912e22ab 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -20,7 +20,7 @@ use bitcoin_hashes::sha256::Hash as Sha256;
 use bitcoin_hashes::cmp::fixed_time_eq;
 
 use secp256k1::key::{SecretKey,PublicKey};
-use secp256k1::{Secp256k1,Message};
+use secp256k1::Secp256k1;
 use secp256k1::ecdh::SharedSecret;
 use secp256k1;
 
@@ -108,7 +108,7 @@ impl HTLCSource {
	pub fn dummy() -> Self {
		HTLCSource::OutboundRoute {
			route: Route { hops: Vec::new() },
-			session_priv: SecretKey::from_slice(&::secp256k1::Secp256k1::without_caps(), &[1; 32]).unwrap(),
+			session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
			first_hop_htlc_msat: 0,
		}
	}
@@ -161,6 +161,16 @@ impl MsgHandleErrInternal {
		}
	}
	#[inline]
+	fn ignore_no_close(err: &'static str) -> Self {
+		Self {
+			err: HandleError {
+				err,
+				action: Some(msgs::ErrorAction::IgnoreError),
+			},
+			shutdown_finish: None,
+		}
+	}
+	#[inline]
	fn from_no_close(err: msgs::HandleError) -> Self {
		Self { err, shutdown_finish: None }
	}
@@ -208,13 +218,16 @@
 /// probably increase this significantly.
 const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u32 = 50;
 
-pub(super) struct HTLCForwardInfo {
-	prev_short_channel_id: u64,
-	prev_htlc_id: u64,
-	#[cfg(test)]
-	pub(super) forward_info: PendingForwardHTLCInfo,
-	#[cfg(not(test))]
-	forward_info: PendingForwardHTLCInfo,
+pub(super) enum HTLCForwardInfo {
+	AddHTLC {
+		prev_short_channel_id: u64,
+		prev_htlc_id: u64,
+		forward_info: PendingForwardHTLCInfo,
+	},
+	FailHTLC {
+		htlc_id: u64,
+		err_packet: msgs::OnionErrorPacket,
+	},
 }
 
 /// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
@@ -236,7 +249,7 @@ pub(super) struct ChannelHolder {
	pub(super) next_forward: Instant,
	/// short channel id -> forward infos. Key of 0 means payments received
	/// Note that while this is held in the same mutex as the channels themselves, no consistency
-	/// guarantees are made about there existing a channel with the short id here, nor the short
+	/// guarantees are made about the existence of a channel with the short id here, nor the short
	/// ids in the PendingForwardHTLCInfo!
	pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
	/// Note that while this is held in the same mutex as the channels themselves, no consistency
@@ -331,7 +344,7 @@ pub struct ChannelManager {
 /// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
 /// ie the node we forwarded the payment on to should always have enough room to reliably time out
 /// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
-/// CLTV_CLAIM_BUFFER point (we static assert that its at least 3 blocks more).
+/// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
 const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
 pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
 
@@ -378,7 +391,7 @@ pub struct ChannelDetails {
 }
 
 macro_rules! handle_error {
-	($self: ident, $internal: expr, $their_node_id: expr) => {
+	($self: ident, $internal: expr) => {
		match $internal {
			Ok(msg) => Ok(msg),
			Err(MsgHandleErrInternal { err, shutdown_finish }) => {
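The struct-to-enum conversion of HTLCForwardInfo above is the core of this patch: the per-channel holding cell can now carry HTLC fail-backs as well as forwards. A minimal, self-contained sketch of how the two variants share ChannelHolder's forward_htlcs queue, with stand-in unit structs in place of the real payload types:

use std::collections::HashMap;

struct PendingForwardHTLCInfo; // stand-in for the real struct
struct OnionErrorPacket;       // stand-in for msgs::OnionErrorPacket

enum HTLCForwardInfo {
    AddHTLC { prev_short_channel_id: u64, prev_htlc_id: u64, forward_info: PendingForwardHTLCInfo },
    FailHTLC { htlc_id: u64, err_packet: OnionErrorPacket },
}

fn main() {
    // Both adds and fail-backs share one queue keyed by the outgoing channel's
    // short id, so they can be flushed in a single commitment_signed round.
    let mut forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>> = HashMap::new();
    forward_htlcs.entry(42).or_insert_with(Vec::new).push(HTLCForwardInfo::AddHTLC {
        prev_short_channel_id: 7,
        prev_htlc_id: 0,
        forward_info: PendingForwardHTLCInfo,
    });
    forward_htlcs.entry(42).or_insert_with(Vec::new).push(HTLCForwardInfo::FailHTLC {
        htlc_id: 3,
        err_packet: OnionErrorPacket,
    });
    assert_eq!(forward_htlcs[&42].len(), 2);
}

The real queue is drained in process_pending_htlc_forwards further down, which is where the batching pays off.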
@@ -436,17 +449,10 @@ macro_rules! try_chan_entry {
 }
 
 macro_rules! return_monitor_err {
-	($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => {
-		return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new())
-	};
-	($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $raa_first_dropped_cs: expr) => {
-		if $action_type != RAACommitmentOrder::RevokeAndACKFirst { panic!("Bad return_monitor_err call!"); }
-		return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new(), $raa_first_dropped_cs)
+	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
+		return_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
	};
-	($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr) => {
-		return_monitor_err!($self, $err, $channel_state, $entry, $action_type, $failed_forwards, $failed_fails, false)
-	};
-	($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr, $raa_first_dropped_cs: expr) => {
+	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
		match $err {
			ChannelMonitorUpdateErr::PermanentFailure => {
				let (channel_id, mut chan) = $entry.remove_entry();
@@ -465,7 +471,13 @@ macro_rules! return_monitor_err {
				return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
			},
			ChannelMonitorUpdateErr::TemporaryFailure => {
-				$entry.get_mut().monitor_update_failed($action_type, $failed_forwards, $failed_fails, $raa_first_dropped_cs);
+				if !$resend_commitment {
+					debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
+				}
+				if !$resend_raa {
+					debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
+				}
+				$entry.get_mut().monitor_update_failed($action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
				return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()));
			},
		}
@@ -474,7 +486,7 @@
 
 // Does not break in case of TemporaryFailure!
 macro_rules! maybe_break_monitor_err {
-	($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => {
+	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
		match $err {
			ChannelMonitorUpdateErr::PermanentFailure => {
				let (channel_id, mut chan) = $entry.remove_entry();
@@ -484,7 +496,7 @@ macro_rules! maybe_break_monitor_err {
				break Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
			},
			ChannelMonitorUpdateErr::TemporaryFailure => {
-				$entry.get_mut().monitor_update_failed($action_type, Vec::new(), Vec::new(), false);
+				$entry.get_mut().monitor_update_failed($action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new());
			},
		}
	}
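The new $resend_raa/$resend_commitment flags replace the old positional arguments and make the resend ordering explicit. A toy restatement of the invariant the new debug_asserts encode (RAACommitmentOrder mirrored here as a stand-in): whichever message is *not* being resent must not be ordered ahead of one that is.

#[derive(PartialEq)]
enum RAACommitmentOrder { CommitmentFirst, RevokeAndACKFirst }

fn check_resend_order(action_type: RAACommitmentOrder, resend_raa: bool, resend_commitment: bool) {
    // If we aren't resending a commitment update, any RAA we resend must come first...
    if !resend_commitment {
        debug_assert!(action_type == RAACommitmentOrder::RevokeAndACKFirst || !resend_raa);
    }
    // ...and symmetrically for a resent commitment update without an RAA.
    if !resend_raa {
        debug_assert!(action_type == RAACommitmentOrder::CommitmentFirst || !resend_commitment);
    }
}

fn main() {
    // Resending only an RAA is fine as long as the RAA is ordered first.
    check_resend_order(RAACommitmentOrder::RevokeAndACKFirst, true, false);
}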
@@ -730,7 +742,7 @@ impl ChannelManager {
		let shared_secret = {
			let mut arr = [0; 32];
-			arr.copy_from_slice(&SharedSecret::new(&self.secp_ctx, &msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key)[..]);
+			arr.copy_from_slice(&SharedSecret::new(&msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key)[..]);
			arr
		};
		let (rho, mu) = onion_utils::gen_rho_mu_from_shared_secret(&shared_secret);
@@ -738,7 +750,7 @@ impl ChannelManager {
		if msg.onion_routing_packet.version != 0 {
			//TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
			//sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
-			//the hash doesn't really serve any purpuse - in the case of hashing all data, the
+			//the hash doesn't really serve any purpose - in the case of hashing all data, the
			//receiving node would have to brute force to figure out which version was put in the
			//packet by the node that sent us the message, in the case of hashing the hop_data, the
			//node knows the HMAC matched, so they already know what is there...
@@ -824,10 +836,10 @@ impl ChannelManager {
			let mut sha = Sha256::engine();
			sha.input(&new_pubkey.serialize()[..]);
			sha.input(&shared_secret);
-			SecretKey::from_slice(&self.secp_ctx, &Sha256::from_engine(sha).into_inner()).expect("SHA-256 is broken?")
+			Sha256::from_engine(sha).into_inner()
		};
 
-		let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor) {
+		let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
			Err(e)
		} else { Ok(new_pubkey) };
@@ -934,7 +946,7 @@ impl ChannelManager {
		};
 
		let msg_hash = Sha256dHash::from_data(&unsigned.encode()[..]);
-		let sig = self.secp_ctx.sign(&Message::from_slice(&msg_hash[..]).unwrap(), &self.our_network_key);
+		let sig = self.secp_ctx.sign(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
 
		Ok(msgs::ChannelUpdate {
			signature: sig,
@@ -1015,7 +1027,7 @@ impl ChannelManager {
			} {
				Some((update_add, commitment_signed, chan_monitor)) => {
					if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-						maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst);
+						maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
						// Note that MonitorUpdateFailed here indicates (per function docs)
						// that we will resend the commitment update once we unfreeze monitor
						// updating, so we have to take special care that we don't return
@@ -1041,7 +1053,7 @@ impl ChannelManager {
			return Ok(());
		};
 
-		match handle_error!(self, err, route.hops.first().unwrap().pubkey) {
+		match handle_error!(self, err) {
			Ok(_) => unreachable!(),
			Err(e) => {
				if let Some(msgs::ErrorAction::IgnoreError) = e.action {
@@ -1084,7 +1096,7 @@ impl ChannelManager {
				None => return
			}
		};
-		match handle_error!(self, res, chan.get_their_node_id()) {
+		match handle_error!(self, res) {
			Ok(funding_msg) => {
				(chan, funding_msg.0, funding_msg.1)
			},
@@ -1127,7 +1139,7 @@ impl ChannelManager {
			Ok(res) => res,
			Err(_) => return None, // Only in case of state precondition violations e.g. channel is closing
		};
-		let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
+		let msghash = hash_to_message!(&Sha256dHash::from_data(&announcement.encode()[..])[..]);
		let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
 
		Some(msgs::AnnouncementSignatures {
@@ -1140,7 +1152,7 @@ impl ChannelManager {
	/// Processes HTLCs which are pending waiting on random forward delay.
	///
-	/// Should only really ever be called in response to an PendingHTLCsForwardable event.
+	/// Should only really ever be called in response to a PendingHTLCsForwardable event.
	/// Will likely generate further events.
	pub fn process_pending_htlc_forwards(&self) {
		let _ = self.total_consistency_lock.read().unwrap();
@@ -1161,13 +1173,23 @@ impl ChannelManager {
					Some(chan_id) => chan_id.clone(),
					None => {
						failed_forwards.reserve(pending_forwards.len());
-						for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
-							let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
-								short_channel_id: prev_short_channel_id,
-								htlc_id: prev_htlc_id,
-								incoming_packet_shared_secret: forward_info.incoming_shared_secret,
-							});
-							failed_forwards.push((htlc_source, forward_info.payment_hash, 0x4000 | 10, None));
+						for forward_info in pending_forwards.drain(..) {
+							match forward_info {
+								HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
+									let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+										short_channel_id: prev_short_channel_id,
+										htlc_id: prev_htlc_id,
+										incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+									});
+									failed_forwards.push((htlc_source, forward_info.payment_hash, 0x4000 | 10, None));
+								},
+								HTLCForwardInfo::FailHTLC { .. } => {
+									// Channel went away before we could fail it. This implies
+									// the channel is now on chain and our counterparty is
+									// trying to broadcast the HTLC-Timeout, but that's their
+									// problem, not ours.
+								}
+							}
						}
						continue;
					}
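The 0x4000 | 10 above (and 0x1000 | 7 in the next hunk) are BOLT 4 onion failure codes; the high bits are flags and the low bits the failure type. A small sketch of the composition, with the flag constants written out for illustration:

// BOLT 4 failure-code flag bits.
const PERM: u16 = 0x4000;
const UPDATE: u16 = 0x1000;

fn main() {
    // unknown_next_peer: the requested short_channel_id no longer exists.
    let unknown_next_peer = PERM | 10;
    // temporary_channel_failure: retryable, and carries a channel_update.
    let temporary_channel_failure = UPDATE | 7;
    assert!(unknown_next_peer & PERM != 0);
    assert!(temporary_channel_failure & UPDATE != 0);
}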
@@ -1175,36 +1197,74 @@ impl ChannelManager {
				let forward_chan = &mut channel_state.by_id.get_mut(&forward_chan_id).unwrap();
				let mut add_htlc_msgs = Vec::new();
-				for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
-					let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
-						short_channel_id: prev_short_channel_id,
-						htlc_id: prev_htlc_id,
-						incoming_packet_shared_secret: forward_info.incoming_shared_secret,
-					});
-					match forward_chan.send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
-						Err(_e) => {
-							let chan_update = self.get_channel_update(forward_chan).unwrap();
-							failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
-							continue;
+				let mut fail_htlc_msgs = Vec::new();
+				for forward_info in pending_forwards.drain(..) {
+					match forward_info {
+						HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
+							log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(forward_info.payment_hash.0), short_chan_id);
+							let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+								short_channel_id: prev_short_channel_id,
+								htlc_id: prev_htlc_id,
+								incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+							});
+							match forward_chan.send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
+								Err(e) => {
+									if let ChannelError::Ignore(msg) = e {
+										log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(forward_info.payment_hash.0), msg);
+									} else {
+										panic!("Stated return value requirements in send_htlc() were not met");
+									}
+									let chan_update = self.get_channel_update(forward_chan).unwrap();
+									failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
+									continue;
+								},
+								Ok(update_add) => {
+									match update_add {
+										Some(msg) => { add_htlc_msgs.push(msg); },
+										None => {
+											// Nothing to do here...we're waiting on a remote
+											// revoke_and_ack before we can add any more HTLCs. The Channel
+											// will automatically handle building the update_add_htlc and
+											// commitment_signed messages when we can.
+											// TODO: Do some kind of timer to set the channel as !is_live()
+											// as we don't really want others relying on us relaying through
+											// this channel currently :/.
+										}
+									}
+								}
+							}
						},
-						Ok(update_add) => {
-							match update_add {
-								Some(msg) => { add_htlc_msgs.push(msg); },
-								None => {
+						HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+							log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
+							match forward_chan.get_update_fail_htlc(htlc_id, err_packet) {
+								Err(e) => {
+									if let ChannelError::Ignore(msg) = e {
+										log_trace!(self, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
+									} else {
+										panic!("Stated return value requirements in get_update_fail_htlc() were not met");
+									}
+									// fail-backs are best-effort, we probably already have one
+									// pending, and if not that's OK - the channel is on
+									// the chain and sending the HTLC-Timeout is their problem.
+									continue;
+								},
+								Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
+								Ok(None) => {
									// Nothing to do here...we're waiting on a remote
-									// revoke_and_ack before we can add anymore HTLCs. The Channel
-									// will automatically handle building the update_add_htlc and
-									// commitment_signed messages when we can.
-									// TODO: Do some kind of timer to set the channel as !is_live()
-									// as we don't really want others relying on us relaying through
-									// this channel currently :/.
+									// revoke_and_ack before we can update the commitment
+									// transaction. The Channel will automatically handle
+									// building the update_fail_htlc and commitment_signed
+									// messages when we can.
+									// We don't need any kind of timer here as they should fail
+									// the channel onto the chain if they can't get our
+									// update_fail_htlc in time; it's not our problem.
								}
							}
-						}
+						},
					}
				}
 
-				if !add_htlc_msgs.is_empty() {
+				if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
					let (commitment_msg, monitor) = match forward_chan.send_commitment() {
						Ok(res) => res,
						Err(e) => {
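The loop above now drains adds and fail-backs together before committing, and the widened condition at its end is what lets a fail-only batch still trigger a commitment_signed. A reduced sketch of that flush condition, with stand-in message types:

struct UpdateAddHTLC;  // stand-in for msgs::UpdateAddHTLC
struct UpdateFailHTLC; // stand-in for msgs::UpdateFailHTLC

// The old code only committed when there were adds; a batch containing only
// fail-backs now triggers a commitment update as well.
fn should_send_commitment(add_htlc_msgs: &[UpdateAddHTLC], fail_htlc_msgs: &[UpdateFailHTLC]) -> bool {
    !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty()
}

fn main() {
    let adds: Vec<UpdateAddHTLC> = Vec::new();
    let fails = vec![UpdateFailHTLC];
    assert!(should_send_commitment(&adds, &fails));
}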
@@ -1223,7 +1283,7 @@ impl ChannelManager {
					updates: msgs::CommitmentUpdate {
						update_add_htlcs: add_htlc_msgs,
						update_fulfill_htlcs: Vec::new(),
-						update_fail_htlcs: Vec::new(),
+						update_fail_htlcs: fail_htlc_msgs,
						update_fail_malformed_htlcs: Vec::new(),
						update_fee: None,
						commitment_signed: commitment_msg,
@@ -1231,20 +1291,27 @@ impl ChannelManager {
					});
				}
			} else {
-				for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
-					let prev_hop_data = HTLCPreviousHopData {
-						short_channel_id: prev_short_channel_id,
-						htlc_id: prev_htlc_id,
-						incoming_packet_shared_secret: forward_info.incoming_shared_secret,
-					};
-					match channel_state.claimable_htlcs.entry(forward_info.payment_hash) {
-						hash_map::Entry::Occupied(mut entry) => entry.get_mut().push(prev_hop_data),
-						hash_map::Entry::Vacant(entry) => { entry.insert(vec![prev_hop_data]); },
-					};
-					new_events.push(events::Event::PaymentReceived {
-						payment_hash: forward_info.payment_hash,
-						amt: forward_info.amt_to_forward,
-					});
+				for forward_info in pending_forwards.drain(..) {
+					match forward_info {
+						HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
+							let prev_hop_data = HTLCPreviousHopData {
+								short_channel_id: prev_short_channel_id,
+								htlc_id: prev_htlc_id,
+								incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+							};
+							match channel_state.claimable_htlcs.entry(forward_info.payment_hash) {
+								hash_map::Entry::Occupied(mut entry) => entry.get_mut().push(prev_hop_data),
+								hash_map::Entry::Vacant(entry) => { entry.insert(vec![prev_hop_data]); },
+							};
+							new_events.push(events::Event::PaymentReceived {
+								payment_hash: forward_info.payment_hash,
+								amt: forward_info.amt_to_forward,
+							});
+						},
+						HTLCForwardInfo::FailHTLC { .. } => {
+							panic!("Got pending fail of our own HTLC");
+						}
+					}
				}
			}
		}
@@ -1289,6 +1356,10 @@ impl ChannelManager {
	/// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
	/// still-available channels.
	fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
+		//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
+		//identify whether we sent it or not based on the (I presume) very different runtime
+		//between the branches here. We should make this async and move it into the forward HTLCs
+		//timer handling.
		match source {
			HTLCSource::OutboundRoute { ref route, .. } => {
				log_trace!(self, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
@@ -1353,36 +1424,25 @@ impl ChannelManager {
					}
				};
 
-				let channel_state = channel_state_lock.borrow_parts();
-
-				let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
-					Some(chan_id) => chan_id.clone(),
-					None => return
-				};
-
-				let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
-				match chan.get_update_fail_htlc_and_commit(htlc_id, err_packet) {
-					Ok(Some((msg, commitment_msg, chan_monitor))) => {
-						if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-							unimplemented!();
-						}
-						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-							node_id: chan.get_their_node_id(),
-							updates: msgs::CommitmentUpdate {
-								update_add_htlcs: Vec::new(),
-								update_fulfill_htlcs: Vec::new(),
-								update_fail_htlcs: vec![msg],
-								update_fail_malformed_htlcs: Vec::new(),
-								update_fee: None,
-								commitment_signed: commitment_msg,
-							},
-						});
-					},
-					Ok(None) => {},
-					Err(_e) => {
-						//TODO: Do something with e?
-						return;
+				let mut forward_event = None;
+				if channel_state_lock.forward_htlcs.is_empty() {
+					forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
+					channel_state_lock.next_forward = forward_event.unwrap();
+				}
+				match channel_state_lock.forward_htlcs.entry(short_channel_id) {
+					hash_map::Entry::Occupied(mut entry) => {
+						entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id, err_packet });
					},
+					hash_map::Entry::Vacant(entry) => {
+						entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id, err_packet }));
+					}
+				}
+				mem::drop(channel_state_lock);
+				if let Some(time) = forward_event {
+					let mut pending_events = self.pending_events.lock().unwrap();
+					pending_events.push(events::Event::PendingHTLCsForwardable {
+						time_forwardable: time
+					});
				}
			},
		}
@@ -1426,7 +1486,7 @@ impl ChannelManager {
			None => {
				// TODO: There is probably a channel manager somewhere that needs to
				// learn the preimage as the channel already hit the chain and that's
-				// why its missing.
+				// why it's missing.
				return
			}
		};
@@ -1436,7 +1496,7 @@ impl ChannelManager {
			Ok((msgs, monitor_option)) => {
				if let Some(chan_monitor) = monitor_option {
					if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-						unimplemented!();// but def dont push the event...
+						unimplemented!();// but def don't push the event...
					}
				}
				if let Some((msg, commitment_signed)) = msgs {
@@ -1494,7 +1554,7 @@ impl ChannelManager {
				// knowledge of those gets moved into the appropriate in-memory
				// ChannelMonitor and they get failed backwards once we get
				// on-chain confirmations.
-				// Note I think #198 addresses this, so once its merged a test
+				// Note I think #198 addresses this, so once it's merged a test
				// should be written.
				if let Some(short_id) = channel.get_short_channel_id() {
					short_to_id.remove(&short_id);
@@ -1794,7 +1854,7 @@ impl ChannelManager {
		//
		//TODO: There exists a further attack where a node may garble the onion data, forward it to
		//us repeatedly garbled in different ways, and compare our error messages, which are
-		//encrypted with the same key. Its not immediately obvious how to usefully exploit that,
+		//encrypted with the same key. It's not immediately obvious how to usefully exploit that,
		//but we should prevent it anyway.
 
		let (mut pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
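The rework of fail_htlc_backwards_internal above is the behavioral heart of the patch: instead of immediately sending update_fail_htlc and a commitment update, fail-backs are queued into forward_htlcs behind the same randomized holding-cell delay as forwards. A self-contained sketch of the delay computation (rand_f32 is a stand-in for the crate-internal rng helper):

use std::time::{Duration, Instant};

const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u32 = 50;

// Stand-in for rng::rand_f32(); the real one draws a value in [0, 1).
fn rand_f32() -> f32 { 0.5 }

fn next_forward_instant() -> Instant {
    // 1x to 5x the holding-cell delay, i.e. 50-250ms with the real constant.
    let millis = ((rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64;
    Instant::now() + Duration::from_millis(millis)
}

fn main() {
    // Scheduling the flush on this shared, jittered timer helps hide whether a
    // given HTLC failed locally or somewhere further downstream.
    let _when = next_forward_instant();
}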
@@ -1911,7 +1971,7 @@ impl ChannelManager {
		let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) =
			try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &*self.fee_estimator), channel_state, chan);
		if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-			return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, commitment_signed.is_some());
+			return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
			//TODO: Rebroadcast closing_signed if present on monitor update restoration
		}
		channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
@@ -1956,10 +2016,10 @@ impl ChannelManager {
				for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
					match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
						hash_map::Entry::Occupied(mut entry) => {
-							entry.get_mut().push(HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info });
+							entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info });
						},
						hash_map::Entry::Vacant(entry) => {
-							entry.insert(vec!(HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info }));
+							entry.insert(vec!(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info }));
						}
					}
				}
@@ -1986,10 +2046,16 @@ impl ChannelManager {
					//TODO: here and below MsgHandleErrInternal, #153 case
					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
				}
+				let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
				let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) =
					try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &*self.fee_estimator), channel_state, chan);
				if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-					return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, pending_forwards, pending_failures);
+					if was_frozen_for_monitor {
+						assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
+						return Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA"));
+					} else {
+						return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures);
+					}
				}
				if let Some(updates) = commitment_update {
					channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
@@ -2050,7 +2116,7 @@ impl ChannelManager {
					try_chan_entry!(self, chan.get_mut().get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()), channel_state, chan);
 
				let were_node_one = announcement.node_id_1 == our_node_id;
-				let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
+				let msghash = hash_to_message!(&Sha256dHash::from_data(&announcement.encode()[..])[..]);
				if self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }).is_err() ||
						self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }).is_err() {
					try_chan_entry!(self, Err(ChannelError::Close("Bad announcement_signatures node_signature")), channel_state, chan);
@@ -2096,7 +2162,7 @@ impl ChannelManager {
					if commitment_update.is_none() {
						order = RAACommitmentOrder::RevokeAndACKFirst;
					}
-					return_monitor_err!(self, e, channel_state, chan, order);
+					return_monitor_err!(self, e, channel_state, chan, order, revoke_and_ack.is_some(), commitment_update.is_some());
					//TODO: Resend the funding_locked if needed once we get the monitor running again
				}
			}
@@ -2192,7 +2258,7 @@ impl ChannelManager {
			return Ok(())
		};
 
-		match handle_error!(self, err, their_node_id) {
+		match handle_error!(self, err) {
			Ok(_) => unreachable!(),
			Err(e) => {
				if let Some(msgs::ErrorAction::IgnoreError) = e.action {
@@ -2212,7 +2278,7 @@ impl ChannelManager {
 
 impl events::MessageSendEventsProvider for ChannelManager {
	fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
-		// TODO: Event release to users and serialization is currently race-y: its very easy for a
+		// TODO: Event release to users and serialization is currently race-y: it's very easy for a
		// user to serialize a ChannelManager with pending events in it and lose those events on
		// restart. This is doubly true for the fail/fulfill-backs from monitor events!
		{
@@ -2237,7 +2303,7 @@ impl events::MessageSendEventsProvider for ChannelManager {
 
 impl events::EventsProvider for ChannelManager {
	fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
-		// TODO: Event release to users and serialization is currently race-y: its very easy for a
+		// TODO: Event release to users and serialization is currently race-y: it's very easy for a
		// user to serialize a ChannelManager with pending events in it and lose those events on
		// restart. This is doubly true for the fail/fulfill-backs from monitor events!
		{
@@ -2378,82 +2444,82 @@ impl ChannelMessageHandler for ChannelManager {
	//TODO: Handle errors and close channel (or so)
	fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), HandleError> {
		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_open_channel(their_node_id, msg), their_node_id)
+		handle_error!(self, self.internal_open_channel(their_node_id, msg))
	}
 
	fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_accept_channel(their_node_id, msg), their_node_id)
+		handle_error!(self, self.internal_accept_channel(their_node_id, msg))
	}
 
	fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), HandleError> {
		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_funding_created(their_node_id, msg), their_node_id)
+		handle_error!(self, self.internal_funding_created(their_node_id, msg))
	}
 
	fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), HandleError> {
		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_funding_signed(their_node_id, msg), their_node_id)
+		handle_error!(self, self.internal_funding_signed(their_node_id, msg))
	}
 
	fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), HandleError> {
		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_funding_locked(their_node_id, msg), their_node_id)
+		handle_error!(self, self.internal_funding_locked(their_node_id, msg))
	}
 
	fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), HandleError> {
		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_shutdown(their_node_id, msg), their_node_id)
+		handle_error!(self, self.internal_shutdown(their_node_id, msg))
	}
 
	fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), HandleError> {
		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_closing_signed(their_node_id, msg), their_node_id)
+		handle_error!(self, self.internal_closing_signed(their_node_id, msg))
	}
 
	fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), msgs::HandleError> {
		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_update_add_htlc(their_node_id, msg), their_node_id)
+		handle_error!(self, self.internal_update_add_htlc(their_node_id, msg))
	}
 
	fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), their_node_id)
+		handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg))
	}
 
	fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> {
		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), their_node_id)
+		handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg))
	}
 
	fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> {
		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg), their_node_id)
+		handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg))
	}
 
	fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), HandleError> {
		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_commitment_signed(their_node_id, msg), their_node_id)
+		handle_error!(self, self.internal_commitment_signed(their_node_id, msg))
	}
 
	fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), HandleError> {
		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg), their_node_id)
+		handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg))
	}
 
	fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> {
		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_update_fee(their_node_id, msg), their_node_id)
+		handle_error!(self, self.internal_update_fee(their_node_id, msg))
	}
 
	fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> {
		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), their_node_id)
+		handle_error!(self, self.internal_announcement_signatures(their_node_id, msg))
	}
 
	fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), HandleError> {
		let _ = self.total_consistency_lock.read().unwrap();
-		handle_error!(self, self.internal_channel_reestablish(their_node_id, msg), their_node_id)
+		handle_error!(self, self.internal_channel_reestablish(their_node_id, msg))
	}
 
	fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) {
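Every ChannelMessageHandler method above drops the trailing their_node_id argument because handle_error! never used it for dispatch. A toy reduction of the two-argument call shape (not the real macro body, which also handles shutdown_finish):

macro_rules! handle_error {
    // The macro only inspects the Result it is given, so the old third
    // argument was dead weight at every call site.
    ($self_: ident, $internal: expr) => {
        match $internal {
            Ok(o) => Ok(o),
            Err(e) => Err(e),
        }
    };
}

struct Manager;

impl Manager {
    fn internal_op(&self) -> Result<(), &'static str> { Ok(()) }
    fn op(&self) -> Result<(), &'static str> {
        handle_error!(self, self.internal_op())
    }
}

fn main() {
    assert!(Manager.op().is_ok());
}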
@@ -2561,12 +2627,7 @@ const MIN_SERIALIZATION_VERSION: u8 = 1;
 
 impl Writeable for PendingForwardHTLCInfo {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
-		if let &Some(ref onion) = &self.onion_packet {
-			1u8.write(writer)?;
-			onion.write(writer)?;
-		} else {
-			0u8.write(writer)?;
-		}
+		self.onion_packet.write(writer)?;
		self.incoming_shared_secret.write(writer)?;
		self.payment_hash.write(writer)?;
		self.short_channel_id.write(writer)?;
@@ -2578,13 +2639,8 @@ impl Writeable for PendingForwardHTLCInfo {
 
 impl<R: ::std::io::Read> Readable<R> for PendingForwardHTLCInfo {
	fn read(reader: &mut R) -> Result<PendingForwardHTLCInfo, DecodeError> {
-		let onion_packet = match <u8 as Readable<R>>::read(reader)? {
-			0 => None,
-			1 => Some(msgs::OnionPacket::read(reader)?),
-			_ => return Err(DecodeError::InvalidValue),
-		};
		Ok(PendingForwardHTLCInfo {
-			onion_packet,
+			onion_packet: Readable::read(reader)?,
			incoming_shared_secret: Readable::read(reader)?,
			payment_hash: Readable::read(reader)?,
			short_channel_id: Readable::read(reader)?,
@@ -2714,11 +2770,41 @@ impl<R: ::std::io::Read> Readable<R> for HTLCFailReason {
	}
 }
 
-impl_writeable!(HTLCForwardInfo, 0, {
-	prev_short_channel_id,
-	prev_htlc_id,
-	forward_info
-});
+impl Writeable for HTLCForwardInfo {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+		match self {
+			&HTLCForwardInfo::AddHTLC { ref prev_short_channel_id, ref prev_htlc_id, ref forward_info } => {
+				0u8.write(writer)?;
+				prev_short_channel_id.write(writer)?;
+				prev_htlc_id.write(writer)?;
+				forward_info.write(writer)?;
+			},
+			&HTLCForwardInfo::FailHTLC { ref htlc_id, ref err_packet } => {
+				1u8.write(writer)?;
+				htlc_id.write(writer)?;
+				err_packet.write(writer)?;
+			},
+		}
+		Ok(())
+	}
+}
+
+impl<R: ::std::io::Read> Readable<R> for HTLCForwardInfo {
+	fn read(reader: &mut R) -> Result<HTLCForwardInfo, DecodeError> {
+		match <u8 as Readable<R>>::read(reader)? {
+			0 => Ok(HTLCForwardInfo::AddHTLC {
+				prev_short_channel_id: Readable::read(reader)?,
+				prev_htlc_id: Readable::read(reader)?,
+				forward_info: Readable::read(reader)?,
+			}),
+			1 => Ok(HTLCForwardInfo::FailHTLC {
+				htlc_id: Readable::read(reader)?,
+				err_packet: Readable::read(reader)?,
+			}),
+			_ => Err(DecodeError::InvalidValue),
+		}
+	}
+}
 
 impl Writeable for ChannelManager {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
@@ -2817,7 +2903,7 @@ pub struct ChannelManagerReadArgs<'a> {
	/// value.get_funding_txo() should be the key).
	///
	/// If a monitor is inconsistent with the channel state during deserialization the channel will
-	/// be force-closed using the data in the channelmonitor and the Channel will be dropped. This
+	/// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This
	/// is true for missing channels as well. If there is a monitor missing for which we find
	/// channel data Err(DecodeError::InvalidValue) will be returned.
	///
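The manual Writeable/Readable impls above give HTLCForwardInfo a tagged wire format: one variant byte (0 = AddHTLC, 1 = FailHTLC) followed by that variant's fields, with unknown tags rejected as DecodeError::InvalidValue. A standalone sketch of that framing using plain std::io in place of the crate's ser helpers (the field encodings here - big-endian integers and a u16 length prefix - are illustrative stand-ins, not the crate's actual encodings):

use std::io::{Cursor, Read, Write};

// Tag bytes match the patch: 0 = AddHTLC, 1 = FailHTLC.
fn write_fail_htlc(w: &mut impl Write, htlc_id: u64, err_data: &[u8]) -> std::io::Result<()> {
    w.write_all(&[1u8])?;                                 // variant tag
    w.write_all(&htlc_id.to_be_bytes())?;                 // htlc_id
    w.write_all(&(err_data.len() as u16).to_be_bytes())?; // illustrative length prefix
    w.write_all(err_data)                                 // err_packet payload
}

fn read_tag(r: &mut impl Read) -> std::io::Result<u8> {
    let mut tag = [0u8; 1];
    r.read_exact(&mut tag)?;
    Ok(tag[0])
}

fn main() -> std::io::Result<()> {
    let mut buf = Vec::new();
    write_fail_htlc(&mut buf, 3, b"err")?;
    let mut cur = Cursor::new(buf);
    match read_tag(&mut cur)? {
        0 => println!("AddHTLC"),
        1 => println!("FailHTLC"),
        _ => panic!("unknown tag: a real reader returns DecodeError::InvalidValue"),
    }
    Ok(())
}

Rejecting unknown tags on read is what keeps serialized ChannelManager state from silently misparsing if a newer version ever adds a third variant.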