X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;ds=sidebyside;f=src%2Fln%2Fchannelmanager.rs;h=64a9f2ebb8e575c3895d77efd3ab4eaa1b82ccf0;hb=963f002056d86365447dc1ce244251a449df5b6d;hp=f3d51e4bb1d3d841cd8049177a460d96b13e78fd;hpb=12d25576c1661c09c6ece239d286dd3ab6380cd5;p=rust-lightning

diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index f3d51e4b..64a9f2eb 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -12,22 +12,23 @@ use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::network::constants::Network;
-use bitcoin::util::hash::{BitcoinHash, Sha256dHash};
+use bitcoin::util::hash::BitcoinHash;
 use bitcoin_hashes::{Hash, HashEngine};
 use bitcoin_hashes::hmac::{Hmac, HmacEngine};
 use bitcoin_hashes::sha256::Hash as Sha256;
+use bitcoin_hashes::sha256d::Hash as Sha256dHash;
 use bitcoin_hashes::cmp::fixed_time_eq;
 use secp256k1::key::{SecretKey,PublicKey};
-use secp256k1::{Secp256k1,Message};
+use secp256k1::Secp256k1;
 use secp256k1::ecdh::SharedSecret;
 use secp256k1;
 use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
 use chain::transaction::OutPoint;
 use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, HTLC_FAIL_ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use ln::router::Route;
 use ln::msgs;
 use ln::onion_utils;
@@ -161,6 +162,16 @@ impl MsgHandleErrInternal {
     }
   }
   #[inline]
+  fn ignore_no_close(err: &'static str) -> Self {
+    Self {
+      err: HandleError {
+        err,
+        action: Some(msgs::ErrorAction::IgnoreError),
+      },
+      shutdown_finish: None,
+    }
+  }
+  #[inline]
   fn from_no_close(err: msgs::HandleError) -> Self {
     Self { err, shutdown_finish: None }
   }
@@ -239,13 +250,15 @@ pub(super) struct ChannelHolder {
   pub(super) next_forward: Instant,
   /// short channel id -> forward infos. Key of 0 means payments received
   /// Note that while this is held in the same mutex as the channels themselves, no consistency
-  /// guarantees are made about there existing a channel with the short id here, nor the short
+  /// guarantees are made about the existence of a channel with the short id here, nor the short
   /// ids in the PendingForwardHTLCInfo!
   pub(super) forward_htlcs: HashMap>,
+  /// payment_hash -> Vec<(amount_received, htlc_source)> for tracking things that were to us and
+  /// can be failed/claimed by the user
   /// Note that while this is held in the same mutex as the channels themselves, no consistency
   /// guarantees are made about the channels given here actually existing anymore by the time you
   /// go to read them!
-  pub(super) claimable_htlcs: HashMap>,
+  pub(super) claimable_htlcs: HashMap>,
   /// Messages to send to peers - pushed to in the same lock that they are generated in (except
   /// for broadcast messages, where ordering isn't as strict).
   pub(super) pending_msg_events: Vec,
@@ -255,7 +268,7 @@ pub(super) struct MutChannelHolder<'a> {
   pub(super) short_to_id: &'a mut HashMap,
   pub(super) next_forward: &'a mut Instant,
   pub(super) forward_htlcs: &'a mut HashMap>,
-  pub(super) claimable_htlcs: &'a mut HashMap>,
+  pub(super) claimable_htlcs: &'a mut HashMap>,
   pub(super) pending_msg_events: &'a mut Vec,
 }
 impl ChannelHolder {
@@ -334,24 +347,25 @@ pub struct ChannelManager {
 /// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
 /// ie the node we forwarded the payment on to should always have enough room to reliably time out
 /// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
-/// CLTV_CLAIM_BUFFER point (we static assert that its at least 3 blocks more).
+/// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
 const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
 pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
-// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS +
-// HTLC_FAIL_ANTI_REORG_DELAY, ie that if the next-hop peer fails the HTLC within
-// HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have HTLC_FAIL_TIMEOUT_BLOCKS left to fail it
-// backwards ourselves before hitting the CLTV_CLAIM_BUFFER point and failing the channel
-// on-chain to time out the HTLC.
+// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
+// ie that if the next-hop peer fails the HTLC within
+// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to timeout it onchain,
+// then waiting ANTI_REORG_DELAY to be reorg-safe on the outbound HLTC and
+// failing the corresponding htlc backward, and us now seeing the last block of ANTI_REORG_DELAY before
+// LATENCY_GRACE_PERIOD_BLOCKS.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER - HTLC_FAIL_ANTI_REORG_DELAY;
+const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
 // Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
 // ChannelMontior::would_broadcast_at_height for a description of why this is needed.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - HTLC_FAIL_TIMEOUT_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
 macro_rules! secp_call {
   ( $res: expr, $err: expr ) => {
@@ -381,7 +395,7 @@ pub struct ChannelDetails {
 }
 macro_rules! handle_error {
-  ($self: ident, $internal: expr, $their_node_id: expr) => {
+  ($self: ident, $internal: expr) => {
     match $internal {
       Ok(msg) => Ok(msg),
       Err(MsgHandleErrInternal { err, shutdown_finish }) => {
@@ -438,20 +452,14 @@ macro_rules! try_chan_entry {
   }
 }
-macro_rules! return_monitor_err {
-  ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => {
-    return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new())
-  };
-  ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $raa_first_dropped_cs: expr) => {
-    if $action_type != RAACommitmentOrder::RevokeAndACKFirst { panic!("Bad return_monitor_err call!"); }
-    return_monitor_err!($self, $err, $channel_state, $entry, $action_type, Vec::new(), Vec::new(), $raa_first_dropped_cs)
+macro_rules! handle_monitor_err {
+  ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
+    handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
   };
-  ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr) => {
-    return_monitor_err!($self, $err, $channel_state, $entry, $action_type, $failed_forwards, $failed_fails, false)
-  };
-  ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $failed_forwards: expr, $failed_fails: expr, $raa_first_dropped_cs: expr) => {
+  ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
     match $err {
       ChannelMonitorUpdateErr::PermanentFailure => {
+        log_error!($self, "Closing channel {} due to monitor update PermanentFailure", log_bytes!($entry.key()[..]));
         let (channel_id, mut chan) = $entry.remove_entry();
         if let Some(short_id) = chan.get_short_channel_id() {
           $channel_state.short_to_id.remove(&short_id);
@@ -465,30 +473,52 @@ macro_rules! return_monitor_err {
         // splitting hairs we'd prefer to claim payments that were to us, but we haven't
         // given up the preimage yet, so might as well just wait until the payment is
         // retried, avoiding the on-chain fees.
-        return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+        let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()));
+        res
       },
       ChannelMonitorUpdateErr::TemporaryFailure => {
-        $entry.get_mut().monitor_update_failed($action_type, $failed_forwards, $failed_fails, $raa_first_dropped_cs);
-        return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()));
+        log_info!($self, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards and {} fails",
+          log_bytes!($entry.key()[..]),
+          if $resend_commitment && $resend_raa {
+            match $action_type {
+              RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
+              RAACommitmentOrder::RevokeAndACKFirst => { "RAA then commitment" },
+            }
+          } else if $resend_commitment { "commitment" }
+          else if $resend_raa { "RAA" }
+          else { "nothing" },
+          (&$failed_forwards as &Vec<(PendingForwardHTLCInfo, u64)>).len(),
+          (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len());
+        if !$resend_commitment {
+          debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
+        }
+        if !$resend_raa {
+          debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
+        }
+        $entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
+        Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()))
       },
     }
   }
 }
+macro_rules! return_monitor_err {
+  ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
+    return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment);
+  };
+  ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
+    return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
+  }
+}
+
 // Does not break in case of TemporaryFailure!
 macro_rules! maybe_break_monitor_err {
-  ($self: expr, $err: expr, $channel_state: expr, $entry: expr, $action_type: path) => {
-    match $err {
-      ChannelMonitorUpdateErr::PermanentFailure => {
-        let (channel_id, mut chan) = $entry.remove_entry();
-        if let Some(short_id) = chan.get_short_channel_id() {
-          $channel_state.short_to_id.remove(&short_id);
-        }
-        break Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
-      },
-      ChannelMonitorUpdateErr::TemporaryFailure => {
-        $entry.get_mut().monitor_update_failed($action_type, Vec::new(), Vec::new(), false);
+  ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
+    match (handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment), $err) {
+      (e, ChannelMonitorUpdateErr::PermanentFailure) => {
+        break e;
       },
+      (_, ChannelMonitorUpdateErr::TemporaryFailure) => { },
     }
   }
 }
@@ -741,7 +771,7 @@ impl ChannelManager {
     if msg.onion_routing_packet.version != 0 {
       //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
       //sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
-      //the hash doesn't really serve any purpuse - in the case of hashing all data, the
+      //the hash doesn't really serve any purpose - in the case of hashing all data, the
       //receiving node would have to brute force to figure out which version was put in the
      //packet by the node that send us the message, in the case of hashing the hop_data, the
      //node knows the HMAC matched, so they already know what is there...
@@ -791,7 +821,7 @@ impl ChannelManager {
     let pending_forward_info = if next_hop_data.hmac == [0; 32] {
         // OUR PAYMENT!
         // final_expiry_too_soon
-        if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 {
+        if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
           return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
         }
         // final_incorrect_htlc_amount
@@ -883,8 +913,8 @@ impl ChannelManager {
             break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
           }
           let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
-          // We want to have at least HTLC_FAIL_TIMEOUT_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration
-          if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon
+          // We want to have at least LATENCY_GRACE_PERIOD_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration
+          if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS as u32 { // expiry_too_soon
             break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
           }
           if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
@@ -936,8 +966,8 @@ impl ChannelManager {
       excess_data: Vec::new(),
     };
-    let msg_hash = Sha256dHash::from_data(&unsigned.encode()[..]);
-    let sig = self.secp_ctx.sign(&Message::from_slice(&msg_hash[..]).unwrap(), &self.our_network_key);
+    let msg_hash = Sha256dHash::hash(&unsigned.encode()[..]);
+    let sig = self.secp_ctx.sign(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
     Ok(msgs::ChannelUpdate {
       signature: sig,
@@ -1018,7 +1048,7 @@ impl ChannelManager {
         } {
           Some((update_add, commitment_signed, chan_monitor)) => {
             if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-              maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst);
+              maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
               // Note that MonitorUpdateFailed here indicates (per function docs)
               // that we will resent the commitment update once we unfree monitor
               // updating, so we have to take special care that we don't return
@@ -1044,7 +1074,7 @@ impl ChannelManager {
       return Ok(());
     };
-    match handle_error!(self, err, route.hops.first().unwrap().pubkey) {
+    match handle_error!(self, err) {
       Ok(_) => unreachable!(),
       Err(e) => {
         if let Some(msgs::ErrorAction::IgnoreError) = e.action {
@@ -1087,7 +1117,7 @@ impl ChannelManager {
         None => return
       }
     };
-    match handle_error!(self, res, chan.get_their_node_id()) {
+    match handle_error!(self, res) {
       Ok(funding_msg) => {
         (chan, funding_msg.0, funding_msg.1)
       },
@@ -1130,7 +1160,7 @@ impl ChannelManager {
       Ok(res) => res,
       Err(_) => return None, // Only in case of state precondition violations eg channel is closing
     };
-    let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
+    let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
     let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
     Some(msgs::AnnouncementSignatures {
@@ -1143,13 +1173,14 @@ impl ChannelManager {
   /// Processes HTLCs which are pending waiting on random forward delay.
   ///
-  /// Should only really ever be called in response to an PendingHTLCsForwardable event.
+  /// Should only really ever be called in response to a PendingHTLCsForwardable event.
   /// Will likely generate further events.
   pub fn process_pending_htlc_forwards(&self) {
     let _ = self.total_consistency_lock.read().unwrap();
     let mut new_events = Vec::new();
     let mut failed_forwards = Vec::new();
+    let mut handle_errors = Vec::new();
     {
       let mut channel_state_lock = self.channel_state.lock().unwrap();
       let channel_state = channel_state_lock.borrow_parts();
@@ -1185,101 +1216,104 @@ impl ChannelManager {
             continue;
           }
         };
-        let forward_chan = &mut channel_state.by_id.get_mut(&forward_chan_id).unwrap();
-
-        let mut add_htlc_msgs = Vec::new();
-        let mut fail_htlc_msgs = Vec::new();
-        for forward_info in pending_forwards.drain(..) {
-          match forward_info {
-            HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
-              log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(forward_info.payment_hash.0), prev_short_channel_id, short_chan_id);
-              let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
-                short_channel_id: prev_short_channel_id,
-                htlc_id: prev_htlc_id,
-                incoming_packet_shared_secret: forward_info.incoming_shared_secret,
-              });
-              match forward_chan.send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
-                Err(e) => {
-                  if let ChannelError::Ignore(msg) = e {
-                    log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(forward_info.payment_hash.0), msg);
-                  } else {
-                    panic!("Stated return value requirements in send_htlc() were not met");
-                  }
-                  let chan_update = self.get_channel_update(forward_chan).unwrap();
-                  failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
-                  continue;
-                },
-                Ok(update_add) => {
-                  match update_add {
-                    Some(msg) => { add_htlc_msgs.push(msg); },
-                    None => {
-                      // Nothing to do here...we're waiting on a remote
-                      // revoke_and_ack before we can add anymore HTLCs. The Channel
-                      // will automatically handle building the update_add_htlc and
-                      // commitment_signed messages when we can.
-                      // TODO: Do some kind of timer to set the channel as !is_live()
-                      // as we don't really want others relying on us relaying through
-                      // this channel currently :/.
+        if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(forward_chan_id) {
+          let mut add_htlc_msgs = Vec::new();
+          let mut fail_htlc_msgs = Vec::new();
+          for forward_info in pending_forwards.drain(..) {
+            match forward_info {
+              HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
+                log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(forward_info.payment_hash.0), prev_short_channel_id, short_chan_id);
+                let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+                  short_channel_id: prev_short_channel_id,
+                  htlc_id: prev_htlc_id,
+                  incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+                });
+                match chan.get_mut().send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
+                  Err(e) => {
+                    if let ChannelError::Ignore(msg) = e {
+                      log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(forward_info.payment_hash.0), msg);
+                    } else {
+                      panic!("Stated return value requirements in send_htlc() were not met");
+                    }
+                    let chan_update = self.get_channel_update(chan.get()).unwrap();
+                    failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
+                    continue;
+                  },
+                  Ok(update_add) => {
+                    match update_add {
+                      Some(msg) => { add_htlc_msgs.push(msg); },
+                      None => {
+                        // Nothing to do here...we're waiting on a remote
+                        // revoke_and_ack before we can add anymore HTLCs. The Channel
+                        // will automatically handle building the update_add_htlc and
+                        // commitment_signed messages when we can.
+                        // TODO: Do some kind of timer to set the channel as !is_live()
+                        // as we don't really want others relying on us relaying through
+                        // this channel currently :/.
+                      }
                     }
                   }
-                }
-              },
-              HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
-                log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
-                match forward_chan.get_update_fail_htlc(htlc_id, err_packet) {
-                  Err(e) => {
-                    if let ChannelError::Ignore(msg) = e {
-                      log_trace!(self, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
-                    } else {
-                      panic!("Stated return value requirements in get_update_fail_htlc() were not met");
+                },
+                HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+                  log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
+                  match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) {
+                    Err(e) => {
+                      if let ChannelError::Ignore(msg) = e {
+                        log_trace!(self, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
+                      } else {
+                        panic!("Stated return value requirements in get_update_fail_htlc() were not met");
+                      }
+                      // fail-backs are best-effort, we probably already have one
+                      // pending, and if not that's OK, if not, the channel is on
+                      // the chain and sending the HTLC-Timeout is their problem.
+                      continue;
+                    },
+                    Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
+                    Ok(None) => {
+                      // Nothing to do here...we're waiting on a remote
+                      // revoke_and_ack before we can update the commitment
+                      // transaction. The Channel will automatically handle
+                      // building the update_fail_htlc and commitment_signed
+                      // messages when we can.
+                      // We don't need any kind of timer here as they should fail
+                      // the channel onto the chain if they can't get our
+                      // update_fail_htlc in time, it's not our problem.
                     }
-                    // fail-backs are best-effort, we probably already have one
-                    // pending, and if not that's OK, if not, the channel is on
-                    // the chain and sending the HTLC-Timeout is their problem.
-                    continue;
-                  },
-                  Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
-                  Ok(None) => {
-                    // Nothing to do here...we're waiting on a remote
-                    // revoke_and_ack before we can update the commitment
-                    // transaction. The Channel will automatically handle
-                    // building the update_fail_htlc and commitment_signed
-                    // messages when we can.
-                    // We don't need any kind of timer here as they should fail
-                    // the channel onto the chain if they can't get our
-                    // update_fail_htlc in time, its not our problem.
                   }
-                }
-              },
+              },
+            }
           }
-        }
         if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
-          let (commitment_msg, monitor) = match forward_chan.send_commitment() {
-            Ok(res) => res,
-            Err(e) => {
-              if let ChannelError::Ignore(_) = e {
-                panic!("Stated return value requirements in send_commitment() were not met");
-              }
-              //TODO: Handle...this is bad!
+          if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
+            let (commitment_msg, monitor) = match chan.get_mut().send_commitment() {
+              Ok(res) => res,
+              Err(e) => {
+                if let ChannelError::Ignore(_) = e {
+                  panic!("Stated return value requirements in send_commitment() were not met");
+                }
+                //TODO: Handle...this is bad!
+                continue;
+              },
+            };
+            if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+              handle_errors.push((chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
              continue;
-            },
-          };
-          if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
-            unimplemented!();
+            }
+            channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+              node_id: chan.get().get_their_node_id(),
+              updates: msgs::CommitmentUpdate {
+                update_add_htlcs: add_htlc_msgs,
+                update_fulfill_htlcs: Vec::new(),
+                update_fail_htlcs: fail_htlc_msgs,
+                update_fail_malformed_htlcs: Vec::new(),
+                update_fee: None,
+                commitment_signed: commitment_msg,
+              },
+            });
           }
-          channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-            node_id: forward_chan.get_their_node_id(),
-            updates: msgs::CommitmentUpdate {
-              update_add_htlcs: add_htlc_msgs,
-              update_fulfill_htlcs: Vec::new(),
-              update_fail_htlcs: fail_htlc_msgs,
-              update_fail_malformed_htlcs: Vec::new(),
-              update_fee: None,
-              commitment_signed: commitment_msg,
-            },
-          });
+        } else {
+          unreachable!();
        }
      } else {
        for forward_info in pending_forwards.drain(..) {
@@ -1291,8 +1325,8 @@ impl ChannelManager {
             incoming_packet_shared_secret: forward_info.incoming_shared_secret,
           };
           match channel_state.claimable_htlcs.entry(forward_info.payment_hash) {
-            hash_map::Entry::Occupied(mut entry) => entry.get_mut().push(prev_hop_data),
-            hash_map::Entry::Vacant(entry) => { entry.insert(vec![prev_hop_data]); },
+            hash_map::Entry::Occupied(mut entry) => entry.get_mut().push((forward_info.amt_to_forward, prev_hop_data)),
+            hash_map::Entry::Vacant(entry) => { entry.insert(vec![(forward_info.amt_to_forward, prev_hop_data)]); },
           };
           new_events.push(events::Event::PaymentReceived {
             payment_hash: forward_info.payment_hash,
@@ -1315,26 +1349,43 @@ impl ChannelManager {
       };
     }
+    for (their_node_id, err) in handle_errors.drain(..) {
+      match handle_error!(self, err) {
+        Ok(_) => {},
+        Err(e) => {
+          if let Some(msgs::ErrorAction::IgnoreError) = e.action {
+          } else {
+            let mut channel_state = self.channel_state.lock().unwrap();
+            channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+              node_id: their_node_id,
+              action: e.action,
+            });
+          }
+        },
+      }
+    }
+
     if new_events.is_empty() { return }
     let mut events = self.pending_events.lock().unwrap();
     events.append(&mut new_events);
   }
   /// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect
-  /// after a PaymentReceived event.
-  /// expected_value is the value you expected the payment to be for (not the amount it actually
-  /// was for from the PaymentReceived event).
-  pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash, expected_value: u64) -> bool {
+  /// after a PaymentReceived event, failing the HTLC back to its origin and freeing resources
+  /// along the path (including in our own channel on which we received it).
+  /// Returns false if no payment was found to fail backwards, true if the process of failing the
+  /// HTLC backwards has been started.
+  pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) -> bool {
     let _ = self.total_consistency_lock.read().unwrap();
     let mut channel_state = Some(self.channel_state.lock().unwrap());
     let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
     if let Some(mut sources) = removed_source {
-      for htlc_with_hash in sources.drain(..) {
+      for (recvd_value, htlc_with_hash) in sources.drain(..) {
         if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
         self.fail_htlc_backwards_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_hash,
-            HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: byte_utils::be64_to_array(expected_value).to_vec() });
+            HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: byte_utils::be64_to_array(recvd_value).to_vec() });
       }
       true
     } else { false }
@@ -1452,7 +1503,10 @@ impl ChannelManager {
     let mut channel_state = Some(self.channel_state.lock().unwrap());
     let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
     if let Some(mut sources) = removed_source {
-      for htlc_with_hash in sources.drain(..) {
+      // TODO: We should require the user specify the expected amount so that we can claim
+      // only payments for the correct amount, and reject payments for incorrect amounts
+      // (which are probably middle nodes probing to break our privacy).
+      for (_, htlc_with_hash) in sources.drain(..) {
         if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
         self.claim_funds_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_preimage);
       }
@@ -1460,56 +1514,79 @@ impl ChannelManager {
     } else { false }
   }
   fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard, source: HTLCSource, payment_preimage: PaymentPreimage) {
-    match source {
-      HTLCSource::OutboundRoute { .. } => {
-        mem::drop(channel_state_lock);
-        let mut pending_events = self.pending_events.lock().unwrap();
-        pending_events.push(events::Event::PaymentSent {
-          payment_preimage
-        });
-      },
-      HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
-        //TODO: Delay the claimed_funds relaying just like we do outbound relay!
-        let channel_state = channel_state_lock.borrow_parts();
-
-        let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
-          Some(chan_id) => chan_id.clone(),
-          None => {
-            // TODO: There is probably a channel manager somewhere that needs to
-            // learn the preimage as the channel already hit the chain and that's
-            // why its missing.
-            return
-          }
-        };
+    let (their_node_id, err) = loop {
+      match source {
+        HTLCSource::OutboundRoute { .. } => {
+          mem::drop(channel_state_lock);
+          let mut pending_events = self.pending_events.lock().unwrap();
+          pending_events.push(events::Event::PaymentSent {
+            payment_preimage
+          });
+        },
+        HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
+          //TODO: Delay the claimed_funds relaying just like we do outbound relay!
+          let channel_state = channel_state_lock.borrow_parts();
-        let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
-        match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
-          Ok((msgs, monitor_option)) => {
-            if let Some(chan_monitor) = monitor_option {
-              if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-                unimplemented!();// but def dont push the event...
-              }
+          let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
+            Some(chan_id) => chan_id.clone(),
+            None => {
+              // TODO: There is probably a channel manager somewhere that needs to
+              // learn the preimage as the channel already hit the chain and that's
+              // why it's missing.
+              return
             }
-            if let Some((msg, commitment_signed)) = msgs {
-              channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                node_id: chan.get_their_node_id(),
-                updates: msgs::CommitmentUpdate {
-                  update_add_htlcs: Vec::new(),
-                  update_fulfill_htlcs: vec![msg],
-                  update_fail_htlcs: Vec::new(),
-                  update_fail_malformed_htlcs: Vec::new(),
-                  update_fee: None,
-                  commitment_signed,
+          };
+
+          if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
+            let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
+            match chan.get_mut().get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
+              Ok((msgs, monitor_option)) => {
+                if let Some(chan_monitor) = monitor_option {
+                  if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+                    if was_frozen_for_monitor {
+                      assert!(msgs.is_none());
+                    } else {
+                      break (chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()));
+                    }
+                  }
+                }
+                if let Some((msg, commitment_signed)) = msgs {
+                  channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                    node_id: chan.get().get_their_node_id(),
+                    updates: msgs::CommitmentUpdate {
+                      update_add_htlcs: Vec::new(),
+                      update_fulfill_htlcs: vec![msg],
+                      update_fail_htlcs: Vec::new(),
+                      update_fail_malformed_htlcs: Vec::new(),
+                      update_fee: None,
+                      commitment_signed,
+                    }
+                  });
+                }
+              },
+              Err(_e) => {
+                // TODO: There is probably a channel manager somewhere that needs to
+                // learn the preimage as the channel may be about to hit the chain.
+                //TODO: Do something with e?
+                return
+              },
             }
-          });
-        },
-        Err(_e) => {
-          // TODO: There is probably a channel manager somewhere that needs to
-          // learn the preimage as the channel may be about to hit the chain.
-          //TODO: Do something with e?
-          return
-        },
+          } else { unreachable!(); }
+        },
+      }
+      return;
+    };
+
+    match handle_error!(self, err) {
+      Ok(_) => {},
+      Err(e) => {
+        if let Some(msgs::ErrorAction::IgnoreError) = e.action {
+        } else {
+          let mut channel_state = self.channel_state.lock().unwrap();
+          channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+            node_id: their_node_id,
+            action: e.action,
+          });
        }
      },
    }
  }
@@ -1545,7 +1622,7 @@ impl ChannelManager {
           // knowledge of those gets moved into the appropriate in-memory
           // ChannelMonitor and they get failed backwards once we get
           // on-chain confirmations.
-          // Note I think #198 addresses this, so once its merged a test
+          // Note I think #198 addresses this, so once it's merged a test
           // should be written.
           if let Some(short_id) = channel.get_short_channel_id() {
             short_to_id.remove(&short_id);
@@ -1845,7 +1922,7 @@ impl ChannelManager {
     //
     //TODO: There exists a further attack where a node may garble the onion data, forward it to
     //us repeatedly garbled in different ways, and compare our error messages, which are
-    //encrypted with the same key. Its not immediately obvious how to usefully exploit that,
+    //encrypted with the same key. It's not immediately obvious how to usefully exploit that,
     //but we should prevent it anyway.
     let (mut pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
@@ -1962,7 +2039,7 @@ impl ChannelManager {
     let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) =
       try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &*self.fee_estimator), channel_state, chan);
     if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-      return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, commitment_signed.is_some());
+      return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
       //TODO: Rebroadcast closing_signed if present on monitor update restoration
     }
     channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
@@ -2037,10 +2114,16 @@ impl ChannelManager {
       //TODO: here and below MsgHandleErrInternal, #153 case
       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
     }
+    let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
     let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) =
       try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &*self.fee_estimator), channel_state, chan);
     if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-      return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, pending_forwards, pending_failures);
+      if was_frozen_for_monitor {
+        assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
+        return Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA"));
+      } else {
+        return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures);
+      }
     }
     if let Some(updates) = commitment_update {
       channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
@@ -2101,7 +2184,7 @@ impl ChannelManager {
       try_chan_entry!(self, chan.get_mut().get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()), channel_state, chan);
     let were_node_one = announcement.node_id_1 == our_node_id;
-    let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
+    let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
     if self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }).is_err() ||
         self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }).is_err() {
       try_chan_entry!(self, Err(ChannelError::Close("Bad announcement_signatures node_signature")), channel_state, chan);
@@ -2147,7 +2230,7 @@ impl ChannelManager {
         if commitment_update.is_none() {
           order = RAACommitmentOrder::RevokeAndACKFirst;
         }
-        return_monitor_err!(self, e, channel_state, chan, order);
+        return_monitor_err!(self, e, channel_state, chan, order, revoke_and_ack.is_some(), commitment_update.is_some());
         //TODO: Resend the funding_locked if needed once we get the monitor running again
       }
     }
@@ -2243,7 +2326,7 @@ impl ChannelManager {
       return Ok(())
     };
-    match handle_error!(self, err, their_node_id) {
+    match handle_error!(self, err) {
       Ok(_) => unreachable!(),
       Err(e) => {
         if let Some(msgs::ErrorAction::IgnoreError) = e.action {
@@ -2263,7 +2346,7 @@ impl ChannelManager {
 impl events::MessageSendEventsProvider for ChannelManager {
   fn get_and_clear_pending_msg_events(&self) -> Vec {
-    // TODO: Event release to users and serialization is currently race-y: its very easy for a
+    // TODO: Event release to users and serialization is currently race-y: it's very easy for a
     // user to serialize a ChannelManager with pending events in it and lose those events on
     // restart. This is doubly true for the fail/fulfill-backs from monitor events!
     {
@@ -2288,7 +2371,7 @@ impl events::MessageSendEventsProvider for ChannelManager {
 impl events::EventsProvider for ChannelManager {
   fn get_and_clear_pending_events(&self) -> Vec {
-    // TODO: Event release to users and serialization is currently race-y: its very easy for a
+    // TODO: Event release to users and serialization is currently race-y: it's very easy for a
     // user to serialize a ChannelManager with pending events in it and lose those events on
     // restart. This is doubly true for the fail/fulfill-backs from monitor events!
     {
@@ -2392,7 +2475,7 @@ impl ChainListener for ChannelManager {
   }
   /// We force-close the channel without letting our counterparty participate in the shutdown
-  fn block_disconnected(&self, header: &BlockHeader) {
+  fn block_disconnected(&self, header: &BlockHeader, _: u32) {
     let _ = self.total_consistency_lock.read().unwrap();
     let mut failed_channels = Vec::new();
     {
@@ -2429,82 +2512,82 @@ impl ChannelMessageHandler for ChannelManager {
   //TODO: Handle errors and close channel (or so)
   fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), HandleError> {
     let _ = self.total_consistency_lock.read().unwrap();
-    handle_error!(self, self.internal_open_channel(their_node_id, msg), their_node_id)
+    handle_error!(self, self.internal_open_channel(their_node_id, msg))
   }
   fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
     let _ = self.total_consistency_lock.read().unwrap();
-    handle_error!(self, self.internal_accept_channel(their_node_id, msg), their_node_id)
+    handle_error!(self, self.internal_accept_channel(their_node_id, msg))
   }
   fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), HandleError> {
     let _ = self.total_consistency_lock.read().unwrap();
-    handle_error!(self, self.internal_funding_created(their_node_id, msg), their_node_id)
+    handle_error!(self, self.internal_funding_created(their_node_id, msg))
   }
   fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), HandleError> {
     let _ = self.total_consistency_lock.read().unwrap();
-    handle_error!(self, self.internal_funding_signed(their_node_id, msg), their_node_id)
+    handle_error!(self, self.internal_funding_signed(their_node_id, msg))
  }
  fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), HandleError> {
     let _ = self.total_consistency_lock.read().unwrap();
-    handle_error!(self, self.internal_funding_locked(their_node_id, msg), their_node_id)
+    handle_error!(self, self.internal_funding_locked(their_node_id, msg))
  }
  fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), HandleError> {
     let _ = self.total_consistency_lock.read().unwrap();
-    handle_error!(self, self.internal_shutdown(their_node_id, msg), their_node_id)
+    handle_error!(self, self.internal_shutdown(their_node_id, msg))
  }
  fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), HandleError> {
     let _ = self.total_consistency_lock.read().unwrap();
-    handle_error!(self, self.internal_closing_signed(their_node_id, msg), their_node_id)
+    handle_error!(self, self.internal_closing_signed(their_node_id, msg))
  }
  fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), msgs::HandleError> {
     let _ = self.total_consistency_lock.read().unwrap();
-    handle_error!(self, self.internal_update_add_htlc(their_node_id, msg), their_node_id)
+    handle_error!(self, self.internal_update_add_htlc(their_node_id, msg))
  }
  fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
     let _ = self.total_consistency_lock.read().unwrap();
-    handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), their_node_id)
+    handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg))
  }
  fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> {
     let _ = self.total_consistency_lock.read().unwrap();
-    handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), their_node_id)
+    handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg))
   }
   fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> {
     let _ = self.total_consistency_lock.read().unwrap();
-    handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg), their_node_id)
+    handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg))
   }
   fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), HandleError> {
     let _ = self.total_consistency_lock.read().unwrap();
-    handle_error!(self, self.internal_commitment_signed(their_node_id, msg), their_node_id)
+    handle_error!(self, self.internal_commitment_signed(their_node_id, msg))
   }
   fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), HandleError> {
     let _ = self.total_consistency_lock.read().unwrap();
-    handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg), their_node_id)
+    handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg))
   }
   fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> {
     let _ = self.total_consistency_lock.read().unwrap();
-    handle_error!(self, self.internal_update_fee(their_node_id, msg), their_node_id)
+    handle_error!(self, self.internal_update_fee(their_node_id, msg))
   }
   fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> {
     let _ = self.total_consistency_lock.read().unwrap();
-    handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), their_node_id)
+    handle_error!(self, self.internal_announcement_signatures(their_node_id, msg))
   }
   fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), HandleError> {
     let _ = self.total_consistency_lock.read().unwrap();
-    handle_error!(self, self.internal_channel_reestablish(their_node_id, msg), their_node_id)
+    handle_error!(self, self.internal_channel_reestablish(their_node_id, msg))
   }
   fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) {
@@ -2554,6 +2637,25 @@ impl ChannelMessageHandler for ChannelManager {
             true
           })
         }
+        pending_msg_events.retain(|msg| {
+          match msg {
+            &events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != their_node_id,
+            &events::MessageSendEvent::SendOpenChannel { ref node_id, .. } => node_id != their_node_id,
+            &events::MessageSendEvent::SendFundingCreated { ref node_id, .. } => node_id != their_node_id,
+            &events::MessageSendEvent::SendFundingSigned { ref node_id, .. } => node_id != their_node_id,
+            &events::MessageSendEvent::SendFundingLocked { ref node_id, .. } => node_id != their_node_id,
+            &events::MessageSendEvent::SendAnnouncementSignatures { ref node_id, .. } => node_id != their_node_id,
+            &events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => node_id != their_node_id,
+            &events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => node_id != their_node_id,
+            &events::MessageSendEvent::SendClosingSigned { ref node_id, .. } => node_id != their_node_id,
+            &events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != their_node_id,
+            &events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != their_node_id,
+            &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
+            &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+            &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != their_node_id,
+            &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
+          }
+        });
       }
       for failure in failed_channels.drain(..) {
         self.finish_force_close_channel(failure);
@@ -2612,12 +2714,7 @@ const MIN_SERIALIZATION_VERSION: u8 = 1;
 impl Writeable for PendingForwardHTLCInfo {
   fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
-    if let &Some(ref onion) = &self.onion_packet {
-      1u8.write(writer)?;
-      onion.write(writer)?;
-    } else {
-      0u8.write(writer)?;
-    }
+    self.onion_packet.write(writer)?;
     self.incoming_shared_secret.write(writer)?;
     self.payment_hash.write(writer)?;
     self.short_channel_id.write(writer)?;
@@ -2629,13 +2726,8 @@ impl Writeable for PendingForwardHTLCInfo {
 impl Readable for PendingForwardHTLCInfo {
   fn read(reader: &mut R) -> Result {
-    let onion_packet = match >::read(reader)? {
-      0 => None,
-      1 => Some(msgs::OnionPacket::read(reader)?),
-      _ => return Err(DecodeError::InvalidValue),
-    };
     Ok(PendingForwardHTLCInfo {
-      onion_packet,
+      onion_packet: Readable::read(reader)?,
       incoming_shared_secret: Readable::read(reader)?,
       payment_hash: Readable::read(reader)?,
       short_channel_id: Readable::read(reader)?,
@@ -2839,7 +2931,8 @@ impl Writeable for ChannelManager {
       for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() {
         payment_hash.write(writer)?;
         (previous_hops.len() as u64).write(writer)?;
-        for previous_hop in previous_hops {
+        for &(recvd_amt, ref previous_hop) in previous_hops.iter() {
+          recvd_amt.write(writer)?;
           previous_hop.write(writer)?;
         }
       }
@@ -2898,7 +2991,7 @@ pub struct ChannelManagerReadArgs<'a> {
   /// value.get_funding_txo() should be the key).
   ///
   /// If a monitor is inconsistent with the channel state during deserialization the channel will
-  /// be force-closed using the data in the channelmonitor and the Channel will be dropped. This
+  /// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This
   /// is true for missing channels as well. If there is a monitor missing for which we find
   /// channel data Err(DecodeError::InvalidValue) will be returned.
   ///
@@ -2976,7 +3069,7 @@ impl<'a, R : ::std::io::Read> ReadableArgs> for (S
       let previous_hops_len: u64 = Readable::read(reader)?;
       let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, 2));
       for _ in 0..previous_hops_len {
-        previous_hops.push(Readable::read(reader)?);
+        previous_hops.push((Readable::read(reader)?, Readable::read(reader)?));
       }
       claimable_htlcs.insert(payment_hash, previous_hops);
     }