X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fchannelmanager.rs;h=64a9f2ebb8e575c3895d77efd3ab4eaa1b82ccf0;hb=963f002056d86365447dc1ce244251a449df5b6d;hp=b824b99b26dc2f95acba8e88fea8046da057e39c;hpb=d7ac68701f6d169438b0337919fde7c715b2e94c;p=rust-lightning

diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index b824b99b..64a9f2eb 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -12,11 +12,12 @@ use bitcoin::blockdata::block::BlockHeader;
 use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::network::constants::Network;
-use bitcoin::util::hash::{BitcoinHash, Sha256dHash};
+use bitcoin::util::hash::BitcoinHash;
 
 use bitcoin_hashes::{Hash, HashEngine};
 use bitcoin_hashes::hmac::{Hmac, HmacEngine};
 use bitcoin_hashes::sha256::Hash as Sha256;
+use bitcoin_hashes::sha256d::Hash as Sha256dHash;
 use bitcoin_hashes::cmp::fixed_time_eq;
 
 use secp256k1::key::{SecretKey,PublicKey};
@@ -27,7 +28,7 @@ use secp256k1;
 use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
 use chain::transaction::OutPoint;
 use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, HTLC_FAIL_ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use ln::router::Route;
 use ln::msgs;
 use ln::onion_utils;
@@ -252,10 +253,12 @@ pub(super) struct ChannelHolder {
 	/// guarantees are made about the existence of a channel with the short id here, nor the short
 	/// ids in the PendingForwardHTLCInfo!
 	pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
+	/// payment_hash -> Vec<(amount_received, htlc_source)> for tracking payments that were made to
+	/// us and can be failed/claimed by the user
 	/// Note that while this is held in the same mutex as the channels themselves, no consistency
 	/// guarantees are made about the channels given here actually existing anymore by the time you
 	/// go to read them!
-	pub(super) claimable_htlcs: HashMap<PaymentHash, Vec<HTLCPreviousHopData>>,
+	pub(super) claimable_htlcs: HashMap<PaymentHash, Vec<(u64, HTLCPreviousHopData)>>,
 	/// Messages to send to peers - pushed to in the same lock that they are generated in (except
 	/// for broadcast messages, where ordering isn't as strict).
 	pub(super) pending_msg_events: Vec<events::MessageSendEvent>,
@@ -265,7 +268,7 @@ pub(super) struct MutChannelHolder<'a> {
 	pub(super) short_to_id: &'a mut HashMap<u64, [u8; 32]>,
 	pub(super) next_forward: &'a mut Instant,
 	pub(super) forward_htlcs: &'a mut HashMap<u64, Vec<HTLCForwardInfo>>,
-	pub(super) claimable_htlcs: &'a mut HashMap<PaymentHash, Vec<HTLCPreviousHopData>>,
+	pub(super) claimable_htlcs: &'a mut HashMap<PaymentHash, Vec<(u64, HTLCPreviousHopData)>>,
	pub(super) pending_msg_events: &'a mut Vec<events::MessageSendEvent>,
 }
 impl ChannelHolder {
@@ -348,20 +351,21 @@ pub struct ChannelManager {
 
 const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
 pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
 
-// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS +
-// HTLC_FAIL_ANTI_REORG_DELAY, ie that if the next-hop peer fails the HTLC within
-// HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have HTLC_FAIL_TIMEOUT_BLOCKS left to fail it
-// backwards ourselves before hitting the CLTV_CLAIM_BUFFER point and failing the channel
-// on-chain to time out the HTLC.
+// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + 2*LATENCY_GRACE_PERIOD_BLOCKS,
+// ie that if the next-hop peer fails the HTLC within
+// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to time it out on-chain,
+// then waiting ANTI_REORG_DELAY to be reorg-safe on the outbound HTLC and
+// failing the corresponding htlc backward, and us now seeing the last block of ANTI_REORG_DELAY before
+// LATENCY_GRACE_PERIOD_BLOCKS.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER - HTLC_FAIL_ANTI_REORG_DELAY;
+const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
 
 // Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
 // ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - HTLC_FAIL_TIMEOUT_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
 
 macro_rules! secp_call {
 	( $res: expr, $err: expr ) => {
@@ -448,13 +452,14 @@ macro_rules! try_chan_entry {
 	}
 }
 
-macro_rules! return_monitor_err {
+macro_rules! handle_monitor_err {
 	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-		return_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
+		handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
 	};
 	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
 		match $err {
 			ChannelMonitorUpdateErr::PermanentFailure => {
+				log_error!($self, "Closing channel {} due to monitor update PermanentFailure", log_bytes!($entry.key()[..]));
 				let (channel_id, mut chan) = $entry.remove_entry();
 				if let Some(short_id) = chan.get_short_channel_id() {
 					$channel_state.short_to_id.remove(&short_id);
 				}
@@ -468,30 +473,52 @@ macro_rules! return_monitor_err {
 				// splitting hairs we'd prefer to claim payments that were to us, but we haven't
 				// given up the preimage yet, so might as well just wait until the payment is
 				// retried, avoiding the on-chain fees.
-				return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()));
+				res
 			},
 			ChannelMonitorUpdateErr::TemporaryFailure => {
-				$entry.get_mut().monitor_update_failed($action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
-				return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()));
+				log_info!($self, "Disabling channel {} due to monitor update TemporaryFailure.
On restore will send {} and process {} forwards and {} fails", + log_bytes!($entry.key()[..]), + if $resend_commitment && $resend_raa { + match $action_type { + RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" }, + RAACommitmentOrder::RevokeAndACKFirst => { "RAA then commitment" }, + } + } else if $resend_commitment { "commitment" } + else if $resend_raa { "RAA" } + else { "nothing" }, + (&$failed_forwards as &Vec<(PendingForwardHTLCInfo, u64)>).len(), + (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len()); + if !$resend_commitment { + debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa); + } + if !$resend_raa { + debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment); + } + $entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails); + Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key())) }, } } } +macro_rules! return_monitor_err { + ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => { + return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment); + }; + ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { + return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails); + } +} + // Does not break in case of TemporaryFailure! macro_rules! maybe_break_monitor_err { ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => { - match $err { - ChannelMonitorUpdateErr::PermanentFailure => { - let (channel_id, mut chan) = $entry.remove_entry(); - if let Some(short_id) = chan.get_short_channel_id() { - $channel_state.short_to_id.remove(&short_id); - } - break Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) - }, - ChannelMonitorUpdateErr::TemporaryFailure => { - $entry.get_mut().monitor_update_failed($action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new()); + match (handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment), $err) { + (e, ChannelMonitorUpdateErr::PermanentFailure) => { + break e; }, + (_, ChannelMonitorUpdateErr::TemporaryFailure) => { }, } } } @@ -794,7 +821,7 @@ impl ChannelManager { let pending_forward_info = if next_hop_data.hmac == [0; 32] { // OUR PAYMENT! 
				// final_expiry_too_soon
-				if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 {
+				if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
 					return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
 				}
 				// final_incorrect_htlc_amount
@@ -886,8 +913,8 @@ impl ChannelManager {
 					break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
 				}
 				let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
-				// We want to have at least HTLC_FAIL_TIMEOUT_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration
-				if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon
+				// We want to have at least LATENCY_GRACE_PERIOD_BLOCKS to fail prior to going on chain, CLTV_CLAIM_BUFFER blocks before expiration
+				if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS as u32 { // expiry_too_soon
 					break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
 				}
 				if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
@@ -939,7 +966,7 @@ impl ChannelManager {
 			excess_data: Vec::new(),
 		};
 
-		let msg_hash = Sha256dHash::from_data(&unsigned.encode()[..]);
+		let msg_hash = Sha256dHash::hash(&unsigned.encode()[..]);
 		let sig = self.secp_ctx.sign(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
 
 		Ok(msgs::ChannelUpdate {
@@ -1133,7 +1160,7 @@ impl ChannelManager {
 			Ok(res) => res,
 			Err(_) => return None, // Only in case of state precondition violations eg channel is closing
 		};
-		let msghash = hash_to_message!(&Sha256dHash::from_data(&announcement.encode()[..])[..]);
+		let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
 		let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
 
 		Some(msgs::AnnouncementSignatures {
@@ -1153,6 +1180,7 @@ impl ChannelManager {
 
 		let mut new_events = Vec::new();
 		let mut failed_forwards = Vec::new();
+		let mut handle_errors = Vec::new();
 		{
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = channel_state_lock.borrow_parts();
@@ -1188,101 +1216,104 @@ impl ChannelManager {
 						continue;
 					}
 				};
-				let forward_chan = &mut channel_state.by_id.get_mut(&forward_chan_id).unwrap();
-
-				let mut add_htlc_msgs = Vec::new();
-				let mut fail_htlc_msgs = Vec::new();
-				for forward_info in pending_forwards.drain(..)
{
-					match forward_info {
-						HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
-							log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(forward_info.payment_hash.0), prev_short_channel_id, short_chan_id);
-							let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
-								short_channel_id: prev_short_channel_id,
-								htlc_id: prev_htlc_id,
-								incoming_packet_shared_secret: forward_info.incoming_shared_secret,
-							});
-							match forward_chan.send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
-								Err(e) => {
-									if let ChannelError::Ignore(msg) = e {
-										log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(forward_info.payment_hash.0), msg);
-									} else {
-										panic!("Stated return value requirements in send_htlc() were not met");
-									}
-									let chan_update = self.get_channel_update(forward_chan).unwrap();
-									failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
-									continue;
-								},
-								Ok(update_add) => {
-									match update_add {
-										Some(msg) => { add_htlc_msgs.push(msg); },
-										None => {
-											// Nothing to do here...we're waiting on a remote
-											// revoke_and_ack before we can add anymore HTLCs. The Channel
-											// will automatically handle building the update_add_htlc and
-											// commitment_signed messages when we can.
-											// TODO: Do some kind of timer to set the channel as !is_live()
-											// as we don't really want others relying on us relaying through
-											// this channel currently :/.
+				if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(forward_chan_id) {
+					let mut add_htlc_msgs = Vec::new();
+					let mut fail_htlc_msgs = Vec::new();
+					for forward_info in pending_forwards.drain(..) {
+						match forward_info {
+							HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
+								log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(forward_info.payment_hash.0), short_chan_id);
+								let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+									short_channel_id: prev_short_channel_id,
+									htlc_id: prev_htlc_id,
+									incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+								});
+								match chan.get_mut().send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
+									Err(e) => {
+										if let ChannelError::Ignore(msg) = e {
+											log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(forward_info.payment_hash.0), msg);
+										} else {
+											panic!("Stated return value requirements in send_htlc() were not met");
+										}
+										let chan_update = self.get_channel_update(chan.get()).unwrap();
+										failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
+										continue;
+									},
+									Ok(update_add) => {
+										match update_add {
+											Some(msg) => { add_htlc_msgs.push(msg); },
+											None => {
+												// Nothing to do here...we're waiting on a remote
+												// revoke_and_ack before we can add any more HTLCs. The Channel
+												// will automatically handle building the update_add_htlc and
+												// commitment_signed messages when we can.
+												// TODO: Do some kind of timer to set the channel as !is_live()
+												// as we don't really want others relying on us relaying through
+												// this channel currently :/.
+ } } } } - } - }, - HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => { - log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id); - match forward_chan.get_update_fail_htlc(htlc_id, err_packet) { - Err(e) => { - if let ChannelError::Ignore(msg) = e { - log_trace!(self, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg); - } else { - panic!("Stated return value requirements in get_update_fail_htlc() were not met"); + }, + HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => { + log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id); + match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) { + Err(e) => { + if let ChannelError::Ignore(msg) = e { + log_trace!(self, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg); + } else { + panic!("Stated return value requirements in get_update_fail_htlc() were not met"); + } + // fail-backs are best-effort, we probably already have one + // pending, and if not that's OK, if not, the channel is on + // the chain and sending the HTLC-Timeout is their problem. + continue; + }, + Ok(Some(msg)) => { fail_htlc_msgs.push(msg); }, + Ok(None) => { + // Nothing to do here...we're waiting on a remote + // revoke_and_ack before we can update the commitment + // transaction. The Channel will automatically handle + // building the update_fail_htlc and commitment_signed + // messages when we can. + // We don't need any kind of timer here as they should fail + // the channel onto the chain if they can't get our + // update_fail_htlc in time, it's not our problem. } - // fail-backs are best-effort, we probably already have one - // pending, and if not that's OK, if not, the channel is on - // the chain and sending the HTLC-Timeout is their problem. - continue; - }, - Ok(Some(msg)) => { fail_htlc_msgs.push(msg); }, - Ok(None) => { - // Nothing to do here...we're waiting on a remote - // revoke_and_ack before we can update the commitment - // transaction. The Channel will automatically handle - // building the update_fail_htlc and commitment_signed - // messages when we can. - // We don't need any kind of timer here as they should fail - // the channel onto the chain if they can't get our - // update_fail_htlc in time, it's not our problem. } - } - }, + }, + } } - } - if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() { - let (commitment_msg, monitor) = match forward_chan.send_commitment() { - Ok(res) => res, - Err(e) => { - if let ChannelError::Ignore(_) = e { - panic!("Stated return value requirements in send_commitment() were not met"); - } - //TODO: Handle...this is bad! + if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() { + let (commitment_msg, monitor) = match chan.get_mut().send_commitment() { + Ok(res) => res, + Err(e) => { + if let ChannelError::Ignore(_) = e { + panic!("Stated return value requirements in send_commitment() were not met"); + } + //TODO: Handle...this is bad! 
+ continue; + }, + }; + if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { + handle_errors.push((chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true))); continue; - }, - }; - if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) { - unimplemented!(); + } + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get().get_their_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: add_htlc_msgs, + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: fail_htlc_msgs, + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed: commitment_msg, + }, + }); } - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: forward_chan.get_their_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: add_htlc_msgs, - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: fail_htlc_msgs, - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed: commitment_msg, - }, - }); + } else { + unreachable!(); } } else { for forward_info in pending_forwards.drain(..) { @@ -1294,8 +1325,8 @@ impl ChannelManager { incoming_packet_shared_secret: forward_info.incoming_shared_secret, }; match channel_state.claimable_htlcs.entry(forward_info.payment_hash) { - hash_map::Entry::Occupied(mut entry) => entry.get_mut().push(prev_hop_data), - hash_map::Entry::Vacant(entry) => { entry.insert(vec![prev_hop_data]); }, + hash_map::Entry::Occupied(mut entry) => entry.get_mut().push((forward_info.amt_to_forward, prev_hop_data)), + hash_map::Entry::Vacant(entry) => { entry.insert(vec![(forward_info.amt_to_forward, prev_hop_data)]); }, }; new_events.push(events::Event::PaymentReceived { payment_hash: forward_info.payment_hash, @@ -1318,26 +1349,43 @@ impl ChannelManager { }; } + for (their_node_id, err) in handle_errors.drain(..) { + match handle_error!(self, err) { + Ok(_) => {}, + Err(e) => { + if let Some(msgs::ErrorAction::IgnoreError) = e.action { + } else { + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: their_node_id, + action: e.action, + }); + } + }, + } + } + if new_events.is_empty() { return } let mut events = self.pending_events.lock().unwrap(); events.append(&mut new_events); } /// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect - /// after a PaymentReceived event. - /// expected_value is the value you expected the payment to be for (not the amount it actually - /// was for from the PaymentReceived event). - pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash, expected_value: u64) -> bool { + /// after a PaymentReceived event, failing the HTLC back to its origin and freeing resources + /// along the path (including in our own channel on which we received it). + /// Returns false if no payment was found to fail backwards, true if the process of failing the + /// HTLC backwards has been started. + pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) -> bool { let _ = self.total_consistency_lock.read().unwrap(); let mut channel_state = Some(self.channel_state.lock().unwrap()); let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash); if let Some(mut sources) = removed_source { - for htlc_with_hash in sources.drain(..) 
{
+		for (recvd_value, htlc_with_hash) in sources.drain(..) {
 			if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
 			self.fail_htlc_backwards_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_hash,
-					HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: byte_utils::be64_to_array(expected_value).to_vec() });
+					HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: byte_utils::be64_to_array(recvd_value).to_vec() });
 		}
 		true
 	} else { false }
@@ -1455,7 +1503,10 @@ impl ChannelManager {
 		let mut channel_state = Some(self.channel_state.lock().unwrap());
 		let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
 		if let Some(mut sources) = removed_source {
-			for htlc_with_hash in sources.drain(..) {
+			// TODO: We should require the user specify the expected amount so that we can claim
+			// only payments for the correct amount, and reject payments for incorrect amounts
+			// (which are probably middle nodes probing to break our privacy).
+			for (_, htlc_with_hash) in sources.drain(..) {
 				if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
 				self.claim_funds_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_preimage);
 			}
 			true
 		} else { false }
 	}
 
 	fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_preimage: PaymentPreimage) {
-		match source {
-			HTLCSource::OutboundRoute { .. } => {
-				mem::drop(channel_state_lock);
-				let mut pending_events = self.pending_events.lock().unwrap();
-				pending_events.push(events::Event::PaymentSent {
-					payment_preimage
-				});
-			},
-			HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
-				//TODO: Delay the claimed_funds relaying just like we do outbound relay!
-				let channel_state = channel_state_lock.borrow_parts();
-
-				let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
-					Some(chan_id) => chan_id.clone(),
-					None => {
-						// TODO: There is probably a channel manager somewhere that needs to
-						// learn the preimage as the channel already hit the chain and that's
-						// why it's missing.
+		let (their_node_id, err) = loop {
+			match source {
+				HTLCSource::OutboundRoute { .. } => {
+					mem::drop(channel_state_lock);
+					let mut pending_events = self.pending_events.lock().unwrap();
+					pending_events.push(events::Event::PaymentSent {
+						payment_preimage
+					});
+				},
+				HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
+					//TODO: Delay the claimed_funds relaying just like we do outbound relay!
+					let channel_state = channel_state_lock.borrow_parts();
 
-				let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
-				match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
-					Ok((msgs, monitor_option)) => {
-						if let Some(chan_monitor) = monitor_option {
-							if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-								unimplemented!();// but def don't push the event...
-							}
+					let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
+						Some(chan_id) => chan_id.clone(),
+						None => {
+							// TODO: There is probably a channel manager somewhere that needs to
+							// learn the preimage as the channel already hit the chain and that's
+							// why it's missing.
+ return } - if let Some((msg, commitment_signed)) = msgs { - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get_their_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: vec![msg], - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed, + }; + + if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) { + let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); + match chan.get_mut().get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) { + Ok((msgs, monitor_option)) => { + if let Some(chan_monitor) = monitor_option { + if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { + if was_frozen_for_monitor { + assert!(msgs.is_none()); + } else { + break (chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some())); + } + } } - }); + if let Some((msg, commitment_signed)) = msgs { + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get().get_their_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: vec![msg], + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + } + }); + } + }, + Err(_e) => { + // TODO: There is probably a channel manager somewhere that needs to + // learn the preimage as the channel may be about to hit the chain. + //TODO: Do something with e? + return + }, } - }, - Err(_e) => { - // TODO: There is probably a channel manager somewhere that needs to - // learn the preimage as the channel may be about to hit the chain. - //TODO: Do something with e? 
- return - }, + } else { unreachable!(); } + }, + } + return; + }; + + match handle_error!(self, err) { + Ok(_) => {}, + Err(e) => { + if let Some(msgs::ErrorAction::IgnoreError) = e.action { + } else { + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { + node_id: their_node_id, + action: e.action, + }); } }, } @@ -2110,7 +2184,7 @@ impl ChannelManager { try_chan_entry!(self, chan.get_mut().get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()), channel_state, chan); let were_node_one = announcement.node_id_1 == our_node_id; - let msghash = hash_to_message!(&Sha256dHash::from_data(&announcement.encode()[..])[..]); + let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]); if self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }).is_err() || self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }).is_err() { try_chan_entry!(self, Err(ChannelError::Close("Bad announcement_signatures node_signature")), channel_state, chan); @@ -2401,7 +2475,7 @@ impl ChainListener for ChannelManager { } /// We force-close the channel without letting our counterparty participate in the shutdown - fn block_disconnected(&self, header: &BlockHeader) { + fn block_disconnected(&self, header: &BlockHeader, _: u32) { let _ = self.total_consistency_lock.read().unwrap(); let mut failed_channels = Vec::new(); { @@ -2563,6 +2637,25 @@ impl ChannelMessageHandler for ChannelManager { true }) } + pending_msg_events.retain(|msg| { + match msg { + &events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != their_node_id, + &events::MessageSendEvent::SendOpenChannel { ref node_id, .. } => node_id != their_node_id, + &events::MessageSendEvent::SendFundingCreated { ref node_id, .. } => node_id != their_node_id, + &events::MessageSendEvent::SendFundingSigned { ref node_id, .. } => node_id != their_node_id, + &events::MessageSendEvent::SendFundingLocked { ref node_id, .. } => node_id != their_node_id, + &events::MessageSendEvent::SendAnnouncementSignatures { ref node_id, .. } => node_id != their_node_id, + &events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => node_id != their_node_id, + &events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => node_id != their_node_id, + &events::MessageSendEvent::SendClosingSigned { ref node_id, .. } => node_id != their_node_id, + &events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != their_node_id, + &events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != their_node_id, + &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true, + &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true, + &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != their_node_id, + &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true, + } + }); } for failure in failed_channels.drain(..) 
{
 			self.finish_force_close_channel(failure);
 		}
 	}
@@ -2838,7 +2931,8 @@ impl Writeable for ChannelManager {
 		for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() {
 			payment_hash.write(writer)?;
 			(previous_hops.len() as u64).write(writer)?;
-			for previous_hop in previous_hops {
+			for &(recvd_amt, ref previous_hop) in previous_hops.iter() {
+				recvd_amt.write(writer)?;
 				previous_hop.write(writer)?;
 			}
 		}
@@ -2975,7 +3069,7 @@ impl<'a, R : ::std::io::Read> ReadableArgs<R, ChannelManagerReadArgs<'a>> for (S
 			let previous_hops_len: u64 = Readable::read(reader)?;
 			let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, 2));
 			for _ in 0..previous_hops_len {
-				previous_hops.push(Readable::read(reader)?);
+				previous_hops.push((Readable::read(reader)?, Readable::read(reader)?));
 			}
 			claimable_htlcs.insert(payment_hash, previous_hops);
 		}
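
The two CHECK_CLTV_EXPIRY_SANITY constants above are compile-time assertions: each encodes its inequality as an unsigned subtraction, so any violation underflows during constant evaluation and the build fails rather than shipping an unsound CLTV_EXPIRY_DELTA. A minimal standalone sketch of the same trick; the constant values below are placeholders assumed purely for illustration, not necessarily the crate's real values:

// Placeholder values, assumed for illustration only.
const CLTV_EXPIRY_DELTA: u16 = 6 * 12;
const CLTV_CLAIM_BUFFER: u32 = 6;
const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
const ANTI_REORG_DELAY: u32 = 6;

// 72 - 3 - 6 - 6 - 3 = 54 blocks of headroom with the assumed values. If the
// subtracted terms ever summed to more than CLTV_EXPIRY_DELTA, this u32
// arithmetic would underflow in const evaluation and compilation would abort.
#[deny(const_err)]
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32
	- LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER
	- ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;

fn main() {
	println!("spare CLTV headroom: {} blocks", CHECK_CLTV_EXPIRY_SANITY);
}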
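The return_monitor_err!/handle_monitor_err! split exists because the old macro could only `return`, which made it unusable in process_pending_htlc_forwards (which wants to keep iterating and collect failures into handle_errors) and in loops that `break` with an error. The reworked macro is an expression that evaluates to a Result, and each caller decides what to do with it. A rough model of the three call styles, using simplified stand-in types rather than the crate's real signatures:

#[derive(Clone, Copy)]
enum ChannelMonitorUpdateErr { TemporaryFailure, PermanentFailure }

#[derive(Debug)]
enum ChanErr { ForceClose, IgnoreAndFreeze }

// handle_monitor_err!-style: evaluate to a Result instead of returning.
// PermanentFailure force-closes the channel; TemporaryFailure freezes it
// until the monitor update is restored and queued messages are resent.
fn handle_monitor_err(err: ChannelMonitorUpdateErr) -> Result<(), ChanErr> {
	match err {
		ChannelMonitorUpdateErr::PermanentFailure => Err(ChanErr::ForceClose),
		ChannelMonitorUpdateErr::TemporaryFailure => Err(ChanErr::IgnoreAndFreeze),
	}
}

// return_monitor_err!-style caller: propagate the error immediately.
fn message_handler(err: ChannelMonitorUpdateErr) -> Result<(), ChanErr> {
	handle_monitor_err(err)
}

// maybe_break_monitor_err!-style caller: only PermanentFailure breaks out of
// the containing loop; TemporaryFailure falls through and processing
// continues (the channel is already frozen by the shared body).
fn loop_handler(err: ChannelMonitorUpdateErr) -> Result<(), ChanErr> {
	loop {
		match (handle_monitor_err(err), err) {
			(e, ChannelMonitorUpdateErr::PermanentFailure) => break e,
			(_, ChannelMonitorUpdateErr::TemporaryFailure) => {},
		}
		return Ok(());
	}
}

// handle_errors-style caller: stash (peer, error) pairs and deal with them
// once the channel-state lock has been released.
fn forward_pass(errs: Vec<ChannelMonitorUpdateErr>) -> Vec<(u64, Result<(), ChanErr>)> {
	let mut handle_errors = Vec::new();
	for (node_id, err) in errs.into_iter().enumerate() {
		handle_errors.push((node_id as u64, handle_monitor_err(err)));
	}
	handle_errors
}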
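Storing (amount_received, source) pairs in claimable_htlcs is what lets the new fail_htlc_backwards signature drop its expected_value argument: the 0x4000 | 15 (incorrect_or_unknown_payment_details) failure data is built from the amount the node itself recorded at receive time, so the caller cannot mis-report it. A sketch of the shape of that bookkeeping, with hypothetical stand-in types (the real code stores HTLCPreviousHopData and serializes the amount via byte_utils::be64_to_array):

use std::collections::HashMap;

// Hypothetical stand-ins for the crate's types.
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
struct PaymentHash([u8; 32]);
#[derive(Debug)]
struct HTLCPreviousHopData { short_channel_id: u64, htlc_id: u64 }

// payment_hash -> Vec<(amount_received, source)>
type ClaimableHtlcs = HashMap<PaymentHash, Vec<(u64, HTLCPreviousHopData)>>;

/// Returns false if no payment was found to fail backwards.
fn fail_htlc_backwards(claimable: &mut ClaimableHtlcs, payment_hash: &PaymentHash) -> bool {
	if let Some(sources) = claimable.remove(payment_hash) {
		for (recvd_value, htlc_with_hash) in sources {
			// Failure data carries the amount we actually received, taken
			// from our own records rather than a user-supplied expected_value.
			let data = recvd_value.to_be_bytes().to_vec();
			println!("failing {:?} with code {:#x}, data {:?}",
			         htlc_with_hash, 0x4000u16 | 15, data);
		}
		true
	} else {
		false
	}
}

fn main() {
	let mut claimable = ClaimableHtlcs::new();
	claimable.insert(PaymentHash([0; 32]),
		vec![(10_000, HTLCPreviousHopData { short_channel_id: 42, htlc_id: 0 })]);
	assert!(fail_htlc_backwards(&mut claimable, &PaymentHash([0; 32])));
	assert!(!fail_htlc_backwards(&mut claimable, &PaymentHash([0; 32])));
}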
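In claim_funds_internal, the new was_frozen_for_monitor check covers a subtle case: if the channel was already frozen by an earlier monitor failure, the fulfill lands in the holding cell and no messages are produced, so a second TemporaryFailure from add_update_monitor must not trigger another round of handle_monitor_err! bookkeeping. A compressed model of that branch, with invented names standing in for the real channel state:

// Invented names for illustration; the real logic lives in claim_funds_internal.
enum MonitorResult { UpdateOk, TemporaryFailure }

// Returns Ok(resend_commitment) on success, or an error for the caller to
// feed into handle_monitor_err!-style handling.
fn fulfill_monitor_check(was_frozen_for_monitor: bool, msgs_built: bool,
                         monitor: MonitorResult) -> Result<bool, &'static str> {
	match monitor {
		MonitorResult::UpdateOk => Ok(msgs_built),
		MonitorResult::TemporaryFailure if was_frozen_for_monitor => {
			// Channel was already frozen: the fulfill went into the holding
			// cell, so no update messages can exist yet and the earlier
			// failure's bookkeeping already covers this HTLC.
			assert!(!msgs_built);
			Ok(false)
		}
		MonitorResult::TemporaryFailure => {
			// First failure on this channel: freeze it now and resend a
			// commitment on restore iff messages were actually built
			// (resend_commitment = msgs.is_some() in the real call).
			Err("Failed to update ChannelMonitor")
		}
	}
}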
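The new retain pass in peer_disconnected drops every queued direct message addressed to the departing peer, since those will be regenerated through channel_reestablish on reconnect, while peer-independent broadcast events stay queued. The filter, reduced to a stand-in event enum with a few representative variants:

// Stand-in for a few of the MessageSendEvent variants matched above.
enum MessageSendEvent {
	SendFundingLocked { node_id: u64 },
	UpdateHTLCs { node_id: u64 },
	HandleError { node_id: u64 },
	BroadcastChannelAnnouncement,
	BroadcastChannelUpdate,
}

fn purge_for_disconnected_peer(pending: &mut Vec<MessageSendEvent>, their_node_id: u64) {
	pending.retain(|msg| match msg {
		// Direct messages to the gone peer are dropped; they will be
		// re-derived from channel state when the peer reconnects.
		MessageSendEvent::SendFundingLocked { node_id }
		| MessageSendEvent::UpdateHTLCs { node_id }
		| MessageSendEvent::HandleError { node_id } => *node_id != their_node_id,
		// Broadcasts are peer-independent and stay queued.
		MessageSendEvent::BroadcastChannelAnnouncement
		| MessageSendEvent::BroadcastChannelUpdate => true,
	});
}

fn main() {
	let mut pending = vec![
		MessageSendEvent::UpdateHTLCs { node_id: 7 },
		MessageSendEvent::BroadcastChannelUpdate,
	];
	purge_for_disconnected_peer(&mut pending, 7);
	assert_eq!(pending.len(), 1); // only the broadcast survives
}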
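Finally, the serialization change at the bottom must keep writer and reader in lockstep: each claimable entry is now written as the received amount followed by the previous-hop data, and read back as a tuple in the same order. A round-trip sketch using std::io in place of the crate's Writeable/Readable traits, with a fixed-size placeholder for the hop data:

use std::io::{Cursor, Read, Result, Write};

// Stand-in for `recvd_amt.write(writer)?; previous_hop.write(writer)?;`
fn write_claimable<W: Write>(w: &mut W, entries: &[(u64, [u8; 20])]) -> Result<()> {
	w.write_all(&(entries.len() as u64).to_be_bytes())?;
	for &(recvd_amt, ref hop) in entries.iter() {
		w.write_all(&recvd_amt.to_be_bytes())?; // amount first...
		w.write_all(hop)?;                      // ...then the hop data
	}
	Ok(())
}

// Stand-in for `previous_hops.push((Readable::read(reader)?, Readable::read(reader)?))`
fn read_claimable<R: Read>(r: &mut R) -> Result<Vec<(u64, [u8; 20])>> {
	let mut len = [0u8; 8];
	r.read_exact(&mut len)?;
	let mut out = Vec::new();
	for _ in 0..u64::from_be_bytes(len) {
		let mut amt = [0u8; 8];
		r.read_exact(&mut amt)?; // must read the amount first,
		let mut hop = [0u8; 20];
		r.read_exact(&mut hop)?; // matching the write order above
		out.push((u64::from_be_bytes(amt), hop));
	}
	Ok(out)
}

fn main() -> Result<()> {
	let entries = vec![(10_000u64, [7u8; 20])];
	let mut buf = Vec::new();
	write_claimable(&mut buf, &entries)?;
	assert_eq!(read_claimable(&mut Cursor::new(buf))?, entries);
	Ok(())
}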