X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fln%2Fchannelmanager.rs;h=a0e066371cb4c06fb686a4659bffe8f586329482;hb=148c79719e9177709c7b546d2636e3fa28046fc9;hp=d48dcea4269f7186117b08a5fca04dbf08437c91;hpb=cd8f1de394591c85a635060b861105db9d51716c;p=rust-lightning

diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index d48dcea4..a0e06637 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -33,7 +33,7 @@ use ln::router::Route;
 use ln::msgs;
 use ln::msgs::LocalFeatures;
 use ln::onion_utils;
-use ln::msgs::{ChannelMessageHandler, DecodeError, HandleError};
+use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
 use chain::keysinterface::KeysInterface;
 use util::config::UserConfig;
 use util::{byte_utils, events};
@@ -118,7 +118,7 @@ impl HTLCSource {
 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
 pub(super) enum HTLCFailReason {
-	ErrorPacket {
+	LightningError {
 		err: msgs::OnionErrorPacket,
 	},
 	Reason {
@@ -143,21 +143,21 @@ type ShutdownResult = (Vec<Transaction>, Vec<(HTLCSource, PaymentHash)>);
 /// this struct and call handle_error!() on it.
 struct MsgHandleErrInternal {
-	err: msgs::HandleError,
+	err: msgs::LightningError,
 	shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
 }
 impl MsgHandleErrInternal {
 	#[inline]
 	fn send_err_msg_no_close(err: &'static str, channel_id: [u8; 32]) -> Self {
 		Self {
-			err: HandleError {
+			err: LightningError {
 				err,
-				action: Some(msgs::ErrorAction::SendErrorMessage {
+				action: msgs::ErrorAction::SendErrorMessage {
 					msg: msgs::ErrorMessage {
 						channel_id,
 						data: err.to_string()
 					},
-				}),
+				},
 			},
 			shutdown_finish: None,
 		}
	}
@@ -165,28 +165,28 @@ impl MsgHandleErrInternal {
 	#[inline]
 	fn ignore_no_close(err: &'static str) -> Self {
 		Self {
-			err: HandleError {
+			err: LightningError {
 				err,
-				action: Some(msgs::ErrorAction::IgnoreError),
+				action: msgs::ErrorAction::IgnoreError,
 			},
 			shutdown_finish: None,
 		}
 	}
 	#[inline]
-	fn from_no_close(err: msgs::HandleError) -> Self {
+	fn from_no_close(err: msgs::LightningError) -> Self {
 		Self { err, shutdown_finish: None }
 	}
 	#[inline]
 	fn from_finish_shutdown(err: &'static str, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
 		Self {
-			err: HandleError {
+			err: LightningError {
 				err,
-				action: Some(msgs::ErrorAction::SendErrorMessage {
+				action: msgs::ErrorAction::SendErrorMessage {
 					msg: msgs::ErrorMessage {
 						channel_id,
 						data: err.to_string()
 					},
-				}),
+				},
 			},
 			shutdown_finish: Some((shutdown_res, channel_update)),
 		}
@@ -195,18 +195,27 @@ impl MsgHandleErrInternal {
 	fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
 		Self {
 			err: match err {
-				ChannelError::Ignore(msg) => HandleError {
+				ChannelError::Ignore(msg) => LightningError {
 					err: msg,
-					action: Some(msgs::ErrorAction::IgnoreError),
+					action: msgs::ErrorAction::IgnoreError,
 				},
-				ChannelError::Close(msg) => HandleError {
+				ChannelError::Close(msg) => LightningError {
 					err: msg,
-					action: Some(msgs::ErrorAction::SendErrorMessage {
+					action: msgs::ErrorAction::SendErrorMessage {
 						msg: msgs::ErrorMessage {
 							channel_id,
 							data: msg.to_string()
 						},
-					}),
+					},
+				},
+				ChannelError::CloseDelayBroadcast { msg, .. } => LightningError {
+					err: msg,
+					action: msgs::ErrorAction::SendErrorMessage {
+						msg: msgs::ErrorMessage {
+							channel_id,
+							data: msg.to_string()
+						},
+					},
 				},
 			},
 			shutdown_finish: None,
@@ -341,6 +350,12 @@ pub struct ChannelManager {
 	logger: Arc<Logger>,
 }
 
+/// The amount of time we require our counterparty to wait before claiming their money (ie the
+/// window during which we, or our watchtower, must check for them having broadcast a theft transaction).
+pub(crate) const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
+/// The amount of time we're willing to wait to claim money back to us.
+pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7;
+
 /// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
 /// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
 /// ie the node we forwarded the payment on to should always have enough room to reliably time out
@@ -441,6 +456,7 @@ macro_rules! break_chan_entry {
 				}
 				break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
 			},
+			Err(ChannelError::CloseDelayBroadcast { .. }) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
 		}
 	}
 }
@@ -460,6 +476,31 @@ macro_rules! try_chan_entry {
 				}
 				return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
 			},
+			Err(ChannelError::CloseDelayBroadcast { msg, update }) => {
+				log_error!($self, "Channel {} needs to be shut down, but its closing transactions were not broadcast due to {}", log_bytes!($entry.key()[..]), msg);
+				let (channel_id, mut chan) = $entry.remove_entry();
+				if let Some(short_id) = chan.get_short_channel_id() {
+					$channel_state.short_to_id.remove(&short_id);
+				}
+				if let Some(update) = update {
+					if let Err(e) = $self.monitor.add_update_monitor(update.get_funding_txo().unwrap(), update) {
+						match e {
+							// The upstream channel is dead, but we still want to fail backward HTLCs to save
+							// the downstream channels. In case of PermanentFailure we are not going to be able
+							// to claim the to_remote output on the remote commitment transaction anyway, so it
+							// makes no difference here: we are concerned with the HTLC circuit, not onchain funds.
+							ChannelMonitorUpdateErr::PermanentFailure => {},
+							ChannelMonitorUpdateErr::TemporaryFailure => {},
+						}
+					}
+				}
+				let mut shutdown_res = chan.force_shutdown();
+				if shutdown_res.0.len() >= 1 {
+					log_error!($self, "You have a toxic local commitment transaction {} available in the channel monitor, read the comment in ChannelMonitor::get_latest_local_commitment_txn to be informed of the manual action to take", shutdown_res.0[0].txid());
+				}
+				shutdown_res.0.clear();
+				return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok()))
+			}
 		}
 	}
 }
@@ -544,7 +585,10 @@ impl ChannelManager {
 	/// Non-proportional fees are fixed according to our risk using the provided fee estimator.
 	///
 	/// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
-	pub fn new(network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>, keys_manager: Arc<KeysInterface>, config: UserConfig) -> Result<Arc<ChannelManager>, secp256k1::Error> {
+	///
+	/// Users must provide the current blockchain height from which to track onchain channel
+	/// funding outpoints and send payments with reliable timelocks.
+	pub fn new(network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>, keys_manager: Arc<KeysInterface>, config: UserConfig, current_blockchain_height: usize) -> Result<Arc<ChannelManager>, secp256k1::Error> {
 		let secp_ctx = Secp256k1::new();
 
 		let res = Arc::new(ChannelManager {
@@ -555,7 +599,7 @@ impl ChannelManager {
 			chain_monitor,
 			tx_broadcaster,
 
-			latest_block_height: AtomicUsize::new(0), //TODO: Get an init value
+			latest_block_height: AtomicUsize::new(current_blockchain_height),
 			last_block_hash: Mutex::new(Default::default()),
 			secp_ctx,
 
@@ -968,9 +1012,9 @@ impl ChannelManager {
 
 	/// only fails if the channel does not yet have an assigned short_id
 	/// May be called with channel_state already locked!
-	fn get_channel_update(&self, chan: &Channel) -> Result<msgs::ChannelUpdate, HandleError> {
+	fn get_channel_update(&self, chan: &Channel) -> Result<msgs::ChannelUpdate, LightningError> {
 		let short_channel_id = match chan.get_short_channel_id() {
-			None => return Err(HandleError{err: "Channel not yet established", action: None}),
+			None => return Err(LightningError{err: "Channel not yet established", action: msgs::ErrorAction::IgnoreError}),
 			Some(id) => id,
 		};
 
@@ -1099,7 +1143,7 @@ impl ChannelManager {
 				match handle_error!(self, err) {
 					Ok(_) => unreachable!(),
 					Err(e) => {
-						if let Some(msgs::ErrorAction::IgnoreError) = e.action {
+						if let msgs::ErrorAction::IgnoreError = e.action {
 						} else {
 							log_error!(self, "Got bad keys: {}!", e.err);
 							let mut channel_state = self.channel_state.lock().unwrap();
@@ -1125,7 +1169,7 @@ impl ChannelManager {
 	pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
 		let _ = self.total_consistency_lock.read().unwrap();
 
-		let (chan, msg, chan_monitor) = {
+		let (mut chan, msg, chan_monitor) = {
 			let (res, chan) = {
 				let mut channel_state = self.channel_state.lock().unwrap();
 				match channel_state.by_id.remove(temporary_channel_id) {
@@ -1156,8 +1200,30 @@ impl ChannelManager {
 		};
 		// Because we have exclusive ownership of the channel here we can release the channel_state
 		// lock before add_update_monitor
-		if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-			unimplemented!();
+		if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+			match e {
+				ChannelMonitorUpdateErr::PermanentFailure => {
+					match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(), None))) {
+						Err(e) => {
+							log_error!(self, "Failed to store ChannelMonitor update for funding tx generation");
+							let mut channel_state = self.channel_state.lock().unwrap();
+							channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+								node_id: chan.get_their_node_id(),
+								action: e.action,
+							});
+							return;
+						},
+						Ok(()) => unreachable!(),
+					}
+				},
+				ChannelMonitorUpdateErr::TemporaryFailure => {
+					// It's completely fine to continue with a FundingCreated until the monitor
+					// update is persisted, as long as we don't generate the FundingBroadcastSafe
+					// event until the monitor has been safely persisted (as funding broadcast is
+					// not, in fact, safe until then).
+					chan.monitor_update_failed(false, false, Vec::new(), Vec::new());
+				},
+			}
 		}
 
 		let mut channel_state = self.channel_state.lock().unwrap();
@@ -1307,12 +1373,39 @@ impl ChannelManager {
 					let (commitment_msg, monitor) = match chan.get_mut().send_commitment() {
 						Ok(res) => res,
 						Err(e) => {
-							if let ChannelError::Ignore(_) = e {
-								panic!("Stated return value requirements in send_commitment() were not met");
+							// We surely failed send_commitment due to bad keys; in that case,
+							// close the channel and then send an error message to the peer.
+							let their_node_id = chan.get().get_their_node_id();
+							let err: Result<(), _> = match e {
+								ChannelError::Ignore(_) => {
+									panic!("Stated return value requirements in send_commitment() were not met");
+								},
+								ChannelError::Close(msg) => {
+									log_trace!(self, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
+									let (channel_id, mut channel) = chan.remove_entry();
+									if let Some(short_id) = channel.get_short_channel_id() {
+										channel_state.short_to_id.remove(&short_id);
+									}
+									Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(), self.get_channel_update(&channel).ok()))
+								},
+								ChannelError::CloseDelayBroadcast { .. } => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
+							};
+							match handle_error!(self, err) {
+								Ok(_) => unreachable!(),
+								Err(e) => {
+									if let msgs::ErrorAction::IgnoreError = e.action {
+									} else {
+										log_error!(self, "Got bad keys: {}!", e.err);
+										let mut channel_state = self.channel_state.lock().unwrap();
+										channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+											node_id: their_node_id,
+											action: e.action,
+										});
+									}
+									continue;
+								},
 							}
-							//TODO: Handle...this is bad!
-							continue;
-						},
+						}
 					};
 					if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
 						handle_errors.push((chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
@@ -1371,7 +1464,7 @@ impl ChannelManager {
 				match handle_error!(self, err) {
 					Ok(_) => {},
 					Err(e) => {
-						if let Some(msgs::ErrorAction::IgnoreError) = e.action {
+						if let msgs::ErrorAction::IgnoreError = e.action {
 						} else {
 							let mut channel_state = self.channel_state.lock().unwrap();
 							channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
@@ -1425,7 +1518,7 @@ impl ChannelManager {
 		log_trace!(self, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
 		mem::drop(channel_state_lock);
 		match &onion_error {
-			&HTLCFailReason::ErrorPacket { ref err } => {
+			&HTLCFailReason::LightningError { ref err } => {
 				#[cfg(test)]
 				let (channel_update, payment_retryable, onion_error_code) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
 				#[cfg(not(test))]
@@ -1478,8 +1571,8 @@ impl ChannelManager {
 						let packet = onion_utils::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode();
 						onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &packet)
 					},
-					HTLCFailReason::ErrorPacket { err } => {
-						log_trace!(self, "Failing HTLC with payment_hash {} backwards with pre-built ErrorPacket", log_bytes!(payment_hash.0));
+					HTLCFailReason::LightningError { err } => {
+						log_trace!(self, "Failing HTLC with payment_hash {} backwards with pre-built LightningError", log_bytes!(payment_hash.0));
 						onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data)
 					}
 				};
@@ -1597,7 +1690,7 @@ impl ChannelManager {
 				match handle_error!(self, err) {
 					Ok(_) => {},
 					Err(e) => {
-						if let Some(msgs::ErrorAction::IgnoreError) = e.action {
+						if let msgs::ErrorAction::IgnoreError = e.action {
 						} else {
 							let mut channel_state = self.channel_state.lock().unwrap();
 							channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
@@ -1621,6 +1714,7 @@ impl ChannelManager {
 		let mut close_results = Vec::new();
 		let mut htlc_forwards = Vec::new();
 		let mut htlc_failures = Vec::new();
+		let mut pending_events = Vec::new();
 		let _ = self.total_consistency_lock.read().unwrap();
 
 		{
@@ -1655,7 +1749,7 @@ impl ChannelManager {
 						ChannelMonitorUpdateErr::TemporaryFailure => true,
 					}
 				} else {
-					let (raa, commitment_update, order, pending_forwards, mut pending_failures) = channel.monitor_updating_restored();
+					let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored();
 					if !pending_forwards.is_empty() {
 						htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards));
 					}
@@ -1687,12 +1781,33 @@ impl ChannelManager {
 							handle_cs!();
 						},
 					}
+					if needs_broadcast_safe {
+						pending_events.push(events::Event::FundingBroadcastSafe {
+							funding_txo: channel.get_funding_txo().unwrap(),
+							user_channel_id: channel.get_user_id(),
+						});
+					}
+					if let Some(msg) = funding_locked {
+						pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+							node_id: channel.get_their_node_id(),
+							msg,
+						});
+						if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
+							pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+								node_id: channel.get_their_node_id(),
+								msg: announcement_sigs,
+							});
+						}
+						short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
+					}
 					true
 				}
 			} else { true }
 			});
 		}
 
+		self.pending_events.lock().unwrap().append(&mut pending_events);
+
 		for failure in htlc_failures.drain(..) {
 			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
 		}
@@ -1753,7 +1868,7 @@ impl ChannelManager {
 	}
 
 	fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
-		let ((funding_msg, monitor_update), chan) = {
+		let ((funding_msg, monitor_update), mut chan) = {
 			let mut channel_lock = self.channel_state.lock().unwrap();
 			let channel_state = channel_lock.borrow_parts();
 			match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
@@ -1769,8 +1884,23 @@ impl ChannelManager {
 		};
 		// Because we have exclusive ownership of the channel here we can release the channel_state
 		// lock before add_update_monitor
-		if let Err(_e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
-			unimplemented!();
+		if let Err(e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
+			match e {
+				ChannelMonitorUpdateErr::PermanentFailure => {
+					// Note that we reply with the new channel_id in error messages if we gave up on the
+					// channel, not the temporary_channel_id. This is compatible with ourselves, but the
+					// spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
+					// any messages referencing a previously-closed channel anyway.
+					return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(), None));
+				},
+				ChannelMonitorUpdateErr::TemporaryFailure => {
+					// There's no problem signing a counterparty's funding transaction if our monitor
+					// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
+					// accepted payment on yet. We do, however, need to wait to send our funding_locked
+					// until we have persisted our monitor.
+					chan.monitor_update_failed(false, false, Vec::new(), Vec::new());
+				},
+			}
 		}
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = channel_state_lock.borrow_parts();
@@ -1800,8 +1930,8 @@ impl ChannelManager {
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 				}
 				let chan_monitor = try_chan_entry!(self, chan.get_mut().funding_signed(&msg), channel_state, chan);
-				if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-					unimplemented!();
+				if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+					return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
 				}
 				(chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
 			},
@@ -2018,7 +2148,7 @@ impl ChannelManager {
 					//TODO: here and below MsgHandleErrInternal, #153 case
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 				}
-				try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() }), channel_state, chan);
+				try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), channel_state, chan);
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
 		}
@@ -2192,7 +2322,7 @@ impl ChannelManager {
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 				}
 				if !chan.get().is_usable() {
-					return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Got an announcement_signatures before we were ready for it", action: Some(msgs::ErrorAction::IgnoreError)}));
+					return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it", action: msgs::ErrorAction::IgnoreError}));
 				}
 
 				let our_node_id = self.get_our_node_id();
@@ -2345,7 +2475,7 @@ impl ChannelManager {
 				match handle_error!(self, err) {
 					Ok(_) => unreachable!(),
 					Err(e) => {
-						if let Some(msgs::ErrorAction::IgnoreError) = e.action {
+						if let msgs::ErrorAction::IgnoreError = e.action {
 						} else {
 							log_error!(self, "Got bad keys: {}!", e.err);
 							let mut channel_state = self.channel_state.lock().unwrap();
@@ -2438,7 +2568,7 @@ impl ChainListener for ChannelManager {
 				} else if let Err(e) = chan_res {
 					pending_msg_events.push(events::MessageSendEvent::HandleError {
 						node_id: channel.get_their_node_id(),
-						action: Some(msgs::ErrorAction::SendErrorMessage { msg: e }),
+						action: msgs::ErrorAction::SendErrorMessage { msg: e },
 					});
 					return false;
 				}
@@ -2526,82 +2656,82 @@ impl ChainListener for ChannelManager {
 
 impl ChannelMessageHandler for ChannelManager {
 	//TODO: Handle errors and close channel (or so)
-	fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), HandleError> {
+	fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), LightningError> {
 		let _ = self.total_consistency_lock.read().unwrap();
 		handle_error!(self, self.internal_open_channel(their_node_id, their_local_features, msg))
 	}
 
-	fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
+	fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), LightningError> {
 		let _ = self.total_consistency_lock.read().unwrap();
 		handle_error!(self, self.internal_accept_channel(their_node_id, their_local_features, msg))
 	}
 
-	fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), HandleError> {
+	fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), LightningError> {
 		let _ = self.total_consistency_lock.read().unwrap();
 		handle_error!(self, self.internal_funding_created(their_node_id, msg))
 	}
 
-	fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), HandleError> {
+	fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), LightningError> {
 		let _ = self.total_consistency_lock.read().unwrap();
 		handle_error!(self, self.internal_funding_signed(their_node_id, msg))
 	}
 
-	fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), HandleError> {
+	fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), LightningError> {
 		let _ = self.total_consistency_lock.read().unwrap();
 		handle_error!(self, self.internal_funding_locked(their_node_id, msg))
 	}
 
-	fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), HandleError> {
+	fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), LightningError> {
 		let _ = self.total_consistency_lock.read().unwrap();
 		handle_error!(self, self.internal_shutdown(their_node_id, msg))
 	}
 
-	fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), HandleError> {
+	fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), LightningError> {
 		let _ = self.total_consistency_lock.read().unwrap();
 		handle_error!(self, self.internal_closing_signed(their_node_id, msg))
 	}
 
-	fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), msgs::HandleError> {
+	fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), LightningError> {
 		let _ = self.total_consistency_lock.read().unwrap();
 		handle_error!(self, self.internal_update_add_htlc(their_node_id, msg))
 	}
 
-	fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
+	fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), LightningError> {
 		let _ = self.total_consistency_lock.read().unwrap();
 		handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg))
 	}
 
-	fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> {
+	fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), LightningError> {
 		let _ = self.total_consistency_lock.read().unwrap();
 		handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg))
 	}
 
-	fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> {
+	fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), LightningError> {
 		let _ = self.total_consistency_lock.read().unwrap();
 		handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg))
 	}
 
-	fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), HandleError> {
+	fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), LightningError> {
 		let _ = self.total_consistency_lock.read().unwrap();
 		handle_error!(self, self.internal_commitment_signed(their_node_id, msg))
 	}
 
-	fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), HandleError> {
+	fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), LightningError> {
 		let _ = self.total_consistency_lock.read().unwrap();
 		handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg))
 	}
 
-	fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> {
+	fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), LightningError> {
 		let _ = self.total_consistency_lock.read().unwrap();
 		handle_error!(self, self.internal_update_fee(their_node_id, msg))
 	}
 
-	fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> {
+	fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), LightningError> {
 		let _ = self.total_consistency_lock.read().unwrap();
 		handle_error!(self, self.internal_announcement_signatures(their_node_id, msg))
 	}
 
-	fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), HandleError> {
+	fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), LightningError> {
 		let _ = self.total_consistency_lock.read().unwrap();
 		handle_error!(self, self.internal_channel_reestablish(their_node_id, msg))
 	}
@@ -2846,7 +2976,7 @@ impl<R: ::std::io::Read> Readable<R> for HTLCSource {
 
 impl Writeable for HTLCFailReason {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
 		match self {
-			&HTLCFailReason::ErrorPacket { ref err } => {
+			&HTLCFailReason::LightningError { ref err } => {
 				0u8.write(writer)?;
 				err.write(writer)?;
 			},
@@ -2863,7 +2993,7 @@ impl Writeable for HTLCFailReason {
 impl<R: ::std::io::Read> Readable<R> for HTLCFailReason {
 	fn read(reader: &mut R) -> Result<HTLCFailReason, DecodeError> {
 		match <u8 as Readable<R>>::read(reader)? {
-			0 => Ok(HTLCFailReason::ErrorPacket { err: Readable::read(reader)? }),
+			0 => Ok(HTLCFailReason::LightningError { err: Readable::read(reader)? }),
 			1 => Ok(HTLCFailReason::Reason {
 				failure_code: Readable::read(reader)?,
 				data: Readable::read(reader)?,
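
The one externally visible API change in this patch is the new `current_blockchain_height` argument to `ChannelManager::new`, which replaces the old `AtomicUsize::new(0) //TODO` initialization of `latest_block_height`. A minimal caller-side sketch of the updated constructor follows; the module paths, `UserConfig::new()`, and the `build_manager` wrapper are assumptions for illustration from this era of the codebase, not part of the diff itself:

// Sketch only: assumes the crate paths below exist as named and that the
// caller already holds its chain/keys interfaces as Arc trait objects.
use std::sync::Arc;
use bitcoin::network::constants::Network;
use lightning::chain::chaininterface::{BroadcasterInterface, ChainWatchInterface, FeeEstimator};
use lightning::chain::keysinterface::KeysInterface;
use lightning::ln::channelmanager::ChannelManager;
use lightning::ln::channelmonitor::ManyChannelMonitor;
use lightning::util::config::UserConfig;
use lightning::util::logger::Logger;

fn build_manager(
	feeest: Arc<FeeEstimator>,
	monitor: Arc<ManyChannelMonitor>,
	chain_monitor: Arc<ChainWatchInterface>,
	tx_broadcaster: Arc<BroadcasterInterface>,
	logger: Arc<Logger>,
	keys_manager: Arc<KeysInterface>,
	current_blockchain_height: usize, // e.g. queried from your block source at startup
) -> Result<Arc<ChannelManager>, secp256k1::Error> {
	// Passing the real height seeds latest_block_height, so funding outpoints are
	// tracked and payment timelocks computed from the actual chain tip, not block 0.
	ChannelManager::new(Network::Bitcoin, feeest, monitor, chain_monitor,
	                    tx_broadcaster, logger, keys_manager, UserConfig::new(),
	                    current_blockchain_height)
}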