// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
use ln::{PaymentHash, PaymentPreimage, PaymentSecret};
-pub use ln::channel::CounterpartyForwardingInfo;
use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
use ln::features::{InitFeatures, NodeFeatures};
use routing::router::{Route, RouteHop};
use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner};
use util::config::UserConfig;
-use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use util::{byte_utils, events};
-use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
+use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
use util::chacha20::{ChaCha20, ChaChaReader};
use util::logger::{Logger, Level};
use util::errors::APIError;
struct MsgHandleErrInternal {
err: msgs::LightningError,
+ chan_id: Option<[u8; 32]>, // If Some, a channel of ours has been closed
shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
}
impl MsgHandleErrInternal {
},
},
},
+ chan_id: None,
shutdown_finish: None,
}
}
err,
action: msgs::ErrorAction::IgnoreError,
},
+ chan_id: None,
shutdown_finish: None,
}
}
#[inline]
fn from_no_close(err: msgs::LightningError) -> Self {
- Self { err, shutdown_finish: None }
+ Self { err, chan_id: None, shutdown_finish: None }
}
#[inline]
fn from_finish_shutdown(err: String, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
},
},
},
+ chan_id: Some(channel_id),
shutdown_finish: Some((shutdown_res, channel_update)),
}
}
},
},
},
+ chan_id: None,
shutdown_finish: None,
}
}
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+/// Information needed for constructing an invoice route hint for this channel.
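+///
+/// For illustration only (the values below are arbitrary assumptions, not defaults or
+/// recommendations), a user building such a hint might fill the struct in as:
+///
+/// ```ignore
+/// let forwarding_info = CounterpartyForwardingInfo {
+///     fee_base_msat: 1_000,
+///     fee_proportional_millionths: 100,
+///     cltv_expiry_delta: 72,
+/// };
+/// ```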
+#[derive(Clone, Debug, PartialEq)]
+pub struct CounterpartyForwardingInfo {
+ /// Base routing fee in millisatoshis.
+ pub fee_base_msat: u32,
+ /// Amount in millionths of a satoshi the channel will charge per transferred satoshi.
+ pub fee_proportional_millionths: u32,
+ /// The minimum difference in cltv_expiry between an incoming HTLC and its outgoing counterpart,
+ /// such that the outgoing HTLC is forwardable to this counterparty. See `msgs::ChannelUpdate`'s
+ /// `cltv_expiry_delta` for more details.
+ pub cltv_expiry_delta: u16,
+}
+
/// Channel parameters which apply to our counterparty. These are split out from [`ChannelDetails`]
/// to better separate parameters.
#[derive(Clone, Debug, PartialEq)]
($self: ident, $internal: expr, $counterparty_node_id: expr) => {
match $internal {
Ok(msg) => Ok(msg),
- Err(MsgHandleErrInternal { err, shutdown_finish }) => {
+ Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
#[cfg(debug_assertions)]
{
// In testing, ensure there are no deadlocks where the lock is already held upon
// entering the macro.
assert!($self.channel_state.try_lock().is_ok());
+ assert!($self.pending_events.try_lock().is_ok());
}
let mut msg_events = Vec::with_capacity(2);
msg: update
});
}
+ if let Some(channel_id) = chan_id {
+ $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id, reason: ClosureReason::ProcessingError { err: err.err.clone() } });
+ }
}
log_error!($self.logger, "{}", err.err);
msg: channel_update
});
}
+ self.pending_events.lock().unwrap().push(events::Event::ChannelClosed {
+ channel_id: *channel_id,
+ reason: ClosureReason::HolderForceClosed
+ });
}
break Ok(());
},
}
}
- fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<PublicKey, APIError> {
+ /// `peer_node_id` should be set when we receive a message from a peer, but not set when the
+ /// user force-closes the channel; this distinction is re-exposed as the `ChannelClosed` reason.
+ fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
let mut chan = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
if let Some(short_id) = chan.get().get_short_channel_id() {
channel_state.short_to_id.remove(&short_id);
}
+ let mut pending_events_lock = self.pending_events.lock().unwrap();
+ if peer_node_id.is_some() {
+ if let Some(peer_msg) = peer_msg {
+ pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() } });
+ }
+ } else {
+ pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::HolderForceClosed });
+ }
chan.remove_entry().1
} else {
return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
/// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- match self.force_close_channel_with_peer(channel_id, None) {
+ match self.force_close_channel_with_peer(channel_id, None, None) {
Ok(counterparty_node_id) => {
self.channel_state.lock().unwrap().pending_msg_events.push(
events::MessageSendEvent::HandleError {
if let Some(short_id) = channel.get_short_channel_id() {
channel_state.short_to_id.remove(&short_id);
}
+ // ChannelClosed event is generated by handle_error for us.
Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
},
ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
payment_hash,
rejected_by_dest: false,
network_update: None,
+ all_paths_failed: sessions.get().len() == 0,
#[cfg(test)]
error_code: None,
#[cfg(test)]
let mut session_priv_bytes = [0; 32];
session_priv_bytes.copy_from_slice(&session_priv[..]);
let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+ let mut all_paths_failed = false;
if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(mpp_id) {
if !sessions.get_mut().remove(&session_priv_bytes) {
log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
return;
}
if sessions.get().len() == 0 {
+ all_paths_failed = true;
sessions.remove();
}
} else {
payment_hash: payment_hash.clone(),
rejected_by_dest: !payment_retryable,
network_update,
+ all_paths_failed,
#[cfg(test)]
error_code: onion_error_code,
#[cfg(test)]
payment_hash: payment_hash.clone(),
rejected_by_dest: path.len() == 1,
network_update: None,
+ all_paths_failed,
#[cfg(test)]
error_code: Some(*failure_code),
#[cfg(test)]
let mut session_priv_bytes = [0; 32];
session_priv_bytes.copy_from_slice(&session_priv[..]);
let mut outbounds = self.pending_outbound_payments.lock().unwrap();
- if let Some(sessions) = outbounds.get_mut(&mpp_id) {
- if sessions.remove(&session_priv_bytes) {
- self.pending_events.lock().unwrap().push(
- events::Event::PaymentSent { payment_preimage }
- );
- if sessions.len() == 0 {
- outbounds.remove(&mpp_id);
- }
- } else {
- log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0));
- }
+ let found_payment = if let Some(mut sessions) = outbounds.remove(&mpp_id) {
+ sessions.remove(&session_priv_bytes)
+ } else { false };
+ if found_payment {
+ self.pending_events.lock().unwrap().push(
+ events::Event::PaymentSent { payment_preimage }
+ );
} else {
log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0));
}
msg: update
});
}
+ self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: msg.channel_id, reason: ClosureReason::CooperativeClosure });
}
Ok(())
}
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
},
- MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
+ MonitorEvent::CommitmentTxConfirmed(funding_outpoint) => {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
let by_id = &mut channel_state.by_id;
msg: update
});
}
+ self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::CommitmentTxConfirmed });
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: chan.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage {
Err(e) => {
let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+ // ChannelClosed event is generated by handle_error for us.
!close_channel
}
}
});
}
+ self.pending_events.lock().unwrap().push(events::Event::ChannelClosed {
+ channel_id: *channel_id,
+ reason: ClosureReason::CooperativeClosure
+ });
+
log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
self.tx_broadcaster.broadcast_transaction(&tx);
false
msg: update
});
}
+ self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: channel.channel_id(), reason: ClosureReason::CommitmentTxConfirmed });
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: channel.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage { msg: e },
msg: update
});
}
+ self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer });
false
} else {
true
if let Some(short_id) = chan.get_short_channel_id() {
short_to_id.remove(&short_id);
}
+ self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer });
return false;
} else {
no_channels_remain = false;
for chan in self.list_channels() {
if chan.counterparty.node_id == *counterparty_node_id {
// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
- let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id));
+ let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id), Some(&msg.data));
}
}
} else {
// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
- let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id));
+ let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id), Some(&msg.data));
}
}
}
(8, outgoing_cltv_value, required)
});
-impl_writeable_tlv_based_enum!(HTLCFailureMsg, ;
- (0, Relay),
- (1, Malformed),
-);
+
+impl Writeable for HTLCFailureMsg {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ match self {
+ HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id, htlc_id, reason }) => {
+ 0u8.write(writer)?;
+ channel_id.write(writer)?;
+ htlc_id.write(writer)?;
+ reason.write(writer)?;
+ },
+ HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+ channel_id, htlc_id, sha256_of_onion, failure_code
+ }) => {
+ 1u8.write(writer)?;
+ channel_id.write(writer)?;
+ htlc_id.write(writer)?;
+ sha256_of_onion.write(writer)?;
+ failure_code.write(writer)?;
+ },
+ }
+ Ok(())
+ }
+}
+
+impl Readable for HTLCFailureMsg {
+ fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ let id: u8 = Readable::read(reader)?;
+ match id {
+ 0 => {
+ Ok(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+ channel_id: Readable::read(reader)?,
+ htlc_id: Readable::read(reader)?,
+ reason: Readable::read(reader)?,
+ }))
+ },
+ 1 => {
+ Ok(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+ channel_id: Readable::read(reader)?,
+ htlc_id: Readable::read(reader)?,
+ sha256_of_onion: Readable::read(reader)?,
+ failure_code: Readable::read(reader)?,
+ }))
+ },
+ // In versions prior to 0.0.101, HTLCFailureMsg objects were written with type 0 or 1 but
+ // weren't length-prefixed and thus didn't support reading the TLV stream suffix of the network
+ // messages contained in the variants.
+ // In version 0.0.101, support for reading the length-prefixed variants (types 2 and 3 below)
+ // was added, and we should migrate to writing those variants once UpdateFailHTLC or
+ // UpdateFailMalformedHTLC get TLV fields.
+ 2 => {
+ let length: BigSize = Readable::read(reader)?;
+ let mut s = FixedLengthReader::new(reader, length.0);
+ let res = Readable::read(&mut s)?;
+ s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
+ Ok(HTLCFailureMsg::Relay(res))
+ },
+ 3 => {
+ let length: BigSize = Readable::read(reader)?;
+ let mut s = FixedLengthReader::new(reader, length.0);
+ let res = Readable::read(&mut s)?;
+ s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
+ Ok(HTLCFailureMsg::Malformed(res))
+ },
+ _ => Err(DecodeError::UnknownRequiredFeature),
+ }
+ }
+}
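+
+// Illustration only, not part of this change: once UpdateFailHTLC or UpdateFailMalformedHTLC
+// gain TLV fields, the write side above is expected to migrate to the length-prefixed types
+// read as 2 and 3, roughly along the lines of:
+//
+//   let encoded = msg.encode();
+//   2u8.write(writer)?;
+//   BigSize(encoded.len() as u64).write(writer)?;
+//   writer.write_all(&encoded)?;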
+
impl_writeable_tlv_based_enum!(PendingHTLCStatus, ;
(0, Forward),
(1, Fail),
let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut channel_closures = Vec::new();
for _ in 0..channel_count {
let mut channel: Channel<Signer> = Channel::read(reader, &args.keys_manager)?;
let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
failed_htlcs.append(&mut new_failed_htlcs);
monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
+ channel_closures.push(events::Event::ChannelClosed {
+ channel_id: channel.channel_id(),
+ reason: ClosureReason::OutdatedChannelManager
+ });
} else {
if let Some(short_channel_id) = channel.get_short_channel_id() {
short_to_id.insert(short_channel_id, channel.channel_id());
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
+ if !channel_closures.is_empty() {
+ pending_events_read.append(&mut channel_closures);
+ }
+
let channel_manager = ChannelManager {
genesis_hash,
fee_estimator: args.fee_estimator,
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
check_added_monitors!(nodes[0], 1);
- // There's an existing bug that generates a PaymentSent event for each MPP path, so handle that here.
+ // Note that successful MPP payments will generate a single PaymentSent event upon the first
+ // path's success. No further events will be generated for subsequent path successes.
let events = nodes[0].node.get_and_clear_pending_events();
match events[0] {
Event::PaymentSent { payment_preimage: ref preimage } => {
},
_ => panic!("Unexpected event"),
}
- match events[1] {
- Event::PaymentSent { payment_preimage: ref preimage } => {
- assert_eq!(payment_preimage, *preimage);
- },
- _ => panic!("Unexpected event"),
- }
}
#[test]