use chain::chaininterface::{BroadcasterInterface,ChainListener,FeeEstimator};
use chain::transaction::OutPoint;
use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
+use ln::features::{InitFeatures, NodeFeatures};
use ln::router::Route;
-use ln::features::InitFeatures;
use ln::msgs;
use ln::onion_utils;
use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
// our payment, which we can use to decode errors or inform the user that the payment was sent.
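+/// Routing info for an inbound HTLC: either we forward it over the channel with the given
+/// short_channel_id using the re-wrapped onion packet, or it is a payment to us, possibly
+/// carrying payment_data from the final onion hop.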
+#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+enum PendingHTLCRouting {
+ Forward {
+ onion_packet: msgs::OnionPacket,
+ short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
+ },
+ Receive {
+ payment_data: Option<msgs::FinalOnionHopData>,
+ },
+}
+
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
pub(super) struct PendingHTLCInfo {
- onion_packet: Option<msgs::OnionPacket>,
+ routing: PendingHTLCRouting,
incoming_shared_secret: [u8; 32],
payment_hash: PaymentHash,
- short_channel_id: u64,
pub(super) amt_to_forward: u64,
pub(super) outgoing_cltv_value: u32,
}
incoming_packet_shared_secret: [u8; 32],
}
+struct ClaimableHTLC {
+ prev_hop: HTLCPreviousHopData,
+ value: u64,
+ /// Filled in when the HTLC was received with a payment_secret packet, which contains a
+ /// total_msat (which may differ from value if this is a Multi-Path Payment) and a
+ /// payment_secret which prevents path-probing attacks and can associate different HTLCs which
+ /// are part of the same payment.
+ payment_data: Option<msgs::FinalOnionHopData>,
+}
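+// Note that several ClaimableHTLCs with the same payment_hash may accumulate in
+// claimable_htlcs below; for a Multi-Path Payment their values are expected to sum to the
+// total_msat carried in payment_data.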
+
/// Tracks the inbound corresponding to an outbound HTLC
#[derive(Clone, PartialEq)]
pub(super) enum HTLCSource {
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct PaymentPreimage(pub [u8;32]);
-type ShutdownResult = (Vec<Transaction>, Vec<(HTLCSource, PaymentHash)>);
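+// (the channel's funding outpoint, if set; a ChannelMonitorUpdate to apply to the channel's
+// monitor, broadcasting the latest local commitment transaction when force_shutdown was
+// called with `true`; and the HTLCs to fail backwards)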
+type ShutdownResult = (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>);
/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
/// guarantees are made about the existence of a channel with the short id here, nor the short
/// ids in the PendingHTLCInfo!
pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
- /// payment_hash -> Vec<(amount_received, htlc_source)> for tracking things that were to us and
- /// can be failed/claimed by the user
+ /// Tracks HTLCs that were destined for us and can be failed/claimed by the user
/// Note that while this is held in the same mutex as the channels themselves, no consistency
/// guarantees are made about the channels given here actually existing anymore by the time you
/// go to read them!
- pub(super) claimable_htlcs: HashMap<PaymentHash, Vec<(u64, HTLCPreviousHopData)>>,
+ claimable_htlcs: HashMap<PaymentHash, Vec<ClaimableHTLC>>,
/// Messages to send to peers - pushed to in the same lock that they are generated in (except
/// for broadcast messages, where ordering isn't as strict).
pub(super) pending_msg_events: Vec<events::MessageSendEvent>,
channel_state: Mutex<ChannelHolder<ChanSigner>>,
our_network_key: SecretKey,
+ /// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this
+ /// value increases strictly since we don't assume access to a time source.
+ last_node_announcement_serial: AtomicUsize,
+
/// The bulk of our storage will eventually be here (channels and message queues and the like).
/// If we are connected to a peer we always at least have an entry here, even if no channels
/// are currently open with that peer.
}
macro_rules! handle_error {
- ($self: ident, $internal: expr, $their_node_id: expr, $locked_channel_state: expr) => {
+ ($self: ident, $internal: expr, $their_node_id: expr) => {
match $internal {
Ok(msg) => Ok(msg),
Err(MsgHandleErrInternal { err, shutdown_finish }) => {
+ #[cfg(debug_assertions)]
+ {
+ // In testing, ensure there are no deadlocks where the lock is already held upon
+ // entering the macro.
+ assert!($self.channel_state.try_lock().is_ok());
+ }
+
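+ // Buffer any messages/events locally and only take the channel_state lock at the
+ // end, so callers may invoke this macro without already holding the lock.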
+ let mut msg_events = Vec::with_capacity(2);
+
if let Some((shutdown_res, update_option)) = shutdown_finish {
$self.finish_force_close_channel(shutdown_res);
if let Some(update) = update_option {
- $locked_channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
}
+
log_error!($self, "{}", err.err);
if let msgs::ErrorAction::IgnoreError = err.action {
- } else { $locked_channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: $their_node_id, action: err.action.clone() }); }
+ } else {
+ msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: $their_node_id,
+ action: err.action.clone()
+ });
+ }
+
+ if !msg_events.is_empty() {
+ $self.channel_state.lock().unwrap().pending_msg_events.append(&mut msg_events);
+ }
+
// Return the error in case the higher-level API needs one
Err(err)
},
if let Some(short_id) = chan.get_short_channel_id() {
$channel_state.short_to_id.remove(&short_id);
}
- break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
- },
+ break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
+ },
Err(ChannelError::CloseDelayBroadcast { .. }) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
}
}
if let Some(short_id) = chan.get_short_channel_id() {
$channel_state.short_to_id.remove(&short_id);
}
- return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+ return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
},
Err(ChannelError::CloseDelayBroadcast { msg, update }) => {
log_error!($self, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($entry.key()[..]), msg);
ChannelMonitorUpdateErr::TemporaryFailure => {},
}
}
- let mut shutdown_res = chan.force_shutdown();
- if shutdown_res.0.len() >= 1 {
- log_error!($self, "You have a toxic local commitment transaction {} avaible in channel monitor, read comment in ChannelMonitor::get_latest_local_commitment_txn to be informed of manual action to take", shutdown_res.0[0].txid());
- }
- shutdown_res.0.clear();
+ let shutdown_res = chan.force_shutdown(false);
return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok()))
}
}
// splitting hairs we'd prefer to claim payments that were to us, but we haven't
// given up the preimage yet, so might as well just wait until the payment is
// retried, avoiding the on-chain fees.
- let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()));
+ let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()));
res
},
ChannelMonitorUpdateErr::TemporaryFailure => {
}),
our_network_key: keys_manager.get_node_secret(),
+ last_node_announcement_serial: AtomicUsize::new(0),
+
per_peer_state: RwLock::new(HashMap::new()),
pending_events: Mutex::new(Vec::new()),
#[inline]
fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
- let (local_txn, mut failed_htlcs) = shutdown_res;
- log_trace!(self, "Finishing force-closure of channel with {} transactions to broadcast and {} HTLCs to fail", local_txn.len(), failed_htlcs.len());
+ let (funding_txo_option, monitor_update, mut failed_htlcs) = shutdown_res;
+ log_trace!(self, "Finishing force-closure of channel {} HTLCs to fail", failed_htlcs.len());
for htlc_source in failed_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
- for tx in local_txn {
- log_trace!(self, "Broadcast onchain {}", log_tx!(tx));
- self.tx_broadcaster.broadcast_transaction(&tx);
+ if let Some(funding_txo) = funding_txo_option {
+ // There isn't anything we can do if we get an update failure - we're already
+ // force-closing. Applying the monitor update to the required in-memory copy should
+ // broadcast the latest local state, which is the best we can do anyway. Thus, it is
+ // safe to ignore the result here.
+ let _ = self.monitor.update_monitor(funding_txo, monitor_update);
}
}
}
};
log_trace!(self, "Force-closing channel {}", log_bytes!(channel_id[..]));
- self.finish_force_close_channel(chan.force_shutdown());
+ self.finish_force_close_channel(chan.force_shutdown(true));
if let Ok(update) = self.get_channel_update(&chan) {
let mut channel_state = self.channel_state.lock().unwrap();
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
}
+ let payment_data = match next_hop_data.format {
+ msgs::OnionHopDataFormat::Legacy { .. } => None,
+ msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non-final data with an HMAC of 0", 0x4000 | 22, &[0;0]),
+ msgs::OnionHopDataFormat::FinalNode { payment_data } => payment_data,
+ };
+
// Note that we could obviously respond immediately with an update_fulfill_htlc
// message, however that would leak that we are the recipient of this payment, so
// instead we stay symmetric with the forwarding case, only responding (after a
// delay) once they've sent us a commitment_signed!
PendingHTLCStatus::Forward(PendingHTLCInfo {
- onion_packet: None,
+ routing: PendingHTLCRouting::Receive { payment_data },
payment_hash: msg.payment_hash.clone(),
- short_channel_id: 0,
incoming_shared_secret: shared_secret,
amt_to_forward: next_hop_data.amt_to_forward,
outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
let short_channel_id = match next_hop_data.format {
msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
- msgs::OnionHopDataFormat::FinalNode => {
+ msgs::OnionHopDataFormat::FinalNode { .. } => {
return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
},
};
PendingHTLCStatus::Forward(PendingHTLCInfo {
- onion_packet: Some(outgoing_packet),
+ routing: PendingHTLCRouting::Forward {
+ onion_packet: outgoing_packet,
+ short_channel_id: short_channel_id,
+ },
payment_hash: msg.payment_hash.clone(),
- short_channel_id: short_channel_id,
incoming_shared_secret: shared_secret,
amt_to_forward: next_hop_data.amt_to_forward,
outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
};
channel_state = Some(self.channel_state.lock().unwrap());
- if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref onion_packet, ref short_channel_id, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
- if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject them in the body here
+ if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
+ // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
+ // with a short_channel_id of 0. This is important as various things later assume
+ // short_channel_id is non-0 in any ::Forward.
+ if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
let forwarding_id = match id_option {
None => { // unknown_next_peer
let unsigned = msgs::UnsignedChannelUpdate {
chain_hash: self.genesis_hash,
short_channel_id: short_channel_id,
- timestamp: chan.get_channel_update_count(),
+ timestamp: chan.get_update_time_counter(),
flags: (!were_node_one) as u16 | ((!chan.is_live() as u16) << 1),
cltv_expiry_delta: CLTV_EXPIRY_DELTA,
htlc_minimum_msat: chan.get_our_htlc_minimum_msat(),
let _ = self.total_consistency_lock.read().unwrap();
- let mut channel_lock = self.channel_state.lock().unwrap();
let err: Result<(), _> = loop {
-
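+ // Take the lock inside the loop body so it is dropped before handle_error! runs
+ // below (handle_error! now takes channel_state itself, and debug-asserts it can).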
+ let mut channel_lock = self.channel_state.lock().unwrap();
let id = match channel_lock.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
Some(id) => id.clone(),
return Ok(());
};
- match handle_error!(self, err, route.hops.first().unwrap().pubkey, channel_lock) {
+ match handle_error!(self, err, route.hops.first().unwrap().pubkey) {
Ok(_) => unreachable!(),
Err(e) => { Err(APIError::ChannelUnavailable { err: e.err }) }
}
let _ = self.total_consistency_lock.read().unwrap();
let (mut chan, msg, chan_monitor) = {
- let mut channel_state = self.channel_state.lock().unwrap();
- let (res, chan) = match channel_state.by_id.remove(temporary_channel_id) {
+ let (res, chan) = match self.channel_state.lock().unwrap().by_id.remove(temporary_channel_id) {
Some(mut chan) => {
(chan.get_outbound_funding_created(funding_txo)
.map_err(|e| if let ChannelError::Close(msg) = e {
- MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(), None)
+ MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(true), None)
} else { unreachable!(); })
, chan)
},
None => return
};
- match handle_error!(self, res, chan.get_their_node_id(), channel_state) {
+ match handle_error!(self, res, chan.get_their_node_id()) {
Ok(funding_msg) => {
(chan, funding_msg.0, funding_msg.1)
},
if let Err(e) = self.monitor.add_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
match e {
ChannelMonitorUpdateErr::PermanentFailure => {
- {
- let mut channel_state = self.channel_state.lock().unwrap();
- match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(), None)), chan.get_their_node_id(), channel_state) {
- Err(_) => { return; },
- Ok(()) => unreachable!(),
- }
+ match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(true), None)), chan.get_their_node_id()) {
+ Err(_) => { return; },
+ Ok(()) => unreachable!(),
}
},
ChannelMonitorUpdateErr::TemporaryFailure => {
})
}
+ #[allow(dead_code)]
+ // Messages of up to 64KB should never end up more than half full with addresses, as that would
+ // be absurd. We ensure this by checking that at least 500 (our stated public contract on when
+ // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
+ // message...
+ const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (msgs::NetAddress::MAX_LEN as u32 + 1) / 2;
+ #[deny(const_err)]
+ #[allow(dead_code)]
+ // ...by failing to compile if the number of addresses that would be half of a message is
+ // smaller than 500:
+ const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 500;
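+ // The const pair above is a compile-time assertion: if the subtraction underflowed,
+ // evaluating STATIC_ASSERT would be a const error, which #[deny(const_err)] turns into
+ // a build failure. A minimal sketch of the same pattern (names illustrative):
+ //
+ //     #[deny(const_err)]
+ //     #[allow(dead_code)]
+ //     const ASSERT_A_GE_B: u32 = A - B; // compiles only if A >= B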
+
+ /// Generates a signed node_announcement from the given arguments and creates a
+ /// BroadcastNodeAnnouncement event. Note that such messages will be ignored unless peers have
+ /// seen a channel_announcement from us (ie unless we have public channels open).
+ ///
+ /// RGB is a node "color" and alias is a printable human-readable string to describe this node
+ /// to humans. They carry no in-protocol meaning.
+ ///
+ /// addresses represent the set (possibly empty) of socket addresses on which this node accepts
+ /// incoming connections. These will be broadcast to the network, publicly tying these
+ /// addresses together. If you wish to preserve user privacy, addresses should likely contain
+ /// only Tor Onion addresses.
+ ///
+ /// Panics if addresses is absurdly large (more than 500).
+ pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec<msgs::NetAddress>) {
+ let _ = self.total_consistency_lock.read().unwrap();
+
+ if addresses.len() > 500 {
+ panic!("More than half the message size was taken up by public addresses!");
+ }
+
+ let announcement = msgs::UnsignedNodeAnnouncement {
+ features: NodeFeatures::supported(),
+ timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel) as u32,
+ node_id: self.get_our_node_id(),
+ rgb, alias, addresses,
+ excess_address_data: Vec::new(),
+ excess_data: Vec::new(),
+ };
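+ // Per BOLT 7, the node_announcement signature commits to the double-SHA256 of the
+ // serialized contents (everything after the signature field).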
+ let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
+
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement {
+ msg: msgs::NodeAnnouncement {
+ signature: self.secp_ctx.sign(&msghash, &self.our_network_key),
+ contents: announcement
+ },
+ });
+ }
+
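+ // A hedged usage sketch (the `channel_manager` binding and values are illustrative,
+ // not part of this change):
+ //
+ //     channel_manager.broadcast_node_announcement(
+ //         [0x00, 0xff, 0x00], // RGB node color
+ //         [0u8; 32],          // 32-byte alias (e.g. zero-padded UTF-8 name)
+ //         Vec::new(),         // no publicly-reachable addresses
+ //     );
+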
/// Processes HTLCs which are pending waiting on random forward delay.
///
/// Should only really ever be called in response to a PendingHTLCsForwardable event.
let mut fail_htlc_msgs = Vec::new();
for forward_info in pending_forwards.drain(..) {
match forward_info {
- HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
- log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(forward_info.payment_hash.0), prev_short_channel_id, short_chan_id);
+ HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
+ routing: PendingHTLCRouting::Forward {
+ onion_packet, ..
+ }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value }, } => {
+ log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(payment_hash.0), prev_short_channel_id, short_chan_id);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+ incoming_packet_shared_secret: incoming_shared_secret,
});
- match chan.get_mut().send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
+ match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet) {
Err(e) => {
if let ChannelError::Ignore(msg) = e {
- log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(forward_info.payment_hash.0), msg);
+ log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
} else {
panic!("Stated return value requirements in send_htlc() were not met");
}
let chan_update = self.get_channel_update(chan.get()).unwrap();
- failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
+ failed_forwards.push((htlc_source, payment_hash, 0x1000 | 7, Some(chan_update)));
continue;
},
Ok(update_add) => {
}
}
},
+ HTLCForwardInfo::AddHTLC { .. } => {
+ panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
+ },
HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) {
if let Some(short_id) = channel.get_short_channel_id() {
channel_state.short_to_id.remove(&short_id);
}
- Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(), self.get_channel_update(&channel).ok()))
+ Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update(&channel).ok()))
},
ChannelError::CloseDelayBroadcast { .. } => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
};
- match handle_error!(self, err, their_node_id, channel_state) {
- Ok(_) => unreachable!(),
- Err(_) => { continue; },
- }
+ handle_errors.push((their_node_id, err));
+ continue;
}
};
if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
} else {
for forward_info in pending_forwards.drain(..) {
match forward_info {
- HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
- let prev_hop_data = HTLCPreviousHopData {
+ HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
+ routing: PendingHTLCRouting::Receive { payment_data },
+ incoming_shared_secret, payment_hash, amt_to_forward, .. }, } => {
+ let prev_hop = HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: forward_info.incoming_shared_secret,
- };
- match channel_state.claimable_htlcs.entry(forward_info.payment_hash) {
- hash_map::Entry::Occupied(mut entry) => entry.get_mut().push((forward_info.amt_to_forward, prev_hop_data)),
- hash_map::Entry::Vacant(entry) => { entry.insert(vec![(forward_info.amt_to_forward, prev_hop_data)]); },
+ incoming_packet_shared_secret: incoming_shared_secret,
};
+ channel_state.claimable_htlcs.entry(payment_hash).or_insert(Vec::new()).push(ClaimableHTLC {
+ prev_hop,
+ value: amt_to_forward,
+ payment_data,
+ });
new_events.push(events::Event::PaymentReceived {
- payment_hash: forward_info.payment_hash,
- amt: forward_info.amt_to_forward,
+ payment_hash: payment_hash,
+ amt: amt_to_forward,
});
},
+ HTLCForwardInfo::AddHTLC { .. } => {
+ panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
+ },
HTLCForwardInfo::FailHTLC { .. } => {
panic!("Got pending fail of our own HTLC");
}
};
}
- if handle_errors.len() > 0 {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- for (their_node_id, err) in handle_errors.drain(..) {
- let _ = handle_error!(self, err, their_node_id, channel_state_lock);
- }
+ for (their_node_id, err) in handle_errors.drain(..) {
+ let _ = handle_error!(self, err, their_node_id);
}
if new_events.is_empty() { return }
let mut channel_state = Some(self.channel_state.lock().unwrap());
let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
if let Some(mut sources) = removed_source {
- for (recvd_value, htlc_with_hash) in sources.drain(..) {
+ for htlc in sources.drain(..) {
if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
- HTLCSource::PreviousHopData(htlc_with_hash), payment_hash,
- HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: byte_utils::be64_to_array(recvd_value).to_vec() });
+ HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
+ HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: byte_utils::be64_to_array(htlc.value).to_vec() });
}
true
} else { false }
let mut channel_state = Some(self.channel_state.lock().unwrap());
let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
if let Some(mut sources) = removed_source {
- for (received_amount, htlc_with_hash) in sources.drain(..) {
+ for htlc in sources.drain(..) {
if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
- if received_amount < expected_amount || received_amount > expected_amount * 2 {
- let mut htlc_msat_data = byte_utils::be64_to_array(received_amount).to_vec();
+ if htlc.value < expected_amount || htlc.value > expected_amount * 2 {
+ let mut htlc_msat_data = byte_utils::be64_to_array(htlc.value).to_vec();
let mut height_data = byte_utils::be32_to_array(self.latest_block_height.load(Ordering::Acquire) as u32).to_vec();
htlc_msat_data.append(&mut height_data);
self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
- HTLCSource::PreviousHopData(htlc_with_hash), &payment_hash,
+ HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_data });
} else {
- self.claim_funds_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_preimage);
+ self.claim_funds_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc.prev_hop), payment_preimage);
}
}
true
return;
};
- let _ = handle_error!(self, err, their_node_id, channel_state_lock);
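+ // Release the lock before calling handle_error!, which now takes channel_state itself.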
+ mem::drop(channel_state_lock);
+ let _ = handle_error!(self, err, their_node_id);
}
/// Gets the node_id held by this ChannelManager
// channel, not the temporary_channel_id. This is compatible with ourselves, but the
// spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
// any messages referencing a previously-closed channel anyway.
- return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(), None));
+ return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(true), None));
},
ChannelMonitorUpdateErr::TemporaryFailure => {
// There's no problem signing a counterparty's funding transaction if our monitor
forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS))
}
for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
- match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
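+ // HTLCs to be received by us are filed under the sentinel short_channel_id 0, which
+ // no real channel can use; process_pending_htlc_forwards relies on this to tell
+ // Receives apart from Forwards.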
+ match channel_state.forward_htlcs.entry(match forward_info.routing {
+ PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
+ PendingHTLCRouting::Receive { .. } => 0,
+ }) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info });
},
#[doc(hidden)]
pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> {
let _ = self.total_consistency_lock.read().unwrap();
- let mut channel_state_lock = self.channel_state.lock().unwrap();
let their_node_id;
let err: Result<(), _> = loop {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
match channel_state.by_id.entry(channel_id) {
return Ok(())
};
- match handle_error!(self, err, their_node_id, channel_state_lock) {
+ match handle_error!(self, err, their_node_id) {
Ok(_) => unreachable!(),
Err(e) => { Err(APIError::APIMisuseError { err: e.err })}
}
// It looks like our counterparty went on-chain. We go ahead and
// broadcast our latest local state as well here, just in case its
// some kind of SPV attack, though we expect these to be dropped.
- failed_channels.push(channel.force_shutdown());
+ failed_channels.push(channel.force_shutdown(true));
if let Ok(update) = self.get_channel_update(&channel) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
if let Some(short_id) = channel.get_short_channel_id() {
short_to_id.remove(&short_id);
}
- failed_channels.push(channel.force_shutdown());
// If would_broadcast_at_height() is true, the channel_monitor will broadcast
// the latest local tx for us, so we should skip that here (it doesn't really
// hurt anything, but does make tests a bit simpler).
- failed_channels.last_mut().unwrap().0 = Vec::new();
+ failed_channels.push(channel.force_shutdown(false));
if let Ok(update) = self.get_channel_update(&channel) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
}
self.latest_block_height.store(height as usize, Ordering::Release);
*self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash;
+ loop {
+ // Update last_node_announcement_serial to be the max of its current value and the
+ // block timestamp. This should keep us close to the current time without relying on
+ // having an explicit local time source.
+ // Just in case we end up in a race, we loop until we either successfully update
+ // last_node_announcement_serial or decide we don't need to.
+ let old_serial = self.last_node_announcement_serial.load(Ordering::Acquire);
+ if old_serial >= header.time as usize { break; }
+ if self.last_node_announcement_serial.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
+ break;
+ }
+ }
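+ // (AcqRel on a successful exchange pairs with the Acquire loads; Relaxed suffices on
+ // failure since we simply re-load and retry.)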
}
/// We force-close the channel without letting our counterparty participate in the shutdown
if let Some(short_id) = v.get_short_channel_id() {
short_to_id.remove(&short_id);
}
- failed_channels.push(v.force_shutdown());
+ failed_channels.push(v.force_shutdown(true));
if let Ok(update) = self.get_channel_update(&v) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
{
fn handle_open_channel(&self, their_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_open_channel(their_node_id, their_features, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_open_channel(their_node_id, their_features, msg), *their_node_id);
}
fn handle_accept_channel(&self, their_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::AcceptChannel) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_accept_channel(their_node_id, their_features, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_accept_channel(their_node_id, their_features, msg), *their_node_id);
}
fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_funding_created(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_funding_created(their_node_id, msg), *their_node_id);
}
fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_funding_signed(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_funding_signed(their_node_id, msg), *their_node_id);
}
fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_funding_locked(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_funding_locked(their_node_id, msg), *their_node_id);
}
fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_shutdown(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_shutdown(their_node_id, msg), *their_node_id);
}
fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_closing_signed(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_closing_signed(their_node_id, msg), *their_node_id);
}
fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_update_add_htlc(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_update_add_htlc(their_node_id, msg), *their_node_id);
}
fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_update_fulfill_htlc(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), *their_node_id);
}
fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_update_fail_htlc(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), *their_node_id);
}
fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_update_fail_malformed_htlc(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg), *their_node_id);
}
fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_commitment_signed(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_commitment_signed(their_node_id, msg), *their_node_id);
}
fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_revoke_and_ack(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg), *their_node_id);
}
fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_update_fee(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_update_fee(their_node_id, msg), *their_node_id);
}
fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_announcement_signatures(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), *their_node_id);
}
fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
let _ = self.total_consistency_lock.read().unwrap();
- let res = self.internal_channel_reestablish(their_node_id, msg);
- if res.is_err() {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let _ = handle_error!(self, res, *their_node_id, channel_state_lock);
- }
+ let _ = handle_error!(self, self.internal_channel_reestablish(their_node_id, msg), *their_node_id);
}
fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) {
if let Some(short_id) = chan.get_short_channel_id() {
short_to_id.remove(&short_id);
}
- failed_channels.push(chan.force_shutdown());
+ failed_channels.push(chan.force_shutdown(true));
if let Ok(update) = self.get_channel_update(&chan) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
&events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != their_node_id,
&events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != their_node_id,
&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
+ &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
&events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != their_node_id,
&events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
impl Writeable for PendingHTLCInfo {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
- self.onion_packet.write(writer)?;
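+ // The routing variant is serialized with a one-byte tag (0 = Forward, 1 = Receive);
+ // Readable below must keep these tags in sync.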
+ match &self.routing {
+ &PendingHTLCRouting::Forward { ref onion_packet, ref short_channel_id } => {
+ 0u8.write(writer)?;
+ onion_packet.write(writer)?;
+ short_channel_id.write(writer)?;
+ },
+ &PendingHTLCRouting::Receive { ref payment_data } => {
+ 1u8.write(writer)?;
+ payment_data.write(writer)?;
+ },
+ }
self.incoming_shared_secret.write(writer)?;
self.payment_hash.write(writer)?;
- self.short_channel_id.write(writer)?;
self.amt_to_forward.write(writer)?;
self.outgoing_cltv_value.write(writer)?;
Ok(())
impl Readable for PendingHTLCInfo {
fn read<R: ::std::io::Read>(reader: &mut R) -> Result<PendingHTLCInfo, DecodeError> {
Ok(PendingHTLCInfo {
- onion_packet: Readable::read(reader)?,
+ routing: match Readable::read(reader)? {
+ 0u8 => PendingHTLCRouting::Forward {
+ onion_packet: Readable::read(reader)?,
+ short_channel_id: Readable::read(reader)?,
+ },
+ 1u8 => PendingHTLCRouting::Receive {
+ payment_data: Readable::read(reader)?,
+ },
+ _ => return Err(DecodeError::InvalidValue),
+ },
incoming_shared_secret: Readable::read(reader)?,
payment_hash: Readable::read(reader)?,
- short_channel_id: Readable::read(reader)?,
amt_to_forward: Readable::read(reader)?,
outgoing_cltv_value: Readable::read(reader)?,
})
incoming_packet_shared_secret
});
+impl_writeable!(ClaimableHTLC, 0, {
+ prev_hop,
+ value,
+ payment_data
+});
+
impl Writeable for HTLCSource {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
match self {
for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() {
payment_hash.write(writer)?;
(previous_hops.len() as u64).write(writer)?;
- for &(recvd_amt, ref previous_hop) in previous_hops.iter() {
- recvd_amt.write(writer)?;
- previous_hop.write(writer)?;
+ for htlc in previous_hops.iter() {
+ htlc.write(writer)?;
}
}
peer_state.latest_features.write(writer)?;
}
+ (self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?;
+
Ok(())
}
}
let latest_block_height: u32 = Readable::read(reader)?;
let last_block_hash: Sha256dHash = Readable::read(reader)?;
- let mut closed_channels = Vec::new();
+ let mut failed_htlcs = Vec::new();
let channel_count: u64 = Readable::read(reader)?;
let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
- if channel.get_cur_local_commitment_transaction_number() != monitor.get_cur_local_commitment_number() ||
- channel.get_revoked_remote_commitment_transaction_number() != monitor.get_min_seen_secret() ||
- channel.get_cur_remote_commitment_transaction_number() != monitor.get_cur_remote_commitment_number() ||
- channel.get_latest_monitor_update_id() != monitor.get_latest_update_id() {
- let mut force_close_res = channel.force_shutdown();
- force_close_res.0 = monitor.get_latest_local_commitment_txn();
- closed_channels.push(force_close_res);
+ if channel.get_cur_local_commitment_transaction_number() < monitor.get_cur_local_commitment_number() ||
+ channel.get_revoked_remote_commitment_transaction_number() < monitor.get_min_seen_secret() ||
+ channel.get_cur_remote_commitment_transaction_number() < monitor.get_cur_remote_commitment_number() ||
+ channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() {
+ // If the channel is ahead of the monitor, return InvalidValue:
+ return Err(DecodeError::InvalidValue);
+ } else if channel.get_cur_local_commitment_transaction_number() > monitor.get_cur_local_commitment_number() ||
+ channel.get_revoked_remote_commitment_transaction_number() > monitor.get_min_seen_secret() ||
+ channel.get_cur_remote_commitment_transaction_number() > monitor.get_cur_remote_commitment_number() ||
+ channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
+ // But if the channel is behind the monitor, close the channel:
+ let (_, _, mut new_failed_htlcs) = channel.force_shutdown(true);
+ failed_htlcs.append(&mut new_failed_htlcs);
+ monitor.broadcast_latest_local_commitment_txn(&args.tx_broadcaster);
} else {
if let Some(short_channel_id) = channel.get_short_channel_id() {
short_to_id.insert(short_channel_id, channel.channel_id());
for (ref funding_txo, ref mut monitor) in args.channel_monitors.iter_mut() {
if !funding_txo_set.contains(funding_txo) {
- closed_channels.push((monitor.get_latest_local_commitment_txn(), Vec::new()));
+ monitor.broadcast_latest_local_commitment_txn(&args.tx_broadcaster);
}
}
let previous_hops_len: u64 = Readable::read(reader)?;
let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, 2));
for _ in 0..previous_hops_len {
- previous_hops.push((Readable::read(reader)?, Readable::read(reader)?));
+ previous_hops.push(Readable::read(reader)?);
}
claimable_htlcs.insert(payment_hash, previous_hops);
}
per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
}
+ let last_node_announcement_serial: u32 = Readable::read(reader)?;
+
let channel_manager = ChannelManager {
genesis_hash,
fee_estimator: args.fee_estimator,
}),
our_network_key: args.keys_manager.get_node_secret(),
+ last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize),
+
per_peer_state: RwLock::new(per_peer_state),
pending_events: Mutex::new(Vec::new()),
default_configuration: args.default_config,
};
- for close_res in closed_channels.drain(..) {
- channel_manager.finish_force_close_channel(close_res);
- //TODO: Broadcast channel update for closed channels, but only after we've made a
- //connection or two.
+ for htlc_source in failed_htlcs.drain(..) {
+ channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
+ //TODO: Broadcast channel update for closed channels, but only after we've made a
+ //connection or two.
+
Ok((last_block_hash.clone(), channel_manager))
}
}