use chain::chaininterface::{BroadcasterInterface, ChainListener, ChainWatchInterface, FeeEstimator};
use chain::transaction::OutPoint;
use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS, HTLC_FAIL_ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use ln::router::Route;
use ln::msgs;
+use ln::msgs::LocalFeatures;
use ln::onion_utils;
use ln::msgs::{ChannelMessageHandler, DecodeError, HandleError};
use chain::keysinterface::KeysInterface;
use util::config::UserConfig;
-use util::{byte_utils, events, rng};
+use util::{byte_utils, events};
use util::ser::{Readable, ReadableArgs, Writeable, Writer};
use util::chacha20::ChaCha20;
use util::logger::Logger;
use std::io::Cursor;
use std::sync::{Arc, Mutex, MutexGuard, RwLock};
use std::sync::atomic::{AtomicUsize, Ordering};
-use std::time::{Instant,Duration};
+use std::time::Duration;
// We hold various information about HTLC relay in the HTLC objects in Channel itself:
//
},
}),
},
+ ChannelError::CloseDelayBroadcast { msg, .. } => HandleError {
+ err: msg,
+ action: Some(msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage {
+ channel_id,
+ data: msg.to_string()
+ },
+ }),
+ },
},
shutdown_finish: None,
}
}
}
-/// We hold back HTLCs we intend to relay for a random interval in the range (this, 5*this). This
-/// provides some limited amount of privacy. Ideally this would range from somewhere like 1 second
-/// to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly. We could
-/// probably increase this significantly.
-const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u32 = 50;
+/// We hold back HTLCs we intend to relay for a random interval greater than this (see
+/// Event::PendingHTLCsForwardable for the API guidelines on how long to wait).
+/// This provides some limited privacy. Ideally this would range from somewhere like one
+/// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
+const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
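// A minimal sketch of the event-driven forwarding flow this constant feeds into: when HTLCs are
// queued for relay, the user receives an Event::PendingHTLCsForwardable carrying a
// time_forwardable Duration (at least MIN_HTLC_RELAY_HOLDING_CELL_MILLIS) and should wait
// roughly that long, ideally adding their own random jitter, before triggering the forward. The
// event-loop shape below is an assumption about integrator code, not an API defined here.
//
// match event {
//     events::Event::PendingHTLCsForwardable { time_forwardable } => {
//         // Sleep (or schedule a timer) for at least the indicated interval...
//         std::thread::sleep(time_forwardable);
//         // ...then actually relay the held-back HTLCs.
//         channel_manager.process_pending_htlc_forwards();
//     },
//     _ => { /* handle other events */ },
// }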
pub(super) enum HTLCForwardInfo {
AddHTLC {
pub(super) struct ChannelHolder {
pub(super) by_id: HashMap<[u8; 32], Channel>,
pub(super) short_to_id: HashMap<u64, [u8; 32]>,
- pub(super) next_forward: Instant,
/// short channel id -> forward infos. Key of 0 means payments received
/// Note that while this is held in the same mutex as the channels themselves, no consistency
/// guarantees are made about the existence of a channel with the short id here, nor the short
pub(super) struct MutChannelHolder<'a> {
pub(super) by_id: &'a mut HashMap<[u8; 32], Channel>,
pub(super) short_to_id: &'a mut HashMap<u64, [u8; 32]>,
- pub(super) next_forward: &'a mut Instant,
pub(super) forward_htlcs: &'a mut HashMap<u64, Vec<HTLCForwardInfo>>,
pub(super) claimable_htlcs: &'a mut HashMap<PaymentHash, Vec<(u64, HTLCPreviousHopData)>>,
pub(super) pending_msg_events: &'a mut Vec<events::MessageSendEvent>,
MutChannelHolder {
by_id: &mut self.by_id,
short_to_id: &mut self.short_to_id,
- next_forward: &mut self.next_forward,
forward_htlcs: &mut self.forward_htlcs,
claimable_htlcs: &mut self.claimable_htlcs,
pending_msg_events: &mut self.pending_msg_events,
logger: Arc<Logger>,
}
+/// The amount of time we require our counterparty to wait before claiming their money (ie the
+/// window within which we, or our watchtower, must check for them having broadcast a theft
+/// transaction).
+pub(crate) const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
+/// The maximum amount of time we're willing to wait to claim money back to us.
+pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7;
+
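// For intuition: assuming the usual ~10-minute Bitcoin block target, BREAKDOWN_TIMEOUT is
// 6 * 24 = 144 blocks, ie roughly one day, and MAX_LOCAL_BREAKDOWN_TIMEOUT is
// 6 * 24 * 7 = 1008 blocks, ie roughly one week.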
/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
/// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
/// ie the node we forwarded the payment on to should always have enough room to reliably time out
const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
-// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS +
-// HTLC_FAIL_ANTI_REORG_DELAY, ie that if the next-hop peer fails the HTLC within
-// HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have HTLC_FAIL_TIMEOUT_BLOCKS left to fail it
-// backwards ourselves before hitting the CLTV_CLAIM_BUFFER point and failing the channel
-// on-chain to time out the HTLC.
+// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY +
+// 2*LATENCY_GRACE_PERIOD_BLOCKS, ie that if the next-hop peer fails the HTLC within
+// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to time it out
+// on-chain, then wait ANTI_REORG_DELAY blocks to be reorg-safe on the outbound HTLC, and
+// still have LATENCY_GRACE_PERIOD_BLOCKS left to fail the corresponding HTLC backward
+// before our own inbound HTLC's CLTV expiry is hit.
#[deny(const_err)]
#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - 2*HTLC_FAIL_TIMEOUT_BLOCKS - CLTV_CLAIM_BUFFER - HTLC_FAIL_ANTI_REORG_DELAY;
+const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
// Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
// ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
#[deny(const_err)]
#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - HTLC_FAIL_TIMEOUT_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
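// The two constants above lean on a compile-time trick: they are u32s built purely by
// subtraction, so if CLTV_EXPIRY_DELTA were ever too small the subtraction would underflow,
// which #[deny(const_err)] turns into a build failure. As an illustration only (the real
// values live in channelmonitor.rs and may differ): with CLTV_EXPIRY_DELTA = 72,
// LATENCY_GRACE_PERIOD_BLOCKS = 3, CLTV_CLAIM_BUFFER = 6 and ANTI_REORG_DELAY = 6, the first
// check evaluates to 72 - 3 - 6 - 6 - 3 = 54, which compiles; shrinking CLTV_EXPIRY_DELTA
// below 18 would underflow and refuse to compile.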
macro_rules! secp_call {
( $res: expr, $err: expr ) => {
pub channel_value_satoshis: u64,
/// The user_id passed in to create_channel, or 0 if the channel was inbound.
pub user_id: u64,
+ /// The available outbound capacity for sending HTLCs to the remote peer. This does not include
+ /// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
+ /// available for inclusion in new outbound HTLCs). This further does not include any pending
+ /// outgoing HTLCs which are awaiting some other resolution to be sent.
+ pub outbound_capacity_msat: u64,
+ /// The available inbound capacity for the remote peer to send HTLCs to us. This does not
+ /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
+ /// available for inclusion in new inbound HTLCs).
+ /// Note that there are some corner cases not fully handled here, so the actual available
+ /// inbound capacity may be slightly higher than this.
+ pub inbound_capacity_msat: u64,
+ /// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
+ /// the peer is connected, and (c) no monitor update failure is pending resolution.
+ pub is_live: bool,
}
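// A small usage sketch for the new capacity fields (channel_manager here is an assumed
// Arc<ChannelManager> owned by the caller): summing the total amount we could currently send
// out, ignoring per-channel limits on any single HTLC's size.
//
// let total_outbound_msat: u64 = channel_manager.list_channels().iter()
//     .filter(|chan| chan.is_live)
//     .map(|chan| chan.outbound_capacity_msat)
//     .sum();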
macro_rules! handle_error {
}
break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
},
+ Err(ChannelError::CloseDelayBroadcast { .. }) => { panic!("CloseDelayBroadcast is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
}
}
}
}
return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
},
+ Err(ChannelError::CloseDelayBroadcast { msg, update }) => {
+ log_error!($self, "Channel {} needs to be shut down, but its closing transactions were not broadcast due to {}", log_bytes!($entry.key()[..]), msg);
+ let (channel_id, mut chan) = $entry.remove_entry();
+ if let Some(short_id) = chan.get_short_channel_id() {
+ $channel_state.short_to_id.remove(&short_id);
+ }
+ if let Some(update) = update {
+ if let Err(e) = $self.monitor.add_update_monitor(update.get_funding_txo().unwrap(), update) {
+ match e {
+ // Upstream channel is dead, but we want at least to fail backward HTLCs to save
+ // downstream channels. In case of PermanentFailure, we are not going to be able
+ // to claim the to_remote output on the remote commitment transaction; that makes
+ // no difference here, as we care about the HTLC circuit, not on-chain funds.
+ ChannelMonitorUpdateErr::PermanentFailure => {},
+ ChannelMonitorUpdateErr::TemporaryFailure => {},
+ }
+ }
+ }
+ let mut shutdown_res = chan.force_shutdown();
+ if !shutdown_res.0.is_empty() {
+ log_error!($self, "You have a toxic local commitment transaction {} available in your channel monitor; see the comment in ChannelMonitor::get_latest_local_commitment_txn for the manual action to take", shutdown_res.0[0].txid());
+ }
+ shutdown_res.0.clear();
+ return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok()))
+ }
}
}
}
/// Non-proportional fees are fixed according to our risk using the provided fee estimator.
///
/// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
- pub fn new(network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>,keys_manager: Arc<KeysInterface>, config: UserConfig) -> Result<Arc<ChannelManager>, secp256k1::Error> {
+ ///
+ /// Users must provide the current blockchain height so the ChannelManager can track on-chain
+ /// channel funding outpoints and send payments with reliable timelocks.
+ pub fn new(network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>, keys_manager: Arc<KeysInterface>, config: UserConfig, current_blockchain_height: usize) -> Result<Arc<ChannelManager>, secp256k1::Error> {
let secp_ctx = Secp256k1::new();
let res = Arc::new(ChannelManager {
chain_monitor,
tx_broadcaster,
- latest_block_height: AtomicUsize::new(0), //TODO: Get an init value
+ latest_block_height: AtomicUsize::new(current_blockchain_height),
last_block_hash: Mutex::new(Default::default()),
secp_ctx,
channel_state: Mutex::new(ChannelHolder{
by_id: HashMap::new(),
short_to_id: HashMap::new(),
- next_forward: Instant::now(),
forward_htlcs: HashMap::new(),
claimable_htlcs: HashMap::new(),
pending_msg_events: Vec::new(),
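// A hedged construction sketch for the new signature: every Arc'd dependency below
// (fee_estimator, monitor, chain_watcher, broadcaster, logger, keys_manager, user_config) is
// assumed to be a user-built implementation of the corresponding trait, and best_block_height
// is whatever height the user's chain source currently reports.
//
// let channel_manager = ChannelManager::new(
//     Network::Bitcoin, fee_estimator, monitor, chain_watcher, broadcaster, logger,
//     keys_manager, user_config, best_block_height as usize,
// )?;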
let channel_state = self.channel_state.lock().unwrap();
let mut res = Vec::with_capacity(channel_state.by_id.len());
for (channel_id, channel) in channel_state.by_id.iter() {
+ let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
res.push(ChannelDetails {
channel_id: (*channel_id).clone(),
short_channel_id: channel.get_short_channel_id(),
remote_network_id: channel.get_their_node_id(),
channel_value_satoshis: channel.get_value_satoshis(),
+ inbound_capacity_msat,
+ outbound_capacity_msat,
user_id: channel.get_user_id(),
+ is_live: channel.is_live(),
});
}
res
/// Gets the list of usable channels, in random order. Useful as an argument to
/// Router::get_route to ensure non-announced channels are used.
+ ///
+ /// These are guaranteed to have their is_live value set to true; see the documentation for
+ /// ChannelDetails::is_live for the exact criteria.
pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
let channel_state = self.channel_state.lock().unwrap();
let mut res = Vec::with_capacity(channel_state.by_id.len());
// internal/external nomenclature, but that's ok cause that's probably what the user
// really wanted anyway.
if channel.is_live() {
+ let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
res.push(ChannelDetails {
channel_id: (*channel_id).clone(),
short_channel_id: channel.get_short_channel_id(),
remote_network_id: channel.get_their_node_id(),
channel_value_satoshis: channel.get_value_satoshis(),
+ inbound_capacity_msat,
+ outbound_capacity_msat,
user_id: channel.get_user_id(),
+ is_live: true,
});
}
}
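// A sketch of the intended consumer: hand usable channels to the router as first-hop
// candidates, optionally pre-filtering on the new outbound_capacity_msat field. Both amt_msat
// and the eventual router call are assumptions about caller code, not APIs shown here.
//
// let first_hops: Vec<ChannelDetails> = channel_manager.list_usable_channels()
//     .into_iter()
//     .filter(|chan| chan.outbound_capacity_msat >= amt_msat)
//     .collect();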
let pending_forward_info = if next_hop_data.hmac == [0; 32] {
// OUR PAYMENT!
// final_expiry_too_soon
- if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 {
+ if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
}
// final_incorrect_htlc_amount
break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
}
let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
- // We want to have at least HTLC_FAIL_TIMEOUT_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration
- if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon
+ // We want at least LATENCY_GRACE_PERIOD_BLOCKS to fail the HTLC backwards before we have to go on-chain, which we must do CLTV_CLAIM_BUFFER blocks before expiration
+ if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS as u32 { // expiry_too_soon
break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
}
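// For example (illustrative values only; the real constants live in channelmonitor.rs): with
// cur_height = 600_000, CLTV_CLAIM_BUFFER = 6 and LATENCY_GRACE_PERIOD_BLOCKS = 3, any HTLC
// whose cltv_expiry is <= 600_009 is rejected here as expiry_too_soon.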
if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
let _ = self.total_consistency_lock.read().unwrap();
- let (chan, msg, chan_monitor) = {
+ let (mut chan, msg, chan_monitor) = {
let (res, chan) = {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.remove(temporary_channel_id) {
};
// Because we have exclusive ownership of the channel here we can release the channel_state
// lock before add_update_monitor
- if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
- unimplemented!();
+ if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ match e {
+ ChannelMonitorUpdateErr::PermanentFailure => {
+ match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(), None))) {
+ Err(e) => {
+ log_error!(self, "Failed to store ChannelMonitor update for funding tx generation");
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: chan.get_their_node_id(),
+ action: e.action,
+ });
+ return;
+ },
+ Ok(()) => unreachable!(),
+ }
+ },
+ ChannelMonitorUpdateErr::TemporaryFailure => {
+ // It's completely fine to continue with a FundingCreated until the monitor
+ // update is persisted, as long as we don't generate the FundingBroadcastSafe
+ // event until the monitor has been safely persisted (as funding broadcast is
+ // not, in fact, safe until then).
+ chan.monitor_update_failed(false, false, Vec::new(), Vec::new());
+ },
+ }
}
let mut channel_state = self.channel_state.lock().unwrap();
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = channel_state_lock.borrow_parts();
- if cfg!(not(feature = "fuzztarget")) && Instant::now() < *channel_state.next_forward {
- return;
- }
-
for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
if short_chan_id != 0 {
let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) {
let mut forward_event = None;
if channel_state_lock.forward_htlcs.is_empty() {
- forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
- channel_state_lock.next_forward = forward_event.unwrap();
+ forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
}
match channel_state_lock.forward_htlcs.entry(short_channel_id) {
hash_map::Entry::Occupied(mut entry) => {
let mut close_results = Vec::new();
let mut htlc_forwards = Vec::new();
let mut htlc_failures = Vec::new();
+ let mut pending_events = Vec::new();
let _ = self.total_consistency_lock.read().unwrap();
{
ChannelMonitorUpdateErr::TemporaryFailure => true,
}
} else {
- let (raa, commitment_update, order, pending_forwards, mut pending_failures) = channel.monitor_updating_restored();
+ let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored();
if !pending_forwards.is_empty() {
htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards));
}
handle_cs!();
},
}
+ if needs_broadcast_safe {
+ pending_events.push(events::Event::FundingBroadcastSafe {
+ funding_txo: channel.get_funding_txo().unwrap(),
+ user_channel_id: channel.get_user_id(),
+ });
+ }
+ if let Some(msg) = funding_locked {
+ pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+ node_id: channel.get_their_node_id(),
+ msg,
+ });
+ if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
+ pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+ node_id: channel.get_their_node_id(),
+ msg: announcement_sigs,
+ });
+ }
+ short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
+ }
true
}
} else { true }
});
}
+ self.pending_events.lock().unwrap().append(&mut pending_events);
+
for failure in htlc_failures.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
}
}
}
- fn internal_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
+ fn internal_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
if msg.chain_hash != self.genesis_hash {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash", msg.temporary_channel_id.clone()));
}
- let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), msg, 0, Arc::clone(&self.logger), &self.default_configuration)
+ let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), their_local_features, msg, 0, Arc::clone(&self.logger), &self.default_configuration)
.map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = channel_state_lock.borrow_parts();
Ok(())
}
- fn internal_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
+ fn internal_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
let (value, output_script, user_id) = {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = channel_lock.borrow_parts();
//TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
}
- try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_local_features), channel_state, chan);
(chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
},
//TODO: same as above
}
fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
- let ((funding_msg, monitor_update), chan) = {
+ let ((funding_msg, monitor_update), mut chan) = {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = channel_lock.borrow_parts();
match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
};
// Because we have exclusive ownership of the channel here we can release the channel_state
// lock before add_update_monitor
- if let Err(_e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
- unimplemented!();
+ if let Err(e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
+ match e {
+ ChannelMonitorUpdateErr::PermanentFailure => {
+ // Note that we reply with the new channel_id in error messages if we gave up on the
+ // channel, not the temporary_channel_id. This is compatible with ourselves, but the
+ // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
+ // any messages referencing a previously-closed channel anyway.
+ return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(), None));
+ },
+ ChannelMonitorUpdateErr::TemporaryFailure => {
+ // There's no problem signing a counterparty's funding transaction if our monitor
+ // hasn't been persisted to disk yet - we can't lose money on a transaction that we
+ // haven't accepted any payment over yet. We do, however, need to wait to send our
+ // funding_locked until the monitor has been persisted.
+ chan.monitor_update_failed(false, false, Vec::new(), Vec::new());
+ },
+ }
}
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = channel_state_lock.borrow_parts();
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
let chan_monitor = try_chan_entry!(self, chan.get_mut().funding_signed(&msg), channel_state, chan);
- if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
- unimplemented!();
+ if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
}
(chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
},
if !pending_forwards.is_empty() {
let mut channel_state = self.channel_state.lock().unwrap();
if channel_state.forward_htlcs.is_empty() {
- forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
- channel_state.next_forward = forward_event.unwrap();
+ forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
}
for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
}
/// We force-close the channel without letting our counterparty participate in the shutdown
- fn block_disconnected(&self, header: &BlockHeader) {
+ fn block_disconnected(&self, header: &BlockHeader, _: u32) {
let _ = self.total_consistency_lock.read().unwrap();
let mut failed_channels = Vec::new();
{
impl ChannelMessageHandler for ChannelManager {
//TODO: Handle errors and close channel (or so)
- fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), HandleError> {
+ fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), HandleError> {
let _ = self.total_consistency_lock.read().unwrap();
- handle_error!(self, self.internal_open_channel(their_node_id, msg))
+ handle_error!(self, self.internal_open_channel(their_node_id, their_local_features, msg))
}
- fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
+ fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
let _ = self.total_consistency_lock.read().unwrap();
- handle_error!(self, self.internal_accept_channel(their_node_id, msg))
+ handle_error!(self, self.internal_accept_channel(their_node_id, their_local_features, msg))
}
fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), HandleError> {
channel_state: Mutex::new(ChannelHolder {
by_id,
short_to_id,
- next_forward: Instant::now(),
forward_htlcs,
claimable_htlcs,
pending_msg_events: Vec::new(),