use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
use chain::transaction::OutPoint;
-use ln::channel::{Channel, ChannelError, ChannelKeys};
-use ln::channelmonitor::{ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
+use ln::channel::{Channel, ChannelError};
+use ln::channelmonitor::{ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
use ln::router::{Route,RouteHop};
use ln::msgs;
-use ln::msgs::{HandleError,ChannelMessageHandler};
+use ln::msgs::{ChannelMessageHandler, HandleError, RAACommitmentOrder};
+use chain::keysinterface::KeysInterface;
use util::{byte_utils, events, internal_traits, rng};
use util::sha2::Sha256;
use util::ser::{Readable, Writeable};
OutboundRoute {
route: Route,
session_priv: SecretKey,
+ /// Technically we can recalculate this from the route, but we cache it here to avoid
+ /// doing a double-pass on route when we get a failure back
+ first_hop_htlc_msat: u64,
},
}
#[cfg(test)]
HTLCSource::OutboundRoute {
route: Route { hops: Vec::new() },
session_priv: SecretKey::from_slice(&::secp256k1::Secp256k1::without_caps(), &[1; 32]).unwrap(),
+ first_hop_htlc_msat: 0,
}
}
}
}
}
+/// Passed to fail_htlc_backwards to indicate the reason to fail the payment
+/// after a PaymentReceived event.
+#[derive(PartialEq)]
+pub enum PaymentFailReason {
+ /// Indicates the preimage for payment_hash is not known after a PaymentReceived event
+ PreimageUnknown,
+ /// Indicates the payment amount is incorrect (received is < expected or > 2*expected) after a PaymentReceived event
+ AmountMismatch,
+}
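+// A usage sketch (hypothetical variable name payment_hash): after a PaymentReceived
+// event whose preimage we do not know, a user calls
+// channel_manager.fail_htlc_backwards(&payment_hash, PaymentFailReason::PreimageUnknown),
+// which fails the HTLCs back with unknown_payment_hash (PERM|15); AmountMismatch
+// fails them with incorrect_payment_amount (PERM|16) instead.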
+
/// We hold back HTLCs we intend to relay for a random interval in the range (this, 5*this). This
/// provides some limited amount of privacy. Ideally this would range from somewhere like 1 second
/// to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly. We could
pending_events: Mutex<Vec<events::Event>>,
+ keys_manager: Arc<KeysInterface>,
+
logger: Arc<Logger>,
}
/// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
/// CLTV_CLAIM_BUFFER point (we static assert that its at least 3 blocks more).
const CLTV_EXPIRY_DELTA: u16 = 6 * 24 * 2; //TODO?
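+// If an HTLC's CLTV expiry is more than roughly this many blocks (about one week) past
+// the current height, we refuse to forward it and fail it back with expiry_too_far (21);
+// see the forwarding checks below.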
+const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + 2*HTLC_FAIL_TIMEOUT_BLOCKS, ie that
// if the next-hop peer fails the HTLC within HTLC_FAIL_TIMEOUT_BLOCKS then we'll still have
/// Non-proportional fees are fixed according to our risk using the provided fee estimator.
///
/// Panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
- pub fn new(our_network_key: SecretKey, fee_proportional_millionths: u32, announce_channels_publicly: bool, network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>) -> Result<Arc<ChannelManager>, secp256k1::Error> {
+ pub fn new(fee_proportional_millionths: u32, announce_channels_publicly: bool, network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>, keys_manager: Arc<KeysInterface>) -> Result<Arc<ChannelManager>, secp256k1::Error> {
let secp_ctx = Secp256k1::new();
let res = Arc::new(ChannelManager {
forward_htlcs: HashMap::new(),
claimable_htlcs: HashMap::new(),
}),
- our_network_key,
+ our_network_key: keys_manager.get_node_secret(),
pending_events: Mutex::new(Vec::new()),
+ keys_manager,
+
logger,
});
let weak_res = Arc::downgrade(&res);
///
/// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is greater than channel_value_satoshis * 1k
pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64) -> Result<(), APIError> {
- let chan_keys = if cfg!(feature = "fuzztarget") {
- ChannelKeys {
- funding_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
- revocation_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
- payment_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
- delayed_payment_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
- htlc_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
- channel_close_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
- channel_monitor_claim_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
- commitment_seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- }
- } else {
- let mut key_seed = [0u8; 32];
- rng::fill_bytes(&mut key_seed);
- match ChannelKeys::new_from_seed(&key_seed) {
- Ok(key) => key,
- Err(_) => panic!("RNG is busted!")
- }
- };
-
- let channel = Channel::new_outbound(&*self.fee_estimator, chan_keys, their_network_key, channel_value_satoshis, push_msat, self.announce_channels_publicly, user_id, Arc::clone(&self.logger))?;
+ let channel = Channel::new_outbound(&*self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, self.announce_channels_publicly, user_id, Arc::clone(&self.logger))?;
let res = channel.get_open_channel(self.genesis_hash.clone(), &*self.fee_estimator);
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.entry(channel.channel_id()) {
}
}
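+ // Handles the result of a failed add_update_monitor call: on PermanentFailure the
+ // channel is force-closed (and removed from short_to_id, with a final channel_update
+ // broadcast); on TemporaryFailure the channel is paused until
+ // test_restore_channel_monitor is called.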
+ fn handle_monitor_update_fail(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, channel_id: &[u8; 32], err: ChannelMonitorUpdateErr, reason: RAACommitmentOrder) {
+ match err {
+ ChannelMonitorUpdateErr::PermanentFailure => {
+ let mut chan = {
+ let channel_state = channel_state_lock.borrow_parts();
+ let chan = channel_state.by_id.remove(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!");
+ if let Some(short_id) = chan.get_short_channel_id() {
+ channel_state.short_to_id.remove(&short_id);
+ }
+ chan
+ };
+ mem::drop(channel_state_lock);
+ self.finish_force_close_channel(chan.force_shutdown());
+ let mut events = self.pending_events.lock().unwrap();
+ if let Ok(update) = self.get_channel_update(&chan) {
+ events.push(events::Event::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ },
+ ChannelMonitorUpdateErr::TemporaryFailure => {
+ let channel = channel_state_lock.by_id.get_mut(channel_id).expect("monitor_update_failed must be called within the same lock as the channel get!");
+ channel.monitor_update_failed(reason);
+ },
+ }
+ }
+
#[inline]
fn gen_rho_mu_from_shared_secret(shared_secret: &SharedSecret) -> ([u8; 32], [u8; 32]) {
({
}
};
- //TODO: Check that msg.cltv_expiry is within acceptable bounds!
-
let pending_forward_info = if next_hop_data.hmac == [0; 32] {
// OUR PAYMENT!
- if next_hop_data.data.amt_to_forward != msg.amount_msat {
+ // final_expiry_too_soon
+ if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 {
+ return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
+ }
+ // final_incorrect_htlc_amount
+ if next_hop_data.data.amt_to_forward > msg.amount_msat {
return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
}
+ // final_incorrect_cltv_expiry
if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry {
return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
}
if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject them in the body here
let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
let forwarding_id = match id_option {
- None => {
+ None => { // unknown_next_peer
return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]);
},
Some(id) => id.clone(),
};
- if let Some((err, code, chan_update)) = {
+ if let Some((err, code, chan_update)) = loop {
let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
- if !chan.is_live() {
- Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, self.get_channel_update(chan).unwrap()))
- } else {
- let fee = amt_to_forward.checked_mul(self.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) });
- if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward {
- Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, self.get_channel_update(chan).unwrap()))
- } else {
- if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 {
- Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, self.get_channel_update(chan).unwrap()))
- } else {
- None
- }
- }
+
+ // Note that we could technically not return an error yet here and just hope
+ // that the connection is reestablished or monitor updated by the time we get
+ // around to doing the actual forward, but better to fail early if we can and
+ // hopefully an attacker trying to path-trace payments cannot make this occur
+ // on a small/per-node/per-channel scale.
+ if !chan.is_live() { // channel_disabled
+ break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap())));
+ }
+ if *amt_to_forward < chan.get_their_htlc_minimum_msat() { // amount_below_minimum
+ break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap())));
+ }
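+ // The required fee is amt_to_forward * fee_proportional_millionths / 1_000_000 plus
+ // our base fee. Worked example (illustrative numbers only): forwarding 1_000_000 msat
+ // with fee_proportional_millionths = 100 and a 1_000 msat base fee requires
+ // msg.amount_msat >= 1_000_000 + 100 + 1_000.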
+ let fee = amt_to_forward.checked_mul(self.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) });
+ if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
+ break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
+ }
+ if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
+ break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
}
- } {
- return_err!(err, code, &chan_update.encode_with_len()[..]);
+ let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ // We want at least HTLC_FAIL_TIMEOUT_BLOCKS in which to fail backwards before the point, CLTV_CLAIM_BUFFER blocks before expiration, where we must go on chain to claim
+ if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon
+ break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
+ }
+ if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
+ break Some(("CLTV expiry is too far in the future", 21, None));
+ }
+ break None;
+ }
+ {
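+ // Build the failure data per BOLT 4: amount_below_minimum and fee_insufficient
+ // carry the htlc amount, incorrect_cltv_expiry carries the cltv_expiry, and each
+ // is followed by the length-prefixed channel_update when we have one.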
+ let mut res = Vec::with_capacity(8 + 128);
+ if code == 0x1000 | 11 || code == 0x1000 | 12 {
+ res.extend_from_slice(&byte_utils::be64_to_array(msg.amount_msat));
+ } else if code == 0x1000 | 13 {
+ res.extend_from_slice(&byte_utils::be32_to_array(msg.cltv_expiry));
+ }
+ if let Some(chan_update) = chan_update {
+ res.extend_from_slice(&chan_update.encode_with_len()[..]);
+ }
+ return_err!(err, code, &res[..]);
}
}
}
}
/// only fails if the channel does not yet have an assigned short_id
+ /// May be called with channel_state already locked!
fn get_channel_update(&self, chan: &Channel) -> Result<msgs::ChannelUpdate, HandleError> {
let short_channel_id = match chan.get_short_channel_id() {
None => return Err(HandleError{err: "Channel not yet established", action: None}),
let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height)?;
let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
- let (first_hop_node_id, (update_add, commitment_signed, chan_monitor)) = {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = channel_state_lock.borrow_parts();
+ let (first_hop_node_id, update_add, commitment_signed) = {
+ let mut channel_state = self.channel_state.lock().unwrap();
let id = match channel_state.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
};
let res = {
- let chan = channel_state.by_id.get_mut(&id).unwrap();
- if chan.get_their_node_id() != route.hops.first().unwrap().pubkey {
- return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
- }
- if !chan.is_live() {
- return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"});
+ let res = {
+ let chan = channel_state.by_id.get_mut(&id).unwrap();
+ if chan.get_their_node_id() != route.hops.first().unwrap().pubkey {
+ return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
+ }
+ if chan.is_awaiting_monitor_update() {
+ return Err(APIError::MonitorUpdateFailed);
+ }
+ if !chan.is_live() {
+ return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"});
+ }
+ chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+ route: route.clone(),
+ session_priv: session_priv.clone(),
+ first_hop_htlc_msat: htlc_msat,
+ }, onion_packet).map_err(|he| APIError::ChannelUnavailable{err: he.err})?
+ };
+ match res {
+ Some((update_add, commitment_signed, chan_monitor)) => {
+ if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ self.handle_monitor_update_fail(channel_state, &id, e, RAACommitmentOrder::CommitmentFirst);
+ return Err(APIError::MonitorUpdateFailed);
+ }
+ Some((update_add, commitment_signed))
+ },
+ None => None,
}
- chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
- route: route.clone(),
- session_priv: session_priv.clone(),
- }, onion_packet).map_err(|he| APIError::ChannelUnavailable{err: he.err})?
};
let first_hop_node_id = route.hops.first().unwrap().pubkey;
match res {
- Some(msgs) => (first_hop_node_id, msgs),
+ Some((update_add, commitment_signed)) => {
+ (first_hop_node_id, update_add, commitment_signed)
+ },
None => return Ok(()),
}
};
- if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
- unimplemented!();
- }
-
let mut events = self.pending_events.lock().unwrap();
events.push(events::Event::UpdateHTLCs {
node_id: first_hop_node_id,
},
None => return
}
- }; // Release channel lock for install_watch_outpoint call,
+ };
+ // Because we have exclusive ownership of the channel here we can release the channel_state
+ // lock before add_update_monitor
if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
unimplemented!();
}
continue;
},
};
- new_events.push((Some(monitor), events::Event::UpdateHTLCs {
+ if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+ unimplemented!(); // but definitely don't push the event...
+ }
+ new_events.push(events::Event::UpdateHTLCs {
node_id: forward_chan.get_their_node_id(),
updates: msgs::CommitmentUpdate {
update_add_htlcs: add_htlc_msgs,
update_fee: None,
commitment_signed: commitment_msg,
},
- }));
+ });
}
} else {
for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
hash_map::Entry::Occupied(mut entry) => entry.get_mut().push(prev_hop_data),
hash_map::Entry::Vacant(entry) => { entry.insert(vec![prev_hop_data]); },
};
- new_events.push((None, events::Event::PaymentReceived {
+ new_events.push(events::Event::PaymentReceived {
payment_hash: forward_info.payment_hash,
amt: forward_info.amt_to_forward,
- }));
+ });
}
}
}
}
if new_events.is_empty() { return }
-
- new_events.retain(|event| {
- if let &Some(ref monitor) = &event.0 {
- if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor.clone()) {
- unimplemented!();// but def dont push the event...
- }
- }
- true
- });
-
let mut events = self.pending_events.lock().unwrap();
- events.reserve(new_events.len());
- for event in new_events.drain(..) {
- events.push(event.1);
- }
+ events.append(&mut new_events);
}
- /// Indicates that the preimage for payment_hash is unknown after a PaymentReceived event.
- pub fn fail_htlc_backwards(&self, payment_hash: &[u8; 32]) -> bool {
+ /// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect after a PaymentReceived event.
+ pub fn fail_htlc_backwards(&self, payment_hash: &[u8; 32], reason: PaymentFailReason) -> bool {
let mut channel_state = Some(self.channel_state.lock().unwrap());
let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
if let Some(mut sources) = removed_source {
for htlc_with_hash in sources.drain(..) {
if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
- self.fail_htlc_backwards_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: Vec::new() });
+ self.fail_htlc_backwards_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_hash, HTLCFailReason::Reason { failure_code: if reason == PaymentFailReason::PreimageUnknown {0x4000 | 15} else {0x4000 | 16}, data: Vec::new() });
}
true
} else { false }
match source {
HTLCSource::OutboundRoute { .. } => {
mem::drop(channel_state);
-
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::PaymentFailed {
- payment_hash: payment_hash.clone()
- });
+ if let &HTLCFailReason::ErrorPacket { ref err } = &onion_error {
+ let (channel_update, payment_retryable) = self.process_onion_failure(&source, err.data.clone());
+ let mut pending_events = self.pending_events.lock().unwrap();
+ if let Some(channel_update) = channel_update {
+ pending_events.push(events::Event::PaymentFailureNetworkUpdate {
+ update: channel_update,
+ });
+ }
+ pending_events.push(events::Event::PaymentFailed {
+ payment_hash: payment_hash.clone(),
+ rejected_by_dest: !payment_retryable,
+ });
+ } else {
+ panic!("should have onion error packet here");
+ }
},
HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret }) => {
let err_packet = match onion_error {
let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
match chan.get_update_fail_htlc_and_commit(htlc_id, err_packet) {
- Ok(msg) => (chan.get_their_node_id(), msg),
+ Ok(Some((msg, commitment_msg, chan_monitor))) => {
+ if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ unimplemented!();
+ }
+ (chan.get_their_node_id(), Some((msg, commitment_msg)))
+ },
+ Ok(None) => (chan.get_their_node_id(), None),
Err(_e) => {
//TODO: Do something with e?
return;
};
match fail_msgs {
- Some((msg, commitment_msg, chan_monitor)) => {
+ Some((msg, commitment_msg)) => {
mem::drop(channel_state);
- if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
- unimplemented!();// but def dont push the event...
- }
-
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push(events::Event::UpdateHTLCs {
node_id,
let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
- Ok(msg) => (chan.get_their_node_id(), msg),
+ Ok((msgs, Some(chan_monitor))) => {
+ if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ unimplemented!(); // but definitely don't push the event...
+ }
+ (chan.get_their_node_id(), msgs)
+ },
+ Ok((msgs, None)) => (chan.get_their_node_id(), msgs),
Err(_e) => {
// TODO: There is probably a channel manager somewhere that needs to
// learn the preimage as the channel may be about to hit the chain.
};
mem::drop(channel_state);
- if let Some(chan_monitor) = fulfill_msgs.1 {
- if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
- unimplemented!();// but def dont push the event...
- }
- }
-
- if let Some((msg, commitment_msg)) = fulfill_msgs.0 {
+ if let Some((msg, commitment_msg)) = fulfill_msgs {
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push(events::Event::UpdateHTLCs {
node_id: node_id,
/// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update
/// operation.
pub fn test_restore_channel_monitor(&self) {
- unimplemented!();
+ let mut new_events = Vec::new();
+ let mut close_results = Vec::new();
+ let mut htlc_forwards = Vec::new();
+ let mut htlc_failures = Vec::new();
+
+ {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_lock.borrow_parts();
+ let short_to_id = channel_state.short_to_id;
+ channel_state.by_id.retain(|_, channel| {
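+ // Walk all channels, re-attempting the monitor update for any channel that was
+ // paused: channels whose update fails permanently are force-closed and dropped,
+ // while channels whose update now succeeds are resumed, replaying their RAA and
+ // commitment_signed in the order the channel recorded.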
+ if channel.is_awaiting_monitor_update() {
+ let chan_monitor = channel.channel_monitor();
+ if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ match e {
+ ChannelMonitorUpdateErr::PermanentFailure => {
+ if let Some(short_id) = channel.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+ close_results.push(channel.force_shutdown());
+ if let Ok(update) = self.get_channel_update(&channel) {
+ new_events.push(events::Event::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ false
+ },
+ ChannelMonitorUpdateErr::TemporaryFailure => true,
+ }
+ } else {
+ let (raa, commitment_update, order, pending_forwards, mut pending_failures) = channel.monitor_updating_restored();
+ if !pending_forwards.is_empty() {
+ htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards));
+ }
+ htlc_failures.append(&mut pending_failures);
+
+ macro_rules! handle_cs { () => {
+ if let Some(update) = commitment_update {
+ new_events.push(events::Event::UpdateHTLCs {
+ node_id: channel.get_their_node_id(),
+ updates: update,
+ });
+ }
+ } }
+ macro_rules! handle_raa { () => {
+ if let Some(revoke_and_ack) = raa {
+ new_events.push(events::Event::SendRevokeAndACK {
+ node_id: channel.get_their_node_id(),
+ msg: revoke_and_ack,
+ });
+ }
+ } }
+ match order {
+ RAACommitmentOrder::CommitmentFirst => {
+ handle_cs!();
+ handle_raa!();
+ },
+ RAACommitmentOrder::RevokeAndACKFirst => {
+ handle_raa!();
+ handle_cs!();
+ },
+ }
+ true
+ }
+ } else { true }
+ });
+ }
+
+ for failure in htlc_failures.drain(..) {
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+ }
+ self.forward_htlcs(&mut htlc_forwards[..]);
+
+ for res in close_results.drain(..) {
+ self.finish_force_close_channel(res);
+ }
+
+ self.pending_events.lock().unwrap().append(&mut new_events);
}
fn internal_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<msgs::AcceptChannel, MsgHandleErrInternal> {
return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!", msg.temporary_channel_id.clone()));
}
- let chan_keys = if cfg!(feature = "fuzztarget") {
- ChannelKeys {
- funding_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]).unwrap(),
- revocation_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0]).unwrap(),
- payment_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0]).unwrap(),
- delayed_payment_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0]).unwrap(),
- htlc_base_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0]).unwrap(),
- channel_close_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0]).unwrap(),
- channel_monitor_claim_key: SecretKey::from_slice(&self.secp_ctx, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0]).unwrap(),
- commitment_seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
- }
- } else {
- let mut key_seed = [0u8; 32];
- rng::fill_bytes(&mut key_seed);
- match ChannelKeys::new_from_seed(&key_seed) {
- Ok(key) => key,
- Err(_) => panic!("RNG is busted!")
- }
- };
-
- let channel = Channel::new_from_req(&*self.fee_estimator, chan_keys, their_node_id.clone(), msg, 0, false, self.announce_channels_publicly, Arc::clone(&self.logger))
+ let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), msg, 0, false, self.announce_channels_publicly, Arc::clone(&self.logger))
.map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
let accept_msg = channel.get_accept_channel();
channel_state.by_id.insert(channel.channel_id(), channel);
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
}
- }; // Release channel lock for install_watch_outpoint call,
- // note that this means if the remote end is misbehaving and sends a message for the same
- // channel back-to-back with funding_created, we'll end up thinking they sent a message
- // for a bogus channel.
+ };
+ // Because we have exclusive ownership of the channel here we can release the channel_state
+ // lock before add_update_monitor
if let Err(_e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
unimplemented!();
}
}
fn internal_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
- let (funding_txo, user_id, monitor) = {
+ let (funding_txo, user_id) = {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
let chan_monitor = chan.funding_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
- (chan.get_funding_txo().unwrap(), chan.get_user_id(), chan_monitor)
+ if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ unimplemented!();
+ }
+ (chan.get_funding_txo().unwrap(), chan.get_user_id())
},
None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
}
};
- if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
- unimplemented!();
- }
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push(events::Event::FundingBroadcastSafe {
funding_txo: funding_txo,
Ok(())
}
- fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<Option<msgs::HTLCFailChannelUpdate>, MsgHandleErrInternal> {
+ // Process a failure we got back from upstream on a payment we sent. Returns the
+ // channel/node update to pass to the route handler, if any, and a boolean indicating
+ // whether the payment may be retried
+ fn process_onion_failure(&self, htlc_source: &HTLCSource, mut packet_decrypted: Vec<u8>) -> (Option<msgs::HTLCFailChannelUpdate>, bool) {
+ if let &HTLCSource::OutboundRoute { ref route, ref session_priv, ref first_hop_htlc_msat } = htlc_source {
+ macro_rules! onion_failure_log {
+ ( $error_code_textual: expr, $error_code: expr, $reported_name: expr, $reported_value: expr ) => {
+ log_trace!(self, "{}({:#x}) {}({})", $error_code_textual, $error_code, $reported_name, $reported_value);
+ };
+ ( $error_code_textual: expr, $error_code: expr ) => {
+ log_trace!(self, "{}({})", $error_code_textual, $error_code);
+ };
+ }
+
+ const BADONION: u16 = 0x8000;
+ const PERM: u16 = 0x4000;
+ const UPDATE: u16 = 0x1000;
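+
+ // Per BOLT 4, failure codes are flag bits (BADONION, PERM, UPDATE) or'd with a
+ // number identifying the specific failure.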
+
+ let mut res = None;
+ let mut htlc_msat = *first_hop_htlc_msat;
+
+ // Handle packed channel/node updates to pass back to the route handler
+ Self::construct_onion_keys_callback(&self.secp_ctx, route, session_priv, |shared_secret, _, _, route_hop| {
+ if res.is_some() { return; }
+
+ let incoming_htlc_msat = htlc_msat;
+ let amt_to_forward = htlc_msat - route_hop.fee_msat;
+ htlc_msat = amt_to_forward;
+
+ let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret);
+
+ let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len());
+ decryption_tmp.resize(packet_decrypted.len(), 0);
+ let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
+ chacha.process(&packet_decrypted, &mut decryption_tmp[..]);
+ packet_decrypted = decryption_tmp;
+
+ let is_from_final_node = route.hops.last().unwrap().pubkey == route_hop.pubkey;
+
+ if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) {
+ let um = ChannelManager::gen_um_from_shared_secret(&shared_secret);
+ let mut hmac = Hmac::new(Sha256::new(), &um);
+ hmac.input(&err_packet.encode()[32..]);
+ let mut calc_tag = [0u8; 32];
+ hmac.raw_result(&mut calc_tag);
+
+ if crypto::util::fixed_time_eq(&calc_tag, &err_packet.hmac) {
+ if err_packet.failuremsg.len() < 2 {
+ // Useless packet that we can't use but it passed HMAC, so it
+ // definitely came from the peer in question
+ res = Some((None, !is_from_final_node));
+ } else {
+ let error_code = byte_utils::slice_to_be16(&err_packet.failuremsg[0..2]);
+
+ match error_code & 0xff {
+ 1|2|3 => {
+ // either from an intermediate or final node
+ // invalid_realm(PERM|1),
+ // temporary_node_failure(NODE|2)
+ // permanent_node_failure(PERM|NODE|2)
+ // required_node_feature_missing(PERM|NODE|3)
+ res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
+ node_id: route_hop.pubkey,
+ is_permanent: error_code & PERM == PERM,
+ }), !(error_code & PERM == PERM && is_from_final_node)));
+ // a node returning invalid_realm is removed from the network_map,
+ // although the NODE flag is not set. TODO: or should we remove the channel only?
+ // retry the payment when the removed node is not the final node
+ return;
+ },
+ _ => {}
+ }
+
+ if is_from_final_node {
+ let payment_retryable = match error_code {
+ c if c == PERM|15 => false, // unknown_payment_hash
+ c if c == PERM|16 => false, // incorrect_payment_amount
+ 17 => true, // final_expiry_too_soon
+ 18 if err_packet.failuremsg.len() == 6 => { // final_incorrect_cltv_expiry
+ let _reported_cltv_expiry = byte_utils::slice_to_be32(&err_packet.failuremsg[2..2+4]);
+ true
+ },
+ 19 if err_packet.failuremsg.len() == 10 => { // final_incorrect_htlc_amount
+ let _reported_incoming_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]);
+ true
+ },
+ _ => {
+ // A final node has sent us either an invalid code or an error_code that
+ // MUST be sent from the processing node, or the format of failuremsg
+ // does not conform to the spec.
+ // Remove it from the network map and don't retry the payment
+ res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
+ node_id: route_hop.pubkey,
+ is_permanent: true,
+ }), false));
+ return;
+ }
+ };
+ res = Some((None, payment_retryable));
+ return;
+ }
+
+ // at this point, error_code should only be from an intermediate node
+ match error_code {
+ _c if error_code & PERM == PERM => {
+ res = Some((Some(msgs::HTLCFailChannelUpdate::ChannelClosed {
+ short_channel_id: route_hop.short_channel_id,
+ is_permanent: true,
+ }), false));
+ },
+ _c if error_code & UPDATE == UPDATE => {
+ let offset = match error_code {
+ c if c == UPDATE|7 => 0, // temporary_channel_failure
+ c if c == UPDATE|11 => 8, // amount_below_minimum
+ c if c == UPDATE|12 => 8, // fee_insufficient
+ c if c == UPDATE|13 => 4, // incorrect_cltv_expiry
+ c if c == UPDATE|14 => 0, // expiry_too_soon
+ c if c == UPDATE|20 => 2, // channel_disabled
+ _ => {
+ // node sending unknown code
+ res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
+ node_id: route_hop.pubkey,
+ is_permanent: true,
+ }), false));
+ return;
+ }
+ };
+
+ if err_packet.failuremsg.len() >= offset + 2 {
+ let update_len = byte_utils::slice_to_be16(&err_packet.failuremsg[offset+2..offset+4]) as usize;
+ if err_packet.failuremsg.len() >= offset + 4 + update_len {
+ if let Ok(chan_update) = msgs::ChannelUpdate::read(&mut Cursor::new(&err_packet.failuremsg[offset + 4..offset + 4 + update_len])) {
+ // if channel_update should NOT have caused the failure:
+ // MAY treat the channel_update as invalid.
+ let is_chan_update_invalid = match error_code {
+ c if c == UPDATE|7 => { // temporary_channel_failure
+ false
+ },
+ c if c == UPDATE|11 => { // amount_below_minimum
+ let reported_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]);
+ onion_failure_log!("amount_below_minimum", UPDATE|11, "htlc_msat", reported_htlc_msat);
+ incoming_htlc_msat > chan_update.contents.htlc_minimum_msat
+ },
+ c if c == UPDATE|12 => { // fee_insufficient
+ let reported_htlc_msat = byte_utils::slice_to_be64(&err_packet.failuremsg[2..2+8]);
+ let new_fee = amt_to_forward.checked_mul(chan_update.contents.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan_update.contents.fee_base_msat as u64) });
+ onion_failure_log!("fee_insufficient", UPDATE|12, "htlc_msat", reported_htlc_msat);
+ new_fee.is_none() || incoming_htlc_msat >= new_fee.unwrap() && incoming_htlc_msat >= amt_to_forward + new_fee.unwrap()
+ },
+ c if c == UPDATE|13 => { // incorrect_cltv_expiry
+ let reported_cltv_expiry = byte_utils::slice_to_be32(&err_packet.failuremsg[2..2+4]);
+ onion_failure_log!("incorrect_cltv_expiry", UPDATE|13, "cltv_expiry", reported_cltv_expiry);
+ route_hop.cltv_expiry_delta as u16 >= chan_update.contents.cltv_expiry_delta
+ },
+ c if c == UPDATE|20 => { // channel_disabled
+ let reported_flags = byte_utils::slice_to_be16(&err_packet.failuremsg[2..2+2]);
+ onion_failure_log!("channel_disabled", UPDATE|20, "flags", reported_flags);
+ chan_update.contents.flags & 0x01 == 0x01
+ },
+ c if c == UPDATE|21 => true, // expiry_too_far
+ _ => { unreachable!(); },
+ };
+
+ let msg = if is_chan_update_invalid { None } else {
+ Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage {
+ msg: chan_update,
+ })
+ };
+ res = Some((msg, true));
+ return;
+ }
+ }
+ }
+ },
+ _c if error_code & BADONION == BADONION => {
+ //TODO
+ },
+ 14 => { // expiry_too_soon
+ res = Some((None, true));
+ return;
+ }
+ _ => {
+ // node sending unknown code
+ res = Some((Some(msgs::HTLCFailChannelUpdate::NodeFailure {
+ node_id: route_hop.pubkey,
+ is_permanent: true,
+ }), false));
+ return;
+ }
+ }
+ }
+ }
+ }
+ }).expect("Route that we sent via spontaneously grew invalid keys in the middle of it?");
+ res.unwrap_or((None, true))
+ } else { (None, true) }
+ }
+
+ fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
let mut channel_state = self.channel_state.lock().unwrap();
- let htlc_source = match channel_state.by_id.get_mut(&msg.channel_id) {
+ match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
if chan.get_their_node_id() != *their_node_id {
//TODO: here and below MsgHandleErrInternal, #153 case
},
None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
}?;
-
- match htlc_source {
- &HTLCSource::OutboundRoute { ref route, ref session_priv, .. } => {
- // Handle packed channel/node updates for passing back for the route handler
- let mut packet_decrypted = msg.reason.data.clone();
- let mut res = None;
- Self::construct_onion_keys_callback(&self.secp_ctx, &route, &session_priv, |shared_secret, _, _, route_hop| {
- if res.is_some() { return; }
-
- let ammag = ChannelManager::gen_ammag_from_shared_secret(&shared_secret);
-
- let mut decryption_tmp = Vec::with_capacity(packet_decrypted.len());
- decryption_tmp.resize(packet_decrypted.len(), 0);
- let mut chacha = ChaCha20::new(&ammag, &[0u8; 8]);
- chacha.process(&packet_decrypted, &mut decryption_tmp[..]);
- packet_decrypted = decryption_tmp;
-
- if let Ok(err_packet) = msgs::DecodedOnionErrorPacket::read(&mut Cursor::new(&packet_decrypted)) {
- if err_packet.failuremsg.len() >= 2 {
- let um = ChannelManager::gen_um_from_shared_secret(&shared_secret);
-
- let mut hmac = Hmac::new(Sha256::new(), &um);
- hmac.input(&err_packet.encode()[32..]);
- let mut calc_tag = [0u8; 32];
- hmac.raw_result(&mut calc_tag);
- if crypto::util::fixed_time_eq(&calc_tag, &err_packet.hmac) {
- const UNKNOWN_CHAN: u16 = 0x4000|10;
- const TEMP_CHAN_FAILURE: u16 = 0x4000|7;
- match byte_utils::slice_to_be16(&err_packet.failuremsg[0..2]) {
- TEMP_CHAN_FAILURE => {
- if err_packet.failuremsg.len() >= 4 {
- let update_len = byte_utils::slice_to_be16(&err_packet.failuremsg[2..4]) as usize;
- if err_packet.failuremsg.len() >= 4 + update_len {
- if let Ok(chan_update) = msgs::ChannelUpdate::read(&mut Cursor::new(&err_packet.failuremsg[4..4 + update_len])) {
- res = Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage {
- msg: chan_update,
- });
- }
- }
- }
- },
- UNKNOWN_CHAN => {
- // No such next-hop. We know this came from the
- // current node as the HMAC validated.
- res = Some(msgs::HTLCFailChannelUpdate::ChannelClosed {
- short_channel_id: route_hop.short_channel_id
- });
- },
- _ => {}, //TODO: Enumerate all of these!
- }
- }
- }
- }
- }).unwrap();
- Ok(res)
- },
- _ => { Ok(None) },
- }
+ Ok(())
}
fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
}
fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>), MsgHandleErrInternal> {
- let (revoke_and_ack, commitment_signed, chan_monitor) = {
+ let (revoke_and_ack, commitment_signed) = {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
//TODO: here and below MsgHandleErrInternal, #153 case
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
- chan.commitment_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?
+ let (revoke_and_ack, commitment_signed, chan_monitor) = chan.commitment_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+ if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ unimplemented!();
+ }
+ (revoke_and_ack, commitment_signed)
},
None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
}
};
- if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
- unimplemented!();
- }
-
Ok((revoke_and_ack, commitment_signed))
}
+ #[inline]
+ fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingForwardHTLCInfo, u64)>)]) {
+ for &mut (prev_short_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
+ let mut forward_event = None;
+ if !pending_forwards.is_empty() {
+ let mut channel_state = self.channel_state.lock().unwrap();
+ if channel_state.forward_htlcs.is_empty() {
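+ // Pick a random delay in the (min, 5*min) holding-cell window noted at
+ // MIN_HTLC_RELAY_HOLDING_CELL_MILLIS; HTLCs arriving before it fires are batched.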
+ forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
+ channel_state.next_forward = forward_event.unwrap();
+ }
+ for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
+ match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
+ hash_map::Entry::Occupied(mut entry) => {
+ entry.get_mut().push(HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info });
+ },
+ hash_map::Entry::Vacant(entry) => {
+ entry.insert(vec!(HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info }));
+ }
+ }
+ }
+ }
+ match forward_event {
+ Some(time) => {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push(events::Event::PendingHTLCsForwardable {
+ time_forwardable: time
+ });
+ }
+ None => {},
+ }
+ }
+ }
+
fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<Option<msgs::CommitmentUpdate>, MsgHandleErrInternal> {
- let ((res, mut pending_forwards, mut pending_failures, chan_monitor), short_channel_id) = {
+ let ((res, pending_forwards, mut pending_failures), short_channel_id) = {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
//TODO: here and below MsgHandleErrInternal, #153 case
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
- (chan.revoke_and_ack(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?, chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
+ let (res, pending_forwards, pending_failures, chan_monitor) = chan.revoke_and_ack(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+ if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ unimplemented!();
+ }
+ ((res, pending_forwards, pending_failures), chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
},
None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
}
};
- if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
- unimplemented!();
- }
for failure in pending_failures.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
}
-
- let mut forward_event = None;
- if !pending_forwards.is_empty() {
- let mut channel_state = self.channel_state.lock().unwrap();
- if channel_state.forward_htlcs.is_empty() {
- forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
- channel_state.next_forward = forward_event.unwrap();
- }
- for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
- match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
- hash_map::Entry::Occupied(mut entry) => {
- entry.get_mut().push(HTLCForwardInfo { prev_short_channel_id: short_channel_id, prev_htlc_id, forward_info });
- },
- hash_map::Entry::Vacant(entry) => {
- entry.insert(vec!(HTLCForwardInfo { prev_short_channel_id: short_channel_id, prev_htlc_id, forward_info }));
- }
- }
- }
- }
- match forward_event {
- Some(time) => {
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::PendingHTLCsForwardable {
- time_forwardable: time
- });
- }
- None => {},
- }
+ self.forward_htlcs(&mut [(short_channel_id, pending_forwards)]);
Ok(res)
}
Ok(())
}
- fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>), MsgHandleErrInternal> {
- let (res, chan_monitor) = {
+ fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder), MsgHandleErrInternal> {
+ let res = {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
if chan.get_their_node_id() != *their_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
- let (funding_locked, revoke_and_ack, commitment_update, channel_monitor) = chan.channel_reestablish(msg)
+ let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order) = chan.channel_reestablish(msg)
.map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
- (Ok((funding_locked, revoke_and_ack, commitment_update)), channel_monitor)
+ if let Some(monitor) = channel_monitor {
+ if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+ unimplemented!();
+ }
+ }
+ Ok((funding_locked, revoke_and_ack, commitment_update, order))
},
None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
}
};
- if let Some(monitor) = chan_monitor {
- if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
- unimplemented!();
- }
- }
+
res
}
if !chan.is_outbound() {
return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"});
}
+ if chan.is_awaiting_monitor_update() {
+ return Err(APIError::MonitorUpdateFailed);
+ }
if !chan.is_live() {
return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"});
}
handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), their_node_id)
}
- fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<Option<msgs::HTLCFailChannelUpdate>, HandleError> {
+ fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> {
handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), their_node_id)
}
handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), their_node_id)
}
- fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>), HandleError> {
+ fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder), HandleError> {
handle_error!(self, self.internal_channel_reestablish(their_node_id, msg), their_node_id)
}
use chain::chaininterface;
use chain::transaction::OutPoint;
use chain::chaininterface::ChainListener;
- use ln::channelmanager::{ChannelManager,OnionKeys};
- use ln::channelmonitor::{CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
+ use chain::keysinterface::KeysInterface;
+ use chain::keysinterface;
+ use ln::channelmanager::{ChannelManager,OnionKeys,PaymentFailReason};
+ use ln::channelmonitor::{ChannelMonitorUpdateErr, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
use ln::router::{Route, RouteHop, Router};
use ln::msgs;
use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler};
commitment_msg: msgs::CommitmentSigned,
}
impl SendEvent {
+ fn from_commitment_update(node_id: PublicKey, updates: msgs::CommitmentUpdate) -> SendEvent {
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ SendEvent { node_id: node_id, msgs: updates.update_add_htlcs, commitment_msg: updates.commitment_signed }
+ }
+
fn from_event(event: Event) -> SendEvent {
match event {
- Event::UpdateHTLCs { node_id, updates: msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => {
- assert!(update_fulfill_htlcs.is_empty());
- assert!(update_fail_htlcs.is_empty());
- assert!(update_fail_malformed_htlcs.is_empty());
- assert!(update_fee.is_none());
- SendEvent { node_id: node_id, msgs: update_add_htlcs, commitment_msg: commitment_signed }
- },
+ Event::UpdateHTLCs { node_id, updates } => SendEvent::from_commitment_update(node_id, updates),
_ => panic!("Unexpected event type!"),
}
}
}
fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_hash: [u8; 32]) {
- assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash));
+ assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash, PaymentFailReason::PreimageUnknown));
check_added_monitors!(expected_route.last().unwrap(), 1);
let mut next_msgs: Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)> = None;
let events = origin_node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
- Event::PaymentFailed { payment_hash } => {
+ Event::PaymentFailed { payment_hash, rejected_by_dest } => {
assert_eq!(payment_hash, our_payment_hash);
+ assert!(rejected_by_dest);
},
_ => panic!("Unexpected event"),
}
let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
+ let mut seed = [0; 32];
+ rng.fill_bytes(&mut seed);
+ let keys_manager = Arc::new(keysinterface::KeysManager::new(&seed, Network::Testnet, Arc::clone(&logger)));
let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone()));
- let node_id = {
- let mut key_slice = [0; 32];
- rng.fill_bytes(&mut key_slice);
- SecretKey::from_slice(&secp_ctx, &key_slice).unwrap()
- };
- let node = ChannelManager::new(node_id.clone(), 0, true, Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger)).unwrap();
- let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id), chain_monitor.clone(), Arc::clone(&logger));
+ let node = ChannelManager::new(0, true, Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger), keys_manager.clone()).unwrap();
+ let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()), chain_monitor.clone(), Arc::clone(&logger));
nodes.push(Node { chain_monitor, tx_broadcaster, chan_monitor, node, router,
network_payment_count: payment_count.clone(),
network_chan_count: chan_count.clone(),
}
}
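+ // Consumes a node's PendingHTLCsForwardable event and immediately processes the
+ // pending forwards it announced, so tests need not wait out the batching delay.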
+ macro_rules! expect_pending_htlcs_forwardable {
+ ($node: expr) => {{
+ let events = $node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
+ $node.node.channel_state.lock().unwrap().next_forward = Instant::now();
+ $node.node.process_pending_htlc_forwards();
+ }}
+ }
+
#[test]
fn channel_reserve_test() {
use util::rng;
}}
};
- macro_rules! expect_pending_htlcs_forwardable {
- ($node: expr) => {{
- let events = $node.node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- Event::PendingHTLCsForwardable { .. } => { },
- _ => panic!("Unexpected event"),
- };
- $node.node.channel_state.lock().unwrap().next_forward = Instant::now();
- $node.node.process_pending_htlc_forwards();
- }}
- };
-
macro_rules! expect_forward {
($node: expr) => {{
let mut events = $node.node.get_and_clear_pending_events();
assert!(chan_msgs.0.is_none());
}
if pending_raa.0 {
+ assert!(chan_msgs.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst);
assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none());
check_added_monitors!(node_a, 1);
} else {
assert!(chan_msgs.0.is_none());
}
if pending_raa.1 {
+ assert!(chan_msgs.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst);
assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none());
check_added_monitors!(node_b, 1);
} else {
_ => panic!("Unexpected event"),
}
match events[1] {
- Event::PaymentFailed { payment_hash } => {
+ Event::PaymentFailed { payment_hash, rejected_by_dest } => {
assert_eq!(payment_hash, payment_hash_5);
+ assert!(rejected_by_dest);
},
_ => panic!("Unexpected event"),
}
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
}
+ #[test]
+ fn test_drop_messages_peer_disconnect_dual_htlc() {
+ // Test that we can handle reconnecting when both sides of a channel have pending
+ // commitment_updates when we disconnect.
+ let mut nodes = create_network(2);
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+ // Now try to send a second payment which will fail to send
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+
+ nodes[0].node.send_payment(route.clone(), payment_hash_2).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let events_1 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_1.len(), 1);
+ match events_1[0] {
+ Event::UpdateHTLCs { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+
+ assert!(nodes[1].node.claim_funds(payment_preimage_1));
+ check_added_monitors!(nodes[1], 1);
+
+ let events_2 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_2.len(), 1);
+ match events_2[0] {
+ Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ assert!(update_add_htlcs.is_empty());
+ assert_eq!(update_fulfill_htlcs.len(), 1);
+ assert!(update_fail_htlcs.is_empty());
+ assert!(update_fail_malformed_htlcs.is_empty());
+ assert!(update_fee.is_none());
+
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap();
+ let events_3 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_3.len(), 1);
+ match events_3[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(*payment_preimage, payment_preimage_1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let (_, commitment_update) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed).unwrap();
+ assert!(commitment_update.is_none());
+ check_added_monitors!(nodes[0], 1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ let reestablish_1 = nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ assert_eq!(reestablish_1.len(), 1);
+ let reestablish_2 = nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+ assert_eq!(reestablish_2.len(), 1);
+
+ let as_resp = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
+ let bs_resp = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
+
+ assert!(as_resp.0.is_none());
+ assert!(bs_resp.0.is_none());
+
+ assert!(bs_resp.1.is_none());
+ assert!(bs_resp.2.is_none());
+
+ assert!(as_resp.3 == msgs::RAACommitmentOrder::CommitmentFirst);
+
+ assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
+ assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
+ assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
+ assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
+ assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]).unwrap();
+ let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed).unwrap();
+ assert!(bs_commitment_signed.is_none());
+ check_added_monitors!(nodes[1], 1);
+
+ let bs_second_commitment_signed = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap()).unwrap().unwrap();
+ assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
+ assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
+ assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
+ assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
+ assert!(bs_second_commitment_signed.update_fee.is_none());
+ check_added_monitors!(nodes[1], 1);
+
+ let as_commitment_signed = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().unwrap();
+ assert!(as_commitment_signed.update_add_htlcs.is_empty());
+ assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
+ assert!(as_commitment_signed.update_fail_htlcs.is_empty());
+ assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
+ assert!(as_commitment_signed.update_fee.is_none());
+ check_added_monitors!(nodes[0], 1);
+
+ let (as_revoke_and_ack, as_second_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed).unwrap();
+ assert!(as_second_commitment_signed.is_none());
+ check_added_monitors!(nodes[0], 1);
+
+ let (bs_second_revoke_and_ack, bs_third_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed).unwrap();
+ assert!(bs_third_commitment_signed.is_none());
+ check_added_monitors!(nodes[1], 1);
+
+ assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(nodes[1], 1);
+
+ let events_4 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_4.len(), 1);
+ match events_4[0] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
+
+ nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+ nodes[1].node.process_pending_htlc_forwards();
+
+ let events_5 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_5.len(), 1);
+ match events_5[0] {
+ Event::PaymentReceived { ref payment_hash, amt: _ } => {
+ assert_eq!(payment_hash_2, *payment_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(nodes[0], 1);
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+ }
+
+ #[test]
+ fn test_simple_monitor_permanent_update_fail() {
+ // Test that we handle a simple permanent monitor update failure
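+ // Under PermanentFailure, send_payment returns Err(APIError::MonitorUpdateFailed), a
+ // BroadcastChannelUpdate event is generated, and the channel is force-closed (it no
+ // longer appears in list_channels), as asserted below.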
+ let mut nodes = create_network(2);
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
+ if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); }
+ check_added_monitors!(nodes[0], 1);
+
+ let events_1 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_1.len(), 1);
+ match events_1[0] {
+ Event::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("Unexpected event"),
+ };
+
+ // TODO: Once we hit the chain with the failure transaction we should check that we get a
+ // PaymentFailed event
+
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+ }
+
+ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
+ // Test that we can recover from a simple temporary monitor update failure, optionally
+ // with a disconnect in between
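+ // Expected contract under TemporaryFailure, as exercised below: send_payment returns
+ // Err(APIError::MonitorUpdateFailed), no events are generated, and the channel stays
+ // open but frozen until the monitor update is retried via test_restore_channel_monitor.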
+ let mut nodes = create_network(2);
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_1) {} else { panic!(); }
+ check_added_monitors!(nodes[0], 1);
+
+ let events_1 = nodes[0].node.get_and_clear_pending_events();
+ assert!(events_1.is_empty());
+ assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+ if disconnect {
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ }
+
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[0].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events_2 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_2.len(), 1);
+ let payment_event = SendEvent::from_event(events_2.pop().unwrap());
+ assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let events_3 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_3.len(), 1);
+ match events_3[0] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!(payment_hash_1, *payment_hash);
+ assert_eq!(amt, 1000000);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+
+ // Now set it to failed again...
+ let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_2) {} else { panic!(); }
+ check_added_monitors!(nodes[0], 1);
+
+ let events_4 = nodes[0].node.get_and_clear_pending_events();
+ assert!(events_4.is_empty());
+ assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+ if disconnect {
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ }
+
+ // ...and make sure we can force-close a TemporaryFailure channel with a PermanentFailure
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
+ nodes[0].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[0], 1);
+
+ let events_5 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_5.len(), 1);
+ match events_5[0] {
+ Event::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+
+ // TODO: Once we hit the chain with the failure transaction we should check that we get a
+ // PaymentFailed event
+
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+ }
+
+ #[test]
+ fn test_simple_monitor_temporary_update_fail() {
+ do_test_simple_monitor_temporary_update_fail(false);
+ do_test_simple_monitor_temporary_update_fail(true);
+ }
+
+ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
+ let disconnect_flags = 8 | 16;
+
+ // Test that we can recover from a temporary monitor update failure with some in-flight
+ // HTLCs going on at the same time potentially with some disconnection thrown in.
+ // * First we route a payment, then get a temporary monitor update failure when trying to
+ // route a second payment. We then claim the first payment.
+ // * If disconnect_count is set, we will disconnect at this point (which is likely, as a
+ //   TemporaryFailure often indicates a network disconnect which resulted in failing to
+ //   update the ChannelMonitor on a watchtower).
+ // * If !(disconnect_count & 16) we deliver an update_fulfill_htlc/CS for the first payment
+ //   immediately; otherwise we wait until after the reconnect and deliver them via the
+ //   channel_reestablish processing (ie disconnect_count & 16 makes no sense if
+ //   disconnect_count & !disconnect_flags is 0).
+ // * We then update the channel monitor, reconnecting if disconnect_count is set and walk
+ // through message sending, potentially disconnect/reconnecting multiple times based on
+ // disconnect_count, to get the update_fulfill_htlc through.
+ // * We then walk through more message exchanges to get the original update_add_htlc
+ // through, swapping message ordering based on disconnect_count & 8 and optionally
+ // disconnect/reconnecting based on disconnect_count.
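+ // Illustrative bindings showing how the flag word decomposes (unused by the test
+ // logic below, which reads the bits inline):
+ let _num_disconnects = disconnect_count & !disconnect_flags; // low bits: how many disconnect/reconnect cycles to perform
+ let _initial_raa_first = (disconnect_count & 8) != 0; // deliver the initial revoke_and_ack before nodes[1]'s
+ let _fulfill_via_reestablish = (disconnect_count & 16) != 0; // deliver update_fulfill_htlc only via channel_reestablish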
+ let mut nodes = create_network(2);
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+ // Now try to send a second payment which will fail to send
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_2) {} else { panic!(); }
+ check_added_monitors!(nodes[0], 1);
+
+ let events_1 = nodes[0].node.get_and_clear_pending_events();
+ assert!(events_1.is_empty());
+ assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+ // Claim the previous payment, which will result in an update_fulfill_htlc/CS from
+ // nodes[1], but nodes[0] won't respond since its channel is frozen by the failed monitor update.
+ assert!(nodes[1].node.claim_funds(payment_preimage_1));
+ check_added_monitors!(nodes[1], 1);
+ let events_2 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_2.len(), 1);
+ let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
+ Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ assert!(update_add_htlcs.is_empty());
+ assert_eq!(update_fulfill_htlcs.len(), 1);
+ assert!(update_fail_htlcs.is_empty());
+ assert!(update_fail_malformed_htlcs.is_empty());
+ assert!(update_fee.is_none());
+
+ if (disconnect_count & 16) == 0 {
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap();
+ let events_3 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_3.len(), 1);
+ match events_3[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(*payment_preimage, payment_preimage_1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) {
+ assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
+ } else { panic!(); }
+ }
+
+ (update_fulfill_htlcs[0].clone(), commitment_signed.clone())
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ if disconnect_count & !disconnect_flags > 0 {
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ }
+
+ // Now fix monitor updating...
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[0].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[0], 1);
+
+ macro_rules! disconnect_reconnect_peers { () => { {
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ let reestablish_1 = nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ assert_eq!(reestablish_1.len(), 1);
+ let reestablish_2 = nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+ assert_eq!(reestablish_2.len(), 1);
+
+ let as_resp = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
+ let bs_resp = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
+
+ assert!(as_resp.0.is_none());
+ assert!(bs_resp.0.is_none());
+
+ (reestablish_1, reestablish_2, as_resp, bs_resp)
+ } } }
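+ // In the resp tuples returned above, as exercised in these tests: .0 is expected to be
+ // None, .1 is the (optional) retransmitted revoke_and_ack, .2 the (optional)
+ // retransmitted commitment update, and .3 the RAACommitmentOrder in which to apply them.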
+
+ let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
+ let events_4 = nodes[0].node.get_and_clear_pending_events();
+ assert!(events_4.is_empty());
+
+ let reestablish_1 = nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ assert_eq!(reestablish_1.len(), 1);
+ let reestablish_2 = nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+ assert_eq!(reestablish_2.len(), 1);
+
+ let mut as_resp = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
+ check_added_monitors!(nodes[0], 0);
+ let mut bs_resp = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
+ check_added_monitors!(nodes[1], 0);
+
+ assert!(as_resp.0.is_none());
+ assert!(bs_resp.0.is_none());
+
+ assert!(bs_resp.1.is_none());
+ if (disconnect_count & 16) == 0 {
+ assert!(bs_resp.2.is_none());
+
+ assert!(as_resp.1.is_some());
+ assert!(as_resp.2.is_some());
+ assert!(as_resp.3 == msgs::RAACommitmentOrder::CommitmentFirst);
+ } else {
+ assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
+ assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
+ assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
+ assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
+ assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
+ assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);
+
+ assert!(as_resp.1.is_none());
+
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]).unwrap();
+ let events_3 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_3.len(), 1);
+ match events_3[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(*payment_preimage, payment_preimage_1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let (as_resp_raa, as_resp_cu) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed).unwrap();
+ assert!(as_resp_cu.is_none());
+ check_added_monitors!(nodes[0], 1);
+
+ as_resp.1 = Some(as_resp_raa);
+ bs_resp.2 = None;
+ }
+
+ if disconnect_count & !disconnect_flags > 1 {
+ let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();
+
+ if (disconnect_count & 16) == 0 {
+ assert!(reestablish_1 == second_reestablish_1);
+ assert!(reestablish_2 == second_reestablish_2);
+ }
+ assert!(as_resp == second_as_resp);
+ assert!(bs_resp == second_bs_resp);
+ }
+
+ (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
+ } else {
+ let mut events_4 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_4.len(), 2);
+ (SendEvent::from_event(events_4.remove(0)), match events_4[0] {
+ Event::SendRevokeAndACK { ref node_id, ref msg } => {
+ assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+ msg.clone()
+ },
+ _ => panic!("Unexpected event"),
+ })
+ };
+
+ assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+ assert!(bs_commitment_signed.is_none()); // nodes[1] is awaiting an RAA from nodes[0] still
+ check_added_monitors!(nodes[1], 1);
+
+ if disconnect_count & !disconnect_flags > 2 {
+ let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
+
+ assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
+ assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
+
+ assert!(as_resp.2.is_none());
+ assert!(bs_resp.2.is_none());
+ }
+
+ let as_commitment_update;
+ let bs_second_commitment_update;
+
+ macro_rules! handle_bs_raa { () => {
+ as_commitment_update = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().unwrap();
+ assert!(as_commitment_update.update_add_htlcs.is_empty());
+ assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
+ assert!(as_commitment_update.update_fail_htlcs.is_empty());
+ assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
+ assert!(as_commitment_update.update_fee.is_none());
+ check_added_monitors!(nodes[0], 1);
+ } }
+
+ macro_rules! handle_initial_raa { () => {
+ bs_second_commitment_update = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack).unwrap().unwrap();
+ assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
+ assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
+ assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
+ assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
+ assert!(bs_second_commitment_update.update_fee.is_none());
+ check_added_monitors!(nodes[1], 1);
+ } }
+
+ if (disconnect_count & 8) == 0 {
+ handle_bs_raa!();
+
+ if disconnect_count & !disconnect_flags > 3 {
+ let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
+
+ assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
+ assert!(bs_resp.1.is_none());
+
+ assert!(as_resp.2.unwrap() == as_commitment_update);
+ assert!(bs_resp.2.is_none());
+
+ assert!(as_resp.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst);
+ }
+
+ handle_initial_raa!();
+
+ if disconnect_count & !disconnect_flags > 4 {
+ let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
+
+ assert!(as_resp.1.is_none());
+ assert!(bs_resp.1.is_none());
+
+ assert!(as_resp.2.unwrap() == as_commitment_update);
+ assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
+ }
+ } else {
+ handle_initial_raa!();
+
+ if disconnect_count & !disconnect_flags > 3 {
+ let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
+
+ assert!(as_resp.1.is_none());
+ assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
+
+ assert!(as_resp.2.is_none());
+ assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
+
+ assert!(bs_resp.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst);
+ }
+
+ handle_bs_raa!();
+
+ if disconnect_count & !disconnect_flags > 4 {
+ let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
+
+ assert!(as_resp.1.is_none());
+ assert!(bs_resp.1.is_none());
+
+ assert!(as_resp.2.unwrap() == as_commitment_update);
+ assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
+ }
+ }
+
+ let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed).unwrap();
+ assert!(as_commitment_signed.is_none());
+ check_added_monitors!(nodes[0], 1);
+
+ let (bs_second_revoke_and_ack, bs_third_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed).unwrap();
+ assert!(bs_third_commitment_signed.is_none());
+ check_added_monitors!(nodes[1], 1);
+
+ assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(nodes[1], 1);
+
+ assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(nodes[0], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let events_5 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_5.len(), 1);
+ match events_5[0] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!(payment_hash_2, *payment_hash);
+ assert_eq!(amt, 1000000);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+ }
+
+ #[test]
+ fn test_monitor_temporary_update_fail_a() {
+ do_test_monitor_temporary_update_fail(0);
+ do_test_monitor_temporary_update_fail(1);
+ do_test_monitor_temporary_update_fail(2);
+ do_test_monitor_temporary_update_fail(3);
+ do_test_monitor_temporary_update_fail(4);
+ do_test_monitor_temporary_update_fail(5);
+ }
+
+ #[test]
+ fn test_monitor_temporary_update_fail_b() {
+ do_test_monitor_temporary_update_fail(2 | 8);
+ do_test_monitor_temporary_update_fail(3 | 8);
+ do_test_monitor_temporary_update_fail(4 | 8);
+ do_test_monitor_temporary_update_fail(5 | 8);
+ }
+
+ #[test]
+ fn test_monitor_temporary_update_fail_c() {
+ do_test_monitor_temporary_update_fail(1 | 16);
+ do_test_monitor_temporary_update_fail(2 | 16);
+ do_test_monitor_temporary_update_fail(3 | 16);
+ do_test_monitor_temporary_update_fail(2 | 8 | 16);
+ do_test_monitor_temporary_update_fail(3 | 8 | 16);
+ }
+
#[test]
fn test_invalid_channel_announcement() {
// Test BOLT 7 channel_announcement msg requirements for the final node; gather data to build custom channel_announcement msgs
let as_chan = a_channel_lock.by_id.get(&chan_announcement.3).unwrap();
let bs_chan = b_channel_lock.by_id.get(&chan_announcement.3).unwrap();
- let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap() } );
+ let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
let as_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &as_chan.get_local_keys().funding_key);
let bs_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &bs_chan.get_local_keys().funding_key);
let unsigned_msg = dummy_unsigned_msg!();
sign_msg!(unsigned_msg);
assert_eq!(nodes[0].router.handle_channel_announcement(&chan_announcement).unwrap(), true);
- let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap() } );
+ let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
// Configured with Network::Testnet
let mut unsigned_msg = dummy_unsigned_msg!();