use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash};
use ln::channelmonitor::ChannelMonitorUpdateErr;
use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, LocalFeatures};
+use ln::msgs::{ChannelMessageHandler, LocalFeatures, RoutingMessageHandler};
use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
use util::errors::APIError;
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}
+
+// Note that restore_between_fails with !fail_on_generate is useless (there is no failed update to restore)
+// Also note that !fail_on_generate && !fail_on_signed is useless (no monitor update ever fails)
+// Finally, note that !fail_on_signed is not possible with fail_on_generate && !restore_between_fails (the
+// channel is still in the monitor-update-failed state, so funding_signed must fail)
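+//
+// fail_on_generate: fail the monitor update triggered by funding_transaction_generated
+// restore_between_fails: restore the failed monitor before nodes[0] handles funding_signed
+// fail_on_signed: fail the monitor update triggered by handling funding_signed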
+fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: bool, fail_on_signed: bool) {
+ // Test that if the monitor update generated by funding_transaction_generated fails, we continue
+ // the channel setup happily after the update is restored.
+ let mut nodes = create_network(2, &[None, None]);
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43).unwrap();
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())).unwrap();
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())).unwrap();
+
+ let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 43);
+
+ if fail_on_generate {
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ }
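+ // funding_transaction_generated registers the initial ChannelMonitor; with fail_on_generate set
+ // that update returns a TemporaryFailure.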
+ nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id())).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ if restore_between_fails {
+ assert!(fail_on_generate);
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[0].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[0], 1);
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ }
+
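+ // Optionally queue a monitor failure for the update generated by handling funding_signed.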
+ if fail_on_signed {
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ } else {
+ assert!(restore_between_fails || !fail_on_generate); // We can't switch to good now (there's no monitor update)
+ assert!(fail_on_generate); // Somebody has to fail
+ }
+ let funding_signed_res = nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
+ if fail_on_signed || !restore_between_fails {
+ if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = funding_signed_res.unwrap_err() {
+ if fail_on_generate && !restore_between_fails {
+ assert_eq!(err, "Previous monitor update failure prevented funding_signed from allowing funding broadcast");
+ check_added_monitors!(nodes[0], 0);
+ } else {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ check_added_monitors!(nodes[0], 1);
+ }
+ } else { panic!(); }
+
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[0].node.test_restore_channel_monitor();
+ } else {
+ funding_signed_res.unwrap();
+ }
+
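+ // Whichever path we took, exactly one more monitor add should have happened by now and the
+ // FundingBroadcastSafe event should be pending.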
+ check_added_monitors!(nodes[0], 1);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
+ assert_eq!(user_channel_id, 43);
+ assert_eq!(*funding_txo, funding_output);
+ },
+ _ => panic!("Unexpected event"),
+ };
+
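+ // Finish opening the channel (confirm the funding tx, exchange funding_locked and the
+ // announcement) and let both routers learn about it.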
+ let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &funding_tx);
+ let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
+ for node in nodes.iter() {
+ assert!(node.router.handle_channel_announcement(&announcement).unwrap());
+ node.router.handle_channel_update(&as_update).unwrap();
+ node.router.handle_channel_update(&bs_update).unwrap();
+ }
+
+ send_payment(&nodes[0], &[&nodes[1]], 8000000);
+ close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
+}
+
+#[test]
+fn during_funding_monitor_fail() {
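+ // Arguments: (fail_on_generate, restore_between_fails, fail_on_signed)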
+ do_during_funding_monitor_fail(false, false, true);
+ do_during_funding_monitor_fail(true, false, true);
+ do_during_funding_monitor_fail(true, true, true);
+ do_during_funding_monitor_fail(true, true, false);
+}
if !self.channel_outbound {
return Err(ChannelError::Close("Received funding_signed for an inbound channel?"));
}
- if self.channel_state != ChannelState::FundingCreated as u32 {
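+ // The initial monitor update may have failed, setting MonitorUpdateFailed; mask that bit
+ // out when checking that we are still in FundingCreated.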
+ if self.channel_state & !(ChannelState::MonitorUpdateFailed as u32) != ChannelState::FundingCreated as u32 {
return Err(ChannelError::Close("Received funding_signed in strange state!"));
}
if self.channel_monitor.get_min_seen_secret() != (1 << 48) ||
self.sign_commitment_transaction(&mut local_initial_commitment_tx, &msg.signature);
self.channel_monitor.provide_latest_local_commitment_tx_info(local_initial_commitment_tx.clone(), local_keys, self.feerate_per_kw, Vec::new());
self.last_local_commitment_txn = vec![local_initial_commitment_tx];
- self.channel_state = ChannelState::FundingSent as u32;
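+ // Move to FundingSent, preserving the MonitorUpdateFailed bit if it is set.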
+ self.channel_state = ChannelState::FundingSent as u32 | (self.channel_state & (ChannelState::MonitorUpdateFailed as u32));
self.cur_local_commitment_transaction_number -= 1;
- Ok(self.channel_monitor.clone())
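+ // If an earlier monitor update already failed, don't hand back a monitor (and thus don't
+ // let the caller generate FundingBroadcastSafe); that happens once monitor updating is restored.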
+ if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
+ Ok(self.channel_monitor.clone())
+ } else {
+ Err(ChannelError::Ignore("Previous monitor update failure prevented funding_signed from allowing funding broadcast"))
+ }
}
pub fn funding_locked(&mut self, msg: &msgs::FundingLocked) -> Result<(), ChannelError> {
/// Indicates that the latest ChannelMonitor update has been committed by the client
/// successfully and we should restore normal operation. Returns messages which should be sent
/// to the remote side.
- pub fn monitor_updating_restored(&mut self) -> (Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder, Vec<(PendingForwardHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>) {
+ pub fn monitor_updating_restored(&mut self) -> (Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder, Vec<(PendingForwardHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, bool) {
assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, ChannelState::MonitorUpdateFailed as u32);
self.channel_state &= !(ChannelState::MonitorUpdateFailed as u32);
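+ // For an outbound channel still in FundingSent, the completed monitor persistence is what
+ // makes broadcasting the funding transaction safe.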
+ let needs_broadcast_safe = self.channel_state & (ChannelState::FundingSent as u32) != 0 && self.channel_outbound;
let mut forwards = Vec::new();
mem::swap(&mut forwards, &mut self.monitor_pending_forwards);
if self.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
self.monitor_pending_revoke_and_ack = false;
self.monitor_pending_commitment_signed = false;
- return (None, None, RAACommitmentOrder::RevokeAndACKFirst, forwards, failures);
+ return (None, None, RAACommitmentOrder::RevokeAndACKFirst, forwards, failures, needs_broadcast_safe);
}
let raa = if self.monitor_pending_revoke_and_ack {
self.monitor_pending_revoke_and_ack = false;
self.monitor_pending_commitment_signed = false;
let order = self.resend_order.clone();
- log_trace!(self, "Restored monitor updating resulting in {} commitment update and {} RAA, with {} first",
+ log_trace!(self, "Restored monitor updating resulting in {}{} commitment update and {} RAA, with {} first",
+ if needs_broadcast_safe { "a funding broadcast safe, " } else { "" },
if commitment_update.is_some() { "a" } else { "no" },
if raa.is_some() { "an" } else { "no" },
match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
- (raa, commitment_update, order, forwards, failures)
+ (raa, commitment_update, order, forwards, failures, needs_broadcast_safe)
}
pub fn update_fee(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::UpdateFee) -> Result<(), ChannelError> {
pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
let _ = self.total_consistency_lock.read().unwrap();
- let (chan, msg, chan_monitor) = {
+ let (mut chan, msg, chan_monitor) = {
let (res, chan) = {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.remove(temporary_channel_id) {
};
// Because we have exclusive ownership of the channel here we can release the channel_state
// lock before add_update_monitor
- if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
- unimplemented!();
+ if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ match e {
+ ChannelMonitorUpdateErr::PermanentFailure => {
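+ // A permanent monitor failure means the channel cannot continue: force-close it
+ // and notify the peer.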
+ match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(), None))) {
+ Err(e) => {
+ log_error!(self, "Failed to store ChannelMonitor update for funding tx generation");
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: chan.get_their_node_id(),
+ action: e.action,
+ });
+ return;
+ },
+ Ok(()) => unreachable!(),
+ }
+ },
+ ChannelMonitorUpdateErr::TemporaryFailure => {
+ // It's completely fine to continue with a FundingCreated until the monitor
+ // update is persisted, as long as we don't generate the FundingBroadcastSafe
+ // until the monitor has been safely persisted (as funding broadcast is not,
+ // in fact, safe).
+ chan.monitor_update_failed(false, false, Vec::new(), Vec::new());
+ },
+ }
}
let mut channel_state = self.channel_state.lock().unwrap();
let mut close_results = Vec::new();
let mut htlc_forwards = Vec::new();
let mut htlc_failures = Vec::new();
+ let mut pending_events = Vec::new();
let _ = self.total_consistency_lock.read().unwrap();
{
ChannelMonitorUpdateErr::TemporaryFailure => true,
}
} else {
- let (raa, commitment_update, order, pending_forwards, mut pending_failures) = channel.monitor_updating_restored();
+ let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe) = channel.monitor_updating_restored();
if !pending_forwards.is_empty() {
htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards));
}
handle_cs!();
},
}
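+ // The monitor has now been persisted, so it is finally safe for the user to broadcast
+ // the funding transaction.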
+ if needs_broadcast_safe {
+ pending_events.push(events::Event::FundingBroadcastSafe {
+ funding_txo: channel.get_funding_txo().unwrap(),
+ user_channel_id: channel.get_user_id(),
+ });
+ }
true
}
} else { true }
});
}
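+ // Deliver any FundingBroadcastSafe events we generated above to the user.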
+ self.pending_events.lock().unwrap().append(&mut pending_events);
+
for failure in htlc_failures.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
}
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
let chan_monitor = try_chan_entry!(self, chan.get_mut().funding_signed(&msg), channel_state, chan);
- if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
- unimplemented!();
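+ // If persisting the new monitor fails, handle it via the common monitor-failure path.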
+ if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
}
(chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
},
}
}
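+// Handles node's pending FundingGenerationReady event, returning the temporary channel id, a
+// dummy funding transaction paying to the channel's output script, and the corresponding
+// funding OutPoint.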
+pub fn create_funding_transaction(node: &Node, expected_chan_value: u64, expected_user_chan_id: u64) -> ([u8; 32], Transaction, OutPoint) {
+ let chan_id = *node.network_chan_count.borrow();
-pub fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64, a_flags: LocalFeatures, b_flags: LocalFeatures) -> Transaction {
- node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap();
- node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id())).unwrap();
- node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id())).unwrap();
-
- let chan_id = *node_a.network_chan_count.borrow();
- let tx;
- let funding_output;
-
- let events_2 = node_a.node.get_and_clear_pending_events();
- assert_eq!(events_2.len(), 1);
- match events_2[0] {
+ let events = node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
- assert_eq!(*channel_value_satoshis, channel_value);
- assert_eq!(user_channel_id, 42);
+ assert_eq!(*channel_value_satoshis, expected_chan_value);
+ assert_eq!(user_channel_id, expected_user_chan_id);
- tx = Transaction { version: chan_id as u32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+ let tx = Transaction { version: chan_id as u32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
value: *channel_value_satoshis, script_pubkey: output_script.clone(),
}]};
- funding_output = OutPoint::new(tx.txid(), 0);
-
- node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output);
- let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
- assert_eq!(added_monitors.len(), 1);
- assert_eq!(added_monitors[0].0, funding_output);
- added_monitors.clear();
+ let funding_outpoint = OutPoint::new(tx.txid(), 0);
+ (*temporary_channel_id, tx, funding_outpoint)
},
_ => panic!("Unexpected event"),
}
+}
+
+pub fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64, a_flags: LocalFeatures, b_flags: LocalFeatures) -> Transaction {
+ node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap();
+ node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id())).unwrap();
+ node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id())).unwrap();
+
+ let (temporary_channel_id, tx, funding_output) = create_funding_transaction(node_a, channel_value, 42);
+
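+ // Hand the funding outpoint back to node_a, which should register exactly one monitor for it.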
+ {
+ node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output);
+ let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 1);
+ assert_eq!(added_monitors[0].0, funding_output);
+ added_monitors.clear();
+ }
node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id())).unwrap();
{