// "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
// fully-serialized monitor state here, as well as the corresponding update_id.
pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
- pub should_update_manager: atomic::AtomicBool,
}
impl TestChainMonitor {
pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
keys,
persister,
latest_monitors: Mutex::new(HashMap::new()),
- should_update_manager: atomic::AtomicBool::new(false),
}
}
}
if self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)).is_some() {
panic!("Already had monitor pre-watch_channel");
}
- self.should_update_manager.store(true, atomic::Ordering::Relaxed);
self.chain_monitor.watch_channel(funding_txo, monitor)
}
let mut ser = VecWriter(Vec::new());
deserialized_monitor.write(&mut ser).unwrap();
map_entry.insert((update.update_id, ser.0));
- self.should_update_manager.store(true, atomic::Ordering::Relaxed);
self.chain_monitor.update_channel(funding_txo, update)
}
if !chan_a_disconnected {
nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
chan_a_disconnected = true;
- drain_msg_events_on_disconnect!(0);
- }
- if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
- node_a_ser.0.clear();
- nodes[0].write(&mut node_a_ser).unwrap();
+ push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0));
+ ab_events.clear();
+ ba_events.clear();
}
let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
nodes[0] = new_node_a;
if !chan_b_disconnected {
nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
chan_b_disconnected = true;
- drain_msg_events_on_disconnect!(2);
- }
- if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
- node_c_ser.0.clear();
- nodes[2].write(&mut node_c_ser).unwrap();
+ push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2));
+ bc_events.clear();
+ cb_events.clear();
}
let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
nodes[2] = new_node_c;
_ => test_return!(),
}
- node_a_ser.0.clear();
- nodes[0].write(&mut node_a_ser).unwrap();
- monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
- node_b_ser.0.clear();
- nodes[1].write(&mut node_b_ser).unwrap();
- monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
- node_c_ser.0.clear();
- nodes[2].write(&mut node_c_ser).unwrap();
- monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
+ if nodes[0].get_and_clear_needs_persistence() {
+ node_a_ser.0.clear();
+ nodes[0].write(&mut node_a_ser).unwrap();
+ }
+ if nodes[1].get_and_clear_needs_persistence() {
+ node_b_ser.0.clear();
+ nodes[1].write(&mut node_b_ser).unwrap();
+ }
+ if nodes[2].get_and_clear_needs_persistence() {
+ node_c_ser.0.clear();
+ nodes[2].write(&mut node_c_ser).unwrap();
+ }
}
}
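// An illustrative sketch (assumed, not shown in this diff) of the flag backing
// `get_and_clear_needs_persistence()`: an `AtomicBool` which handlers set on state changes
// (see the `needs_persist_flag.store(true, ...)` calls later in this diff) and which is
// atomically swapped to `false` when read, so each change is persisted at most once:
//
// pub fn get_and_clear_needs_persistence(&self) -> bool {
//     self.needs_persist_flag.swap(false, Ordering::Acquire)
// }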
// see `await_start`'s use below.
let mut await_start = None;
if $check_slow_await { await_start = Some($get_timer(1)); }
- let updates_available = $await;
+ $await;
let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };
// Exit the loop if the background processor was requested to stop.
break;
}
- if updates_available {
+ if $channel_manager.get_and_clear_needs_persistence() {
log_trace!($logger, "Persisting ChannelManager...");
$persister.persist_manager(&*$channel_manager)?;
log_trace!($logger, "Done persisting ChannelManager.");
channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
gossip_sync, peer_manager, logger, scorer, should_break, {
let fut = Selector {
- a: channel_manager.get_persistable_update_future(),
+ a: channel_manager.get_event_or_persistence_needed_future(),
b: chain_monitor.get_update_future(),
c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
};
match fut.await {
- SelectorOutput::A => true,
- SelectorOutput::B => false,
+ SelectorOutput::A|SelectorOutput::B => {},
SelectorOutput::C(exit) => {
should_break = exit;
- false
}
}
}, |t| sleeper(Duration::from_secs(t)),
define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
channel_manager, channel_manager.process_pending_events(&event_handler),
gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
- Sleeper::from_two_futures(
- channel_manager.get_persistable_update_future(),
+ { Sleeper::from_two_futures(
+ channel_manager.get_event_or_persistence_needed_future(),
chain_monitor.get_update_future()
- ).wait_timeout(Duration::from_millis(100)),
+ ).wait_timeout(Duration::from_millis(100)); },
|_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
});
Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
check_persisted_data!(nodes[0].node, filepath.clone());
loop {
- if !nodes[0].node.get_persistence_condvar_value() { break }
+ if !nodes[0].node.get_event_or_persist_condvar_value() { break }
}
// Force-close the channel.
// Check that the force-close updates are persisted.
check_persisted_data!(nodes[0].node, filepath.clone());
loop {
- if !nodes[0].node.get_persistence_condvar_value() { break }
+ if !nodes[0].node.get_event_or_persist_condvar_value() { break }
}
// Check network graph is persisted
}
#[cfg(target_os = "windows")]
-fn path_to_windows_str<T: AsRef<OsStr>>(path: T) -> Vec<u16> {
+fn path_to_windows_str<T: AsRef<OsStr>>(path: &T) -> Vec<u16> {
path.as_ref().encode_wide().chain(Some(0)).collect()
}
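// Taking `&T` lets call sites borrow their `PathBuf`s rather than cloning them (note the
// `dest_file_path.clone()` calls removed below). The `chain(Some(0))` appends the NUL
// terminator required by the wide-string (`*W`) Win32 APIs.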
let res = if dest_file_path.exists() {
call!(unsafe {
windows_sys::Win32::Storage::FileSystem::ReplaceFileW(
- path_to_windows_str(dest_file_path.clone()).as_ptr(),
- path_to_windows_str(tmp_file_path).as_ptr(),
+ path_to_windows_str(&dest_file_path).as_ptr(),
+ path_to_windows_str(&tmp_file_path).as_ptr(),
std::ptr::null(),
windows_sys::Win32::Storage::FileSystem::REPLACEFILE_IGNORE_MERGE_ERRORS,
std::ptr::null_mut() as *const core::ffi::c_void,
} else {
call!(unsafe {
windows_sys::Win32::Storage::FileSystem::MoveFileExW(
- path_to_windows_str(tmp_file_path).as_ptr(),
- path_to_windows_str(dest_file_path.clone()).as_ptr(),
+ path_to_windows_str(&tmp_file_path).as_ptr(),
+ path_to_windows_str(&dest_file_path).as_ptr(),
windows_sys::Win32::Storage::FileSystem::MOVEFILE_WRITE_THROUGH
| windows_sys::Win32::Storage::FileSystem::MOVEFILE_REPLACE_EXISTING,
)
call!(unsafe {
windows_sys::Win32::Storage::FileSystem::MoveFileExW(
- path_to_windows_str(dest_file_path).as_ptr(),
- path_to_windows_str(trash_file_path.clone()).as_ptr(),
+ path_to_windows_str(&dest_file_path).as_ptr(),
+ path_to_windows_str(&trash_file_path).as_ptr(),
windows_sys::Win32::Storage::FileSystem::MOVEFILE_WRITE_THROUGH
| windows_sys::Win32::Storage::FileSystem::MOVEFILE_REPLACE_EXISTING,
)
create_announced_chan_between_nodes(&nodes, 0, 1);
// Route two payments to be claimed at the same time.
- let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
- let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clear();
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
// First route a payment that we will claim on chain and give the recipient the preimage.
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
nodes[1].node.claim_funds(payment_preimage);
expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
nodes[1].node.get_and_clear_pending_msg_events();
/// much smaller than a full [`ChannelMonitor`]. However, for large single commitment transaction
/// updates (e.g. ones during which there are hundreds of HTLCs pending on the commitment
/// transaction), a single update may reach upwards of 1 MiB in serialized size.
-#[derive(Clone, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
#[must_use]
pub struct ChannelMonitorUpdate {
pub(crate) updates: Vec<ChannelMonitorUpdateStep>,
);
-#[derive(Clone, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum ChannelMonitorUpdateStep {
LatestHolderCommitmentTXInfo {
commitment_tx: HolderCommitmentTransaction,
/// channel basepoints via the new function, or they were obtained via
/// CommitmentTransaction.trust().keys() because we trusted the source of the
/// pre-calculated keys.
-#[derive(PartialEq, Eq, Clone)]
+#[derive(PartialEq, Eq, Clone, Debug)]
pub struct TxCreationKeys {
/// The broadcaster's per-commitment public key which was used to derive the other keys.
pub per_commitment_point: PublicKey,
/// Information needed to build and sign a holder's commitment transaction.
///
/// The transaction is only signed once we are ready to broadcast.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
pub struct HolderCommitmentTransaction {
inner: CommitmentTransaction,
/// Our counterparty's signature for the transaction
}
/// A pre-built Bitcoin commitment transaction and its txid.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
pub struct BuiltCommitmentTransaction {
/// The commitment transaction
pub transaction: Transaction,
///
/// This class can be used inside a signer implementation to generate a signature given the relevant
/// secret key.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
pub struct CommitmentTransaction {
commitment_number: u64,
to_broadcaster_value_sat: u64,
send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
// Route an HTLC from node 0 to node 1 (but don't settle)
- let (preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
+ let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
// Make a copy of the ChainMonitor so we can capture the error it returns on a
// bogus update. Note that if instead we updated nodes[0]'s ChainMonitor
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
- let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
// Now try to send a second payment which will fail to send
let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
// Route a first payment that we'll fail backwards
- let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+ let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
// Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
nodes[2].node.fail_htlc_backwards(&payment_hash_1);
let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
create_announced_chan_between_nodes(&nodes, 1, 2);
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
// Forward a payment for B to claim
- let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
// Rebalance a bit so that we can send backwards from 3 to 2.
send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
- let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
nodes[1].node.claim_funds(payment_preimage_1);
// Rebalance a bit so that we can send backwards from 3 to 1.
send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
- let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+ let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
nodes[2].node.fail_htlc_backwards(&payment_hash_1);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }]);
check_added_monitors!(nodes[2], 1);
let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
// Forward a payment for B to claim
- let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
// Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
create_announced_chan_between_nodes(&nodes, 0, 1);
let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000);
let bs_txn = get_local_commitment_txn!(nodes[2], chan_id_2);
assert_eq!(bs_txn.len(), 1);
//
// Note that because, at the end, MonitorUpdateInProgress is still set, the HTLC generated in
// (c) will not be freed from the holding cell.
- let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[1], &[&nodes[0]], 100_000);
+ let (payment_preimage_0, payment_hash_0, ..) = route_payment(&nodes[1], &[&nodes[0]], 100_000);
nodes[0].node.send_payment_with_route(&route, payment_hash_1,
RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
create_announced_chan_between_nodes(&nodes, 0, 1);
let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
let mut as_raa = None;
if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
- let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
- let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
// `claim_funds` results in a ChannelMonitorUpdate.
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1).2;
- create_announced_chan_between_nodes(&nodes, 1, 2).2;
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5_000_000);
// Tee up two payments in opposite directions across nodes[1], one it sent to generate a
// PaymentSent event and one it forwards.
- let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2]], 1_000_000);
- let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[2], &[&nodes[1], &nodes[0]], 1_000_000);
+ let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2]], 1_000_000);
+ let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[2], &[&nodes[1], &nodes[0]], 1_000_000);
// Claim the first payment to get a `PaymentSent` event (but don't handle it yet).
nodes[2].node.claim_funds(payment_preimage_1);
let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.update_fulfill_htlcs[0]);
check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update
+ assert!(get_monitor!(nodes[1], chan_id_2).get_stored_preimages().contains_key(&payment_hash_2));
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- // Finish the CS dance between nodes[0] and nodes[1].
- do_commitment_signed_dance(&nodes[1], &nodes[0], &as_htlc_fulfill_updates.commitment_signed, false, false);
+ // Finish the CS dance between nodes[0] and nodes[1]. Note that until the events are
+ // handled, the update_fulfill_htlc + CS is held, even though the preimage is already on
+ // disk for the channel.
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.commitment_signed);
+ check_added_monitors(&nodes[1], 1);
+ let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false);
+ assert!(a.is_none());
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
check_added_monitors(&nodes[1], 0);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 3);
if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); }
- // The event processing should release the last RAA update.
- check_added_monitors(&nodes[1], 1);
+ // The event processing should release the last RAA updates on both channels.
+ check_added_monitors(&nodes[1], 2);
// When we fetch the next update the message getter will generate the next update for nodes[2],
// generating a further monitor update.
do_commitment_signed_dance(&nodes[2], &nodes[1], &bs_htlc_fulfill_updates.commitment_signed, false, false);
expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
}
+
+fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_commitment_dance: bool) {
+ // When we forward a payment and receive `update_fulfill_htlc`+`commitment_signed` messages
+ // from the downstream channel, we immediately claim the HTLC on the upstream channel, before
+ // even doing a `commitment_signed` dance on the downstream channel. This implies that our
+ // `ChannelMonitorUpdate`s are generated in the right order - first we ensure we'll get our
+ // money, then we write the update that resolves the downstream node claiming their money. This
+ // is safe as long as `ChannelMonitorUpdate`s complete in the order in which they are
+ // generated, but of course this may not be the case. For asynchronous update writes, we have
+ // to ensure monitor updates can block each other, preventing the inversion altogether.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+ let persister;
+ let new_chain_monitor;
+ let nodes_1_deserialized;
+
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+ let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
+
+ // Route a payment from A, through B, to C, then claim it on C. Once we pass B the
+ // `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
+ // on the B<->C channel but leave the A<->B monitor update pending, then reload B.
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+
+ let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
+ let mut manager_b = Vec::new();
+ if !with_latest_manager {
+ manager_b = nodes[1].node.encode();
+ }
+
+ nodes[2].node.claim_funds(payment_preimage);
+ check_added_monitors(&nodes[2], 1);
+ expect_payment_claimed!(nodes[2], payment_hash, 100_000);
+
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+
+ // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
+ // for it since the monitor update is marked in-progress.
+ check_added_monitors(&nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ // Now step the Commitment Signed Dance between B and C forward a bit (or fully), ensuring we
+ // won't get the preimage when the nodes reconnect and we have to get it from the
+ // ChannelMonitor.
+ nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
+ check_added_monitors(&nodes[1], 1);
+ if complete_bc_commitment_dance {
+ let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+ nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
+ check_added_monitors(&nodes[2], 1);
+ nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed);
+ check_added_monitors(&nodes[2], 1);
+ let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ // At this point node B still hasn't persisted the `ChannelMonitorUpdate` with the
+ // preimage in the A <-> B channel, which will prevent it from persisting the
+ // `ChannelMonitorUpdate` for the B<->C channel here to avoid "losing" the preimage.
+ nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &cs_raa);
+ check_added_monitors(&nodes[1], 0);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ }
+
+ // Now reload node B
+ if with_latest_manager {
+ manager_b = nodes[1].node.encode();
+ }
+
+ let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
+ reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+
+ if with_latest_manager {
+ // If we used the latest ChannelManager to reload from, we should have both channels still
+ // live. The B <-> C channel's final RAA ChannelMonitorUpdate must still be blocked as
+ // before - the ChannelMonitorUpdate for the A <-> B channel hasn't completed.
+ // When we call `timer_tick_occurred` we will get that monitor update back, which we'll
+ // complete after reconnecting to our peers.
+ persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ nodes[1].node.timer_tick_occurred();
+ check_added_monitors(&nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ // Now reconnect B to both A and C. If the B <-> C commitment signed dance wasn't run to
+ // the end, go ahead and do that, though the
+ // `pending_responding_commitment_signed_dup_monitor` in `reconnect_args` indicates that we
+ // expect to *not* receive the final RAA ChannelMonitorUpdate.
+ if complete_bc_commitment_dance {
+ reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
+ } else {
+ let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+ reconnect_args.pending_responding_commitment_signed.1 = true;
+ reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
+ reconnect_args.pending_raa = (false, true);
+ reconnect_nodes(reconnect_args);
+ }
+
+ reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
+
+ // (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on
+ // disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating
+ // process.
+ let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+ nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
+
+ // When we fetch B's HTLC update messages next (now that the ChannelMonitorUpdate has
+ // completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C
+ // channel.
+ } else {
+ // If the ChannelManager used in the reload was stale, check that the B <-> C channel was
+ // closed.
+ //
+ // Note that this will also process the ChannelMonitorUpdates which were queued up when we
+ // reloaded the ChannelManager. This will re-emit the A<->B preimage as well as the B<->C
+ // force-closure ChannelMonitorUpdate. Once the A<->B preimage update completes, the claim
+ // commitment update will be allowed to go out.
+ check_added_monitors(&nodes[1], 0);
+ persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ check_closed_event(&nodes[1], 1, ClosureReason::OutdatedChannelManager, false, &[nodes[2].node.get_our_node_id()], 100_000);
+ check_added_monitors(&nodes[1], 2);
+
+ nodes[1].node.timer_tick_occurred();
+ check_added_monitors(&nodes[1], 0);
+
+ // Don't bother to reconnect B to C - that channel has been closed. We don't need to
+ // exchange any messages here even though there's a pending commitment update because the
+ // ChannelMonitorUpdate hasn't yet completed.
+ reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
+
+ let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+ nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
+
+ // The ChannelMonitorUpdate which was completed prior to the reconnect only contained the
+ // preimage (as it was a replay of the original ChannelMonitorUpdate from before we
+ // restarted). When we go to fetch the commitment transaction updates we'll poll the
+ // ChannelMonitorUpdate completion, then generate (and complete) a new ChannelMonitorUpdate
+ // with the actual commitment transaction, which will allow us to fulfill the HTLC with
+ // node A.
+ }
+
+ let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+ check_added_monitors(&nodes[1], 1);
+
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
+ do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false);
+
+ expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, !with_latest_manager);
+
+ // Finally, check that the payment was, ultimately, seen as sent by node A.
+ expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
+}
+
+#[test]
+fn test_inverted_mon_completion_order() {
+ do_test_inverted_mon_completion_order(true, true);
+ do_test_inverted_mon_completion_order(true, false);
+ do_test_inverted_mon_completion_order(false, true);
+ do_test_inverted_mon_completion_order(false, false);
+}
+
+fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, close_only_a: bool, hold_post_reload_mon_update: bool) {
+ // Test that we can apply a `ChannelMonitorUpdate` with a payment preimage even if the channel
+ // is force-closed between when we generate the update on reload and when we go to handle the
+ // update or prior to generating the update at all.
+
+ if !close_chans_before_reload && close_only_a {
+ // If we're not closing, it makes no sense to "only close A"
+ panic!();
+ }
+
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+ let persister;
+ let new_chain_monitor;
+ let nodes_1_deserialized;
+
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+ let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
+
+ // Route a payment from A, through B, to C, then claim it on C. Once we pass B the
+ // `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
+ // on the B<->C channel but leave the A<->B monitor update pending, then reload B.
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
+ let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
+
+ nodes[2].node.claim_funds(payment_preimage);
+ check_added_monitors(&nodes[2], 1);
+ expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
+
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+
+ // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
+ // for it since the monitor update is marked in-progress.
+ check_added_monitors(&nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ // Now step the Commitment Signed Dance between B and C forward a bit, ensuring we won't get
+ // the preimage when the nodes reconnect, at which point we have to ensure we get it from the
+ // ChannelMonitor.
+ nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
+ check_added_monitors(&nodes[1], 1);
+ let _ = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+
+ let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
+
+ if close_chans_before_reload {
+ if !close_only_a {
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_bc, &nodes[2].node.get_our_node_id()).unwrap();
+ check_closed_broadcast(&nodes[1], 1, true);
+ check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[2].node.get_our_node_id()], 100000);
+ }
+
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[0].node.get_our_node_id()).unwrap();
+ check_closed_broadcast(&nodes[1], 1, true);
+ check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
+ }
+
+ // Now reload node B
+ let manager_b = nodes[1].node.encode();
+ reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+
+ if close_chans_before_reload {
+ // If the channels were already closed, B will rebroadcast its closing transactions here.
+ let bs_close_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ if close_only_a {
+ assert_eq!(bs_close_txn.len(), 2);
+ } else {
+ assert_eq!(bs_close_txn.len(), 3);
+ }
+ }
+
+ nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
+ check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+ let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(as_closing_tx.len(), 1);
+
+ // In order to give A's closing transaction to B without processing background events first,
+ // use the _without_consistency_checks utility method. This is similar to connecting blocks
+ // during startup prior to the node being fully initialized.
+ mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]);
+
+ // After a timer tick a payment preimage ChannelMonitorUpdate is applied to the A<->B
+ // ChannelMonitor (possibly twice), even though the channel has since been closed.
+ check_added_monitors(&nodes[1], 0);
+ let mons_added = if close_chans_before_reload { if !close_only_a { 4 } else { 3 } } else { 2 };
+ if hold_post_reload_mon_update {
+ for _ in 0..mons_added {
+ persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ }
+ }
+ nodes[1].node.timer_tick_occurred();
+ check_added_monitors(&nodes[1], mons_added);
+
+ // Finally, check that B created a payment preimage transaction and close out the payment.
+ let bs_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(bs_txn.len(), if close_chans_before_reload && !close_only_a { 2 } else { 1 });
+ let bs_preimage_tx = &bs_txn[0];
+ check_spends!(bs_preimage_tx, as_closing_tx[0]);
+
+ if !close_chans_before_reload {
+ check_closed_broadcast(&nodes[1], 1, true);
+ check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
+ } else {
+ // While we forwarded the payment a while ago, we don't want to process events too early or
+ // we'll run background tasks we wanted to test individually.
+ expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, !close_only_a);
+ }
+
+ mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
+ check_closed_broadcast(&nodes[0], 1, true);
+ expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
+
+ if !close_chans_before_reload || close_only_a {
+ // Make sure the B<->C channel is still alive and well by sending a payment over it.
+ let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+ reconnect_args.pending_responding_commitment_signed.1 = true;
+ if !close_chans_before_reload {
+ // TODO: If the A<->B channel was closed before we reloaded, the `ChannelManager`
+ // will consider the forwarded payment complete and allow the B<->C
+ // `ChannelMonitorUpdate` to complete, wiping the payment preimage. This should not
+ // be allowed, and needs fixing.
+ reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
+ }
+ reconnect_args.pending_raa.1 = true;
+
+ reconnect_nodes(reconnect_args);
+ let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+ nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id);
+ expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, false);
+ if !close_chans_before_reload {
+ // Once we call `process_pending_events` the final `ChannelMonitorUpdate` for the B<->C
+ // channel will fly, removing the payment preimage from it.
+ check_added_monitors(&nodes[1], 1);
+ }
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+ send_payment(&nodes[1], &[&nodes[2]], 100_000);
+ }
+}
+
+#[test]
+fn test_durable_preimages_on_closed_channel() {
+ do_test_durable_preimages_on_closed_channel(true, true, true);
+ do_test_durable_preimages_on_closed_channel(true, true, false);
+ do_test_durable_preimages_on_closed_channel(true, false, true);
+ do_test_durable_preimages_on_closed_channel(true, false, false);
+ do_test_durable_preimages_on_closed_channel(false, false, true);
+ do_test_durable_preimages_on_closed_channel(false, false, false);
+}
+
+fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
+ // Test that if a `ChannelMonitorUpdate` completes but a `ChannelManager` isn't serialized
+ // before restart, we run the monitor update completion action on startup.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+ let persister;
+ let new_chain_monitor;
+ let nodes_1_deserialized;
+
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+ let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2;
+
+ // Route a payment from A, through B, to C, then claim it on C. Once we pass B the
+ // `update_fulfill_htlc`+`commitment_signed` we have a monitor update for both of B's channels.
+ // We complete the commitment signed dance on the B<->C channel but leave the A<->B monitor
+ // update pending, then reload B. At that point, the final monitor update on the B<->C channel
+ // is still pending because it can't fly until the preimage is persisted on the A<->B monitor.
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
+ nodes[2].node.claim_funds(payment_preimage);
+ check_added_monitors(&nodes[2], 1);
+ expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
+
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+
+ // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
+ // for it since the monitor update is marked in-progress.
+ check_added_monitors(&nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ // Now step the Commitment Signed Dance between B and C and check that after the final RAA B
+ // doesn't let the preimage-removing monitor update fly.
+ nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
+ check_added_monitors(&nodes[1], 1);
+ let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+
+ nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
+ check_added_monitors(&nodes[2], 1);
+ nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
+ check_added_monitors(&nodes[2], 1);
+
+ let cs_final_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &cs_final_raa);
+ check_added_monitors(&nodes[1], 0);
+
+ // Finally, reload node B and check that after we call `process_pending_events` once we realize
+ // we've completed the A<->B preimage-including monitor update and so can release the B<->C
+ // preimage-removing monitor update.
+ let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
+ let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
+ let manager_b = nodes[1].node.encode();
+ reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
+
+ if close_during_reload {
+ // Test that we still free the B<->C channel if the A<->B channel closed while we reloaded
+ // (as learned about during the on-reload block connection).
+ nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ check_closed_broadcast!(nodes[0], true);
+ check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
+ let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]);
+ }
+
+ let bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2;
+ let mut events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), if close_during_reload { 2 } else { 1 });
+ expect_payment_forwarded(events.pop().unwrap(), &nodes[1], &nodes[0], &nodes[2], Some(1000), close_during_reload, false);
+ if close_during_reload {
+ match events[0] {
+ Event::ChannelClosed { .. } => {},
+ _ => panic!(),
+ }
+ check_closed_broadcast!(nodes[1], true);
+ }
+
+ // Once we run event processing the pending monitor update should be released; check that it
+ // was indeed the B<->C channel which was updated.
+ check_added_monitors(&nodes[1], if close_during_reload { 2 } else { 1 });
+ let post_ev_bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2;
+ assert!(bc_update_id != post_ev_bc_update_id);
+
+ // Finally, check that there's nothing left to do on B<->C reconnect and the channel operates
+ // fine.
+ nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+ reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2]));
+ send_payment(&nodes[1], &[&nodes[2]], 100_000);
+}
+
+#[test]
+fn test_reload_mon_update_completion_actions() {
+ do_test_reload_mon_update_completion_actions(true);
+ do_test_reload_mon_update_completion_actions(false);
+}
}
/// Tracks the inbound corresponding to an outbound HTLC
-#[derive(Clone, Hash, PartialEq, Eq)]
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub(crate) struct HTLCPreviousHopData {
// Note that this may be an outbound SCID alias for the associated channel.
short_channel_id: u64,
}
}
-/// A payment identifier used to uniquely identify a payment to LDK.
+/// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
+/// a payment and ensure idempotency in LDK.
///
/// This is not exported to bindings users as we just use [u8; 32] directly
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
}
}
-#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
/// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
pub(crate) enum SentHTLCId {
PreviousHopData { short_channel_id: u64, htlc_id: u64 },
/// Tracks the inbound corresponding to an outbound HTLC
#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
-#[derive(Clone, PartialEq, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum HTLCSource {
PreviousHopData(HTLCPreviousHopData),
OutboundRoute {
channel_capacity: None,
}
}
+
+ fn closes_channel(&self) -> bool {
+ self.chan_id.is_some()
+ }
}
/// We hold back HTLCs we intend to relay for a random interval greater than this (see
}
impl RAAMonitorUpdateBlockingAction {
- #[allow(unused)]
fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
Self::ForwardedPaymentInboundClaim {
channel_id: prev_hop.outpoint.to_channel_id(),
background_events_processed_since_startup: AtomicBool,
- persistence_notifier: Notifier,
+ event_persist_notifier: Notifier,
+ needs_persist_flag: AtomicBool,
entropy_source: ES,
node_signer: NS,
#[must_use]
enum NotifyOption {
DoPersist,
- SkipPersist,
+ SkipPersistHandleEvents,
+ SkipPersistNoEvents,
}
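// How the variants are acted upon (see the `Drop` impl later in this diff): `DoPersist` sets
// `needs_persist_flag` and wakes waiters, `SkipPersistHandleEvents` wakes waiters without
// requesting a re-persist, and `SkipPersistNoEvents` does nothing.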
/// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
/// We allow callers to either always notify by constructing with `notify_on_drop` or choose to
/// notify or not based on whether relevant changes have been made, providing a closure to
/// `optionally_notify` which returns a `NotifyOption`.
-struct PersistenceNotifierGuard<'a, F: Fn() -> NotifyOption> {
- persistence_notifier: &'a Notifier,
+struct PersistenceNotifierGuard<'a, F: FnMut() -> NotifyOption> {
+ event_persist_notifier: &'a Notifier,
+ needs_persist_flag: &'a AtomicBool,
should_persist: F,
// We hold onto this result so the lock doesn't get released immediately.
_read_guard: RwLockReadGuard<'a, ()>,
}
impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { // We don't care what the concrete F is here, it's unused
- fn notify_on_drop<C: AChannelManager>(cm: &'a C) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
+ /// Notifies any waiters and indicates that we need to persist, in addition to possibly having
+ /// events to handle.
+ ///
+ /// This must always be called if the changes included a `ChannelMonitorUpdate`, as well as in
+ /// other cases where losing the changes on restart may result in a force-close or otherwise
+ /// isn't ideal.
+ fn notify_on_drop<C: AChannelManager>(cm: &'a C) -> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
+ Self::optionally_notify(cm, || -> NotifyOption { NotifyOption::DoPersist })
+ }
+
+ fn optionally_notify<F: FnMut() -> NotifyOption, C: AChannelManager>(cm: &'a C, mut persist_check: F)
+ -> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
- let _ = cm.get_cm().process_background_events(); // We always persist
+ let force_notify = cm.get_cm().process_background_events();
PersistenceNotifierGuard {
- persistence_notifier: &cm.get_cm().persistence_notifier,
- should_persist: || -> NotifyOption { NotifyOption::DoPersist },
+ event_persist_notifier: &cm.get_cm().event_persist_notifier,
+ needs_persist_flag: &cm.get_cm().needs_persist_flag,
+ should_persist: move || {
+ // Pick the "most" action between `persist_check` and the background events
+ // processing and return that.
+ let notify = persist_check();
+ match (notify, force_notify) {
+ (NotifyOption::DoPersist, _) => NotifyOption::DoPersist,
+ (_, NotifyOption::DoPersist) => NotifyOption::DoPersist,
+ (NotifyOption::SkipPersistHandleEvents, _) => NotifyOption::SkipPersistHandleEvents,
+ (_, NotifyOption::SkipPersistHandleEvents) => NotifyOption::SkipPersistHandleEvents,
+ _ => NotifyOption::SkipPersistNoEvents,
+ }
+ },
_read_guard: read_guard,
}
-
}
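// Typical usage, matching `timer_tick_occurred` and `maybe_update_chan_fees` later in this
// diff - wrap the handler body in a guard and return the strongest `NotifyOption` needed
// (a sketch; the handler body is assumed):
//
// let _guard = PersistenceNotifierGuard::optionally_notify(self, || {
//     // ... mutate channel state ...
//     NotifyOption::DoPersist
// });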
/// Note that if any [`ChannelMonitorUpdate`]s are possibly generated,
- /// [`ChannelManager::process_background_events`] MUST be called first.
- fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a Notifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
- let read_guard = lock.read().unwrap();
+ /// [`ChannelManager::process_background_events`] MUST be called first (or
+ /// [`Self::optionally_notify`] used).
+ fn optionally_notify_skipping_background_events<F: Fn() -> NotifyOption, C: AChannelManager>
+ (cm: &'a C, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
+ let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
PersistenceNotifierGuard {
- persistence_notifier: notifier,
+ event_persist_notifier: &cm.get_cm().event_persist_notifier,
+ needs_persist_flag: &cm.get_cm().needs_persist_flag,
should_persist: persist_check,
_read_guard: read_guard,
}
}
}
-impl<'a, F: Fn() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
+impl<'a, F: FnMut() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
fn drop(&mut self) {
- if (self.should_persist)() == NotifyOption::DoPersist {
- self.persistence_notifier.notify();
+ match (self.should_persist)() {
+ NotifyOption::DoPersist => {
+ self.needs_persist_flag.store(true, Ordering::Release);
+ self.event_persist_notifier.notify()
+ },
+ NotifyOption::SkipPersistHandleEvents =>
+ self.event_persist_notifier.notify(),
+ NotifyOption::SkipPersistNoEvents => {},
}
}
}
pub enum RecentPaymentDetails {
/// When an invoice was requested and thus a payment has not yet been sent.
AwaitingInvoice {
- /// Identifier for the payment to ensure idempotency.
+ /// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
+ /// a payment and ensure idempotency in LDK.
payment_id: PaymentId,
},
/// When a payment is still being sent and awaiting successful delivery.
Pending {
+ /// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
+ /// a payment and ensure idempotency in LDK.
+ payment_id: PaymentId,
/// Hash of the payment that is currently being sent but has yet to be fulfilled or
/// abandoned.
payment_hash: PaymentHash,
/// been resolved. Upon receiving [`Event::PaymentSent`], we delay for a few minutes before the
/// payment is removed from tracking.
Fulfilled {
+ /// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
+ /// a payment and ensure idempotency in LDK.
+ payment_id: PaymentId,
/// Hash of the payment that was claimed. `None` for serializations of [`ChannelManager`]
/// made before LDK version 0.0.104.
payment_hash: Option<PaymentHash>,
/// abandoned via [`ChannelManager::abandon_payment`], it is marked as abandoned until all
/// pending HTLCs for this payment resolve and an [`Event::PaymentFailed`] is generated.
Abandoned {
+ /// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
+ /// a payment and ensure idempotency in LDK.
+ payment_id: PaymentId,
/// Hash of the payment that we have given up trying to send.
payment_hash: PaymentHash,
},
return;
}
- let mut result = NotifyOption::SkipPersist;
+ let mut result;
{
// We'll acquire our total consistency lock so that we can be sure no other
// Because `handle_post_event_actions` may send `ChannelMonitorUpdate`s to the user we must
// ensure any startup-generated background events are handled first.
- if $self.process_background_events() == NotifyOption::DoPersist { result = NotifyOption::DoPersist; }
+ result = $self.process_background_events();
// TODO: This behavior should be documented. It's unintuitive that we query
// ChannelMonitors when clearing other events.
processed_all_events = false;
}
- if result == NotifyOption::DoPersist {
- $self.persistence_notifier.notify();
+ match result {
+ NotifyOption::DoPersist => {
+ $self.needs_persist_flag.store(true, Ordering::Release);
+ $self.event_persist_notifier.notify();
+ },
+ NotifyOption::SkipPersistHandleEvents =>
+ $self.event_persist_notifier.notify(),
+ NotifyOption::SkipPersistNoEvents => {},
}
}
}
pending_background_events: Mutex::new(Vec::new()),
total_consistency_lock: RwLock::new(()),
background_events_processed_since_startup: AtomicBool::new(false),
- persistence_notifier: Notifier::new(),
+
+ event_persist_notifier: Notifier::new(),
+ needs_persist_flag: AtomicBool::new(false),
entropy_source,
node_signer,
},
PendingOutboundPayment::Retryable { payment_hash, total_msat, .. } => {
Some(RecentPaymentDetails::Pending {
+ payment_id: *payment_id,
payment_hash: *payment_hash,
total_msat: *total_msat,
})
},
PendingOutboundPayment::Abandoned { payment_hash, .. } => {
- Some(RecentPaymentDetails::Abandoned { payment_hash: *payment_hash })
+ Some(RecentPaymentDetails::Abandoned { payment_id: *payment_id, payment_hash: *payment_hash })
},
PendingOutboundPayment::Fulfilled { payment_hash, .. } => {
- Some(RecentPaymentDetails::Fulfilled { payment_hash: *payment_hash })
+ Some(RecentPaymentDetails::Fulfilled { payment_id: *payment_id, payment_hash: *payment_hash })
},
PendingOutboundPayment::Legacy { .. } => None
})
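// With `payment_id` now carried in each variant, callers of the (assumed) accessor
// `ChannelManager::list_recent_payments` can correlate entries back to the id they passed to
// `send_payment`, e.g.:
//
// for details in channel_manager.list_recent_payments() {
//     if let RecentPaymentDetails::Pending { payment_id, payment_hash, .. } = details {
//         // correlate this in-flight payment with the id passed to send_payment
//     }
// }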
let mut background_events = Vec::new();
mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
if background_events.is_empty() {
- return NotifyOption::SkipPersist;
+ return NotifyOption::SkipPersistNoEvents;
}
for event in background_events.drain(..) {
}
fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
- if !chan.context.is_outbound() { return NotifyOption::SkipPersist; }
+ if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
// If the feerate has decreased by less than half, don't bother
if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
- &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
- return NotifyOption::SkipPersist;
+ chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
+ return NotifyOption::SkipPersistNoEvents;
}
if !chan.context.is_live() {
log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
- &chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
- return NotifyOption::SkipPersist;
+ chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
+ return NotifyOption::SkipPersistNoEvents;
}
log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
&chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
/// these a fuzz failure (as they usually indicate a channel force-close, which is exactly what
/// it wants to detect). Thus, we have a variant exposed here for its benefit.
pub fn maybe_update_chan_fees(&self) {
- PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- let mut should_persist = self.process_background_events();
+ PersistenceNotifierGuard::optionally_notify(self, || {
+ let mut should_persist = NotifyOption::SkipPersistNoEvents;
let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);
/// [`ChannelUpdate`]: msgs::ChannelUpdate
/// [`ChannelConfig`]: crate::util::config::ChannelConfig
pub fn timer_tick_occurred(&self) {
- PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- let mut should_persist = self.process_background_events();
+ PersistenceNotifierGuard::optionally_notify(self, || {
+ let mut should_persist = NotifyOption::SkipPersistNoEvents;
let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum);
self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
}
- fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_outpoint: OutPoint) {
+ fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
+ forwarded_htlc_value_msat: Option<u64>, from_onchain: bool,
+ next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint
+ ) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire),
"We don't support claim_htlc claims during startup - monitors may not be available yet");
+ if let Some(pubkey) = next_channel_counterparty_node_id {
+ debug_assert_eq!(pubkey, path.hops[0].pubkey);
+ }
let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: next_channel_outpoint,
counterparty_node_id: path.hops[0].pubkey,
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_outpoint = hop_data.outpoint;
+ let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
let res = self.claim_funds_from_hop(hop_data, payment_preimage,
|htlc_claim_value_msat| {
if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
next_channel_id: Some(next_channel_outpoint.to_channel_id()),
outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
},
- downstream_counterparty_and_funding_outpoint: None,
+ downstream_counterparty_and_funding_outpoint:
+ if let Some(node_id) = next_channel_counterparty_node_id {
+ Some((node_id, next_channel_outpoint, completed_blocker))
+ } else {
+ // We can only get `None` here if we are processing a
+ // `ChannelMonitor`-originated event, in which case we
+ // don't care about ensuring we wake the downstream
+ // channel's monitor updating - the channel is already
+ // closed.
+ None
+ },
})
} else { None }
});
}
fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
+ // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
+ // likely to be lost on restart!
if msg.chain_hash != self.genesis_hash {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
}
}
fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
+ // Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
+ // likely to be lost on restart!
let (value, output_script, user_id) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
}
fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
+ // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
+ // closing a channel), so any changes are likely to be lost on restart!
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
//encrypted with the same key. It's not immediately obvious how to usefully exploit that,
//but we should prevent it anyway.
+ // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
+ // closing a channel), so any changes are likely to be lost on restart!
+
let decoded_hop_res = self.decode_update_add_htlc_onion(msg);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
hash_map::Entry::Occupied(mut chan_phase_entry) => {
if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry);
+ if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
+ peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
+ .or_insert_with(Vec::new)
+ .push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop));
+ }
+ // Note that we do not need to push an `actions_blocking_raa_monitor_updates`
+ // entry here, even though we *do* need to block the next RAA monitor update.
+ // We do this instead in the `claim_funds_internal` by attaching a
+ // `ReleaseRAAChannelMonitorUpdate` action to the event generated when the
+ // outbound HTLC is claimed. This is guaranteed to all complete before we
+ // process the RAA as messages are processed from single peers serially.
funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
res
} else {
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
- self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, funding_txo);
+ self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, Some(*counterparty_node_id), funding_txo);
Ok(())
}
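// Sketch of the blocker bookkeeping introduced above, with simplified stand-in types
// (the real `RAAMonitorUpdateBlockingAction` is built from the HTLC's previous-hop data;
// a `u64` token stands in here purely for illustration):
use std::collections::HashMap;

type ChannelId = [u8; 32];
#[derive(PartialEq)]
struct Blocker(u64); // stand-in for RAAMonitorUpdateBlockingAction

struct PeerState { actions_blocking_raa_monitor_updates: HashMap<ChannelId, Vec<Blocker>> }

impl PeerState {
	// On receiving update_fulfill_htlc: the next RAA-triggered monitor update on this
	// channel must wait until the inbound edge's monitor durably holds the preimage.
	fn block_raa(&mut self, chan: ChannelId, blocker: Blocker) {
		self.actions_blocking_raa_monitor_updates.entry(chan).or_insert_with(Vec::new).push(blocker);
	}
	// Once the upstream claim's monitor update completes, drop the blocker. Returns
	// whether the channel is now free to process its held RAA monitor update.
	fn unblock_raa(&mut self, chan: ChannelId, blocker: &Blocker) -> bool {
		match self.actions_blocking_raa_monitor_updates.get_mut(&chan) {
			Some(blockers) => { blockers.retain(|b| b != blocker); blockers.is_empty() },
			None => true,
		}
	}
}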
fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
+ // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
+ // closing a channel), so any changes are likely to be lost on restart!
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
}
fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
+ // Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
+ // closing a channel), so any changes are likely to be lost on restart!
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
})
}
+ #[cfg(any(test, feature = "_test_utils"))]
+ pub(crate) fn test_raa_monitor_updates_held(&self,
+ counterparty_node_id: PublicKey, channel_id: ChannelId
+ ) -> bool {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
+ let mut peer_state_lck = peer_state_mtx.lock().unwrap();
+ let peer_state = &mut *peer_state_lck;
+
+ if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
+ return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
+ chan.context().get_funding_txo().unwrap(), counterparty_node_id);
+ }
+ }
+ false
+ }
+
fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
let (htlcs_to_fail, res) = {
let per_peer_state = self.per_peer_state.read().unwrap();
Ok(())
}
- /// Returns ShouldPersist if anything changed, otherwise either SkipPersist or an Err.
+ /// Returns DoPersist if anything changed, otherwise either SkipPersistNoEvents or an Err.
fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
let (chan_counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) {
Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
None => {
// It's not a local channel
- return Ok(NotifyOption::SkipPersist)
+ return Ok(NotifyOption::SkipPersistNoEvents)
}
};
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&chan_counterparty_node_id);
if peer_state_mutex_opt.is_none() {
- return Ok(NotifyOption::SkipPersist)
+ return Ok(NotifyOption::SkipPersistNoEvents)
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
// If the announcement is about a channel of ours which is public, some
// other peer may simply be forwarding all its gossip to us. Don't provide
// a scary-looking error message and return Ok instead.
- return Ok(NotifyOption::SkipPersist);
+ return Ok(NotifyOption::SkipPersistNoEvents);
}
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
}
let were_node_one = self.get_our_node_id().serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
let msg_from_node_one = msg.contents.flags & 1 == 0;
if were_node_one == msg_from_node_one {
- return Ok(NotifyOption::SkipPersist);
+ return Ok(NotifyOption::SkipPersistNoEvents);
} else {
log_debug!(self.logger, "Received channel_update for channel {}.", chan_id);
try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
"Got a channel_update for an unfunded channel!".into())), chan_phase_entry);
}
},
- hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
+ hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersistNoEvents)
}
Ok(NotifyOption::DoPersist)
}
- fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
+ fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<NotifyOption, MsgHandleErrInternal> {
let htlc_forwards;
let need_lnd_workaround = {
let per_peer_state = self.per_peer_state.read().unwrap();
}
};
+ let mut persist = NotifyOption::SkipPersistHandleEvents;
if let Some(forwards) = htlc_forwards {
self.forward_htlcs(&mut [forwards][..]);
+ persist = NotifyOption::DoPersist;
}
if let Some(channel_ready_msg) = need_lnd_workaround {
self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
}
- Ok(())
+ Ok(persist)
}
/// Process pending events from the [`chain::Watch`], returning whether any events were processed.
match monitor_event {
MonitorEvent::HTLCEvent(htlc_update) => {
if let Some(preimage) = htlc_update.payment_preimage {
- log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", &preimage);
- self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint);
+ log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", preimage);
+ self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, counterparty_node_id, funding_outpoint);
} else {
log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
/// the `MessageSendEvent`s to the specific peer they were generated under.
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
let events = RefCell::new(Vec::new());
- PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- let mut result = self.process_background_events();
+ PersistenceNotifierGuard::optionally_notify(self, || {
+ let mut result = NotifyOption::SkipPersistNoEvents;
// TODO: This behavior should be documented. It's unintuitive that we query
// ChannelMonitors when clearing other events.
}
fn block_disconnected(&self, header: &BlockHeader, height: u32) {
- let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
- &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
+ let _persistence_guard =
+ PersistenceNotifierGuard::optionally_notify_skipping_background_events(
+ self, || -> NotifyOption { NotifyOption::DoPersist });
let new_height = height - 1;
{
let mut best_block = self.best_block.write().unwrap();
let block_hash = header.block_hash();
log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
- let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
- &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
+ let _persistence_guard =
+ PersistenceNotifierGuard::optionally_notify_skipping_background_events(
+ self, || -> NotifyOption { NotifyOption::DoPersist });
self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)
.map(|(a, b)| (a, Vec::new(), b)));
let block_hash = header.block_hash();
log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
- let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
- &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
+ let _persistence_guard =
+ PersistenceNotifierGuard::optionally_notify_skipping_background_events(
+ self, || -> NotifyOption { NotifyOption::DoPersist });
*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
}
fn transaction_unconfirmed(&self, txid: &Txid) {
- let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
- &self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
+ let _persistence_guard =
+ PersistenceNotifierGuard::optionally_notify_skipping_background_events(
+ self, || -> NotifyOption { NotifyOption::DoPersist });
self.do_chain_event(None, |channel| {
if let Some(funding_txo) = channel.context.get_funding_txo() {
if funding_txo.txid == *txid {
}
}
- /// Gets a [`Future`] that completes when this [`ChannelManager`] needs to be persisted.
+ /// Gets a [`Future`] that completes when this [`ChannelManager`] may need to be persisted or
+ /// may have events that need processing.
+ ///
+ /// In order to check if this [`ChannelManager`] needs persisting, call
+ /// [`Self::get_and_clear_needs_persistence`].
///
/// Note that callbacks registered on the [`Future`] MUST NOT call back into this
/// [`ChannelManager`] and should instead register actions to be taken later.
- ///
- pub fn get_persistable_update_future(&self) -> Future {
- self.persistence_notifier.get_future()
+ pub fn get_event_or_persistence_needed_future(&self) -> Future {
+ self.event_persist_notifier.get_future()
+ }
+
+ /// Returns true if this [`ChannelManager`] needs to be persisted.
+ pub fn get_and_clear_needs_persistence(&self) -> bool {
+ self.needs_persist_flag.swap(false, Ordering::AcqRel)
}
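// How a caller is expected to combine the two new methods: wait on the future, handle
// events, and persist only when the flag was actually set. A blocking sketch with stub
// types; `WakeFuture::wait` and `process_pending_events` are assumptions for illustration.
use std::sync::atomic::{AtomicBool, Ordering};

struct WakeFuture;
impl WakeFuture { fn wait(&self) { /* block until the notifier fires */ } }

struct Mgr { needs_persist_flag: AtomicBool }
impl Mgr {
	fn get_event_or_persistence_needed_future(&self) -> WakeFuture { WakeFuture }
	fn get_and_clear_needs_persistence(&self) -> bool {
		// swap(false) reads and clears atomically, so a needed persist can never be lost
		// to a race, and an event-only wakeup never forces a redundant write.
		self.needs_persist_flag.swap(false, Ordering::AcqRel)
	}
	fn process_pending_events(&self) {}
}

fn persister_loop(mgr: &Mgr, stop: &AtomicBool) {
	while !stop.load(Ordering::Acquire) {
		mgr.get_event_or_persistence_needed_future().wait();
		mgr.process_pending_events(); // the future also fires for event-only wakeups
		if mgr.get_and_clear_needs_persistence() {
			// write the full ChannelManager here
		}
	}
}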
#[cfg(any(test, feature = "_test_utils"))]
- pub fn get_persistence_condvar_value(&self) -> bool {
- self.persistence_notifier.notify_pending()
+ pub fn get_event_or_persist_condvar_value(&self) -> bool {
+ self.event_persist_notifier.notify_pending()
}
/// Gets the latest best block which was connected either via the [`chain::Listen`] or
L::Target: Logger,
{
fn handle_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, msg), *counterparty_node_id);
+ // Note that we never need to persist the updated ChannelManager for an inbound
+ // open_channel message - pre-funded channels are never written, so there should be no
+ // change to the contents.
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+ let res = self.internal_open_channel(counterparty_node_id, msg);
+ let persist = match &res {
+ Err(e) if e.closes_channel() => {
+ debug_assert!(false, "We shouldn't close a new channel");
+ NotifyOption::DoPersist
+ },
+ _ => NotifyOption::SkipPersistHandleEvents,
+ };
+ let _ = handle_error!(self, res, *counterparty_node_id);
+ persist
+ });
}
fn handle_open_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannelV2) {
}
fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id);
+ // Note that we never need to persist the updated ChannelManager for an inbound
+ // accept_channel message - pre-funded channels are never written, so there should be no
+ // change to the contents.
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+ let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id);
+ NotifyOption::SkipPersistHandleEvents
+ });
}
fn handle_accept_channel_v2(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannelV2) {
}
fn handle_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let _ = handle_error!(self, self.internal_channel_ready(counterparty_node_id, msg), *counterparty_node_id);
+ // Note that we never need to persist the updated ChannelManager for an inbound
+ // channel_ready message - while the channel's state will change, any channel_ready message
+ // will ultimately be re-sent on startup and the `ChannelMonitor` won't be updated, so we
+ // will not force-close the channel on startup.
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+ let res = self.internal_channel_ready(counterparty_node_id, msg);
+ let persist = match &res {
+ Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+ _ => NotifyOption::SkipPersistHandleEvents,
+ };
+ let _ = handle_error!(self, res, *counterparty_node_id);
+ persist
+ });
}
fn handle_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) {
}
fn handle_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let _ = handle_error!(self, self.internal_update_add_htlc(counterparty_node_id, msg), *counterparty_node_id);
+ // Note that we never need to persist the updated ChannelManager for an inbound
+ // update_add_htlc message - the message itself doesn't change our channel state; only the
+ // `commitment_signed` message afterwards will.
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+ let res = self.internal_update_add_htlc(counterparty_node_id, msg);
+ let persist = match &res {
+ Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+ Err(_) => NotifyOption::SkipPersistHandleEvents,
+ Ok(()) => NotifyOption::SkipPersistNoEvents,
+ };
+ let _ = handle_error!(self, res, *counterparty_node_id);
+ persist
+ });
}
fn handle_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) {
}
fn handle_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let _ = handle_error!(self, self.internal_update_fail_htlc(counterparty_node_id, msg), *counterparty_node_id);
+ // Note that we never need to persist the updated ChannelManager for an inbound
+ // update_fail_htlc message - the message itself doesn't change our channel state; only the
+ // `commitment_signed` message afterwards will.
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+ let res = self.internal_update_fail_htlc(counterparty_node_id, msg);
+ let persist = match &res {
+ Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+ Err(_) => NotifyOption::SkipPersistHandleEvents,
+ Ok(()) => NotifyOption::SkipPersistNoEvents,
+ };
+ let _ = handle_error!(self, res, *counterparty_node_id);
+ persist
+ });
}
fn handle_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let _ = handle_error!(self, self.internal_update_fail_malformed_htlc(counterparty_node_id, msg), *counterparty_node_id);
+ // Note that we never need to persist the updated ChannelManager for an inbound
+ // update_fail_malformed_htlc message - the message itself doesn't change our channel state;
+ // only the `commitment_signed` message afterwards will.
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+ let res = self.internal_update_fail_malformed_htlc(counterparty_node_id, msg);
+ let persist = match &res {
+ Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+ Err(_) => NotifyOption::SkipPersistHandleEvents,
+ Ok(()) => NotifyOption::SkipPersistNoEvents,
+ };
+ let _ = handle_error!(self, res, *counterparty_node_id);
+ persist
+ });
}
fn handle_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) {
}
fn handle_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let _ = handle_error!(self, self.internal_update_fee(counterparty_node_id, msg), *counterparty_node_id);
+ // Note that we never need to persist the updated ChannelManager for an inbound
+ // update_fee message - the message itself doesn't change our channel state; only the
+ // `commitment_signed` message afterwards will.
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+ let res = self.internal_update_fee(counterparty_node_id, msg);
+ let persist = match &res {
+ Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+ Err(_) => NotifyOption::SkipPersistHandleEvents,
+ Ok(()) => NotifyOption::SkipPersistNoEvents,
+ };
+ let _ = handle_error!(self, res, *counterparty_node_id);
+ persist
+ });
}
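// The Err-closes / Err-other / Ok triage above repeats across the HTLC-update handlers;
// its shape could be factored once. A hedged sketch reusing the `NotifyOption` enum from
// the earlier sketch; the `ClosesChannel` trait is illustrative, not LDK's actual API.
trait ClosesChannel { fn closes_channel(&self) -> bool; }

fn persist_for_update_msg<E: ClosesChannel>(res: &Result<(), E>) -> NotifyOption {
	match res {
		// A channel closure mutates persisted state, so a full persist is required.
		Err(e) if e.closes_channel() => NotifyOption::DoPersist,
		// Other errors queue an error message to send, but change nothing persisted.
		Err(_) => NotifyOption::SkipPersistHandleEvents,
		// Success: the commitment_signed that follows will drive the real persist.
		Ok(()) => NotifyOption::SkipPersistNoEvents,
	}
}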
fn handle_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) {
}
fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
- PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
- let force_persist = self.process_background_events();
+ PersistenceNotifierGuard::optionally_notify(self, || {
if let Ok(persist) = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id) {
- if force_persist == NotifyOption::DoPersist { NotifyOption::DoPersist } else { persist }
+ persist
} else {
- NotifyOption::SkipPersist
+ NotifyOption::DoPersist
}
});
}
fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
- let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
+ let res = self.internal_channel_reestablish(counterparty_node_id, msg);
+ let persist = match &res {
+ Err(e) if e.closes_channel() => NotifyOption::DoPersist,
+ Err(_) => NotifyOption::SkipPersistHandleEvents,
+ Ok(persist) => *persist,
+ };
+ let _ = handle_error!(self, res, *counterparty_node_id);
+ persist
+ });
}
fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ let _persistence_guard = PersistenceNotifierGuard::optionally_notify(
+ self, || NotifyOption::SkipPersistHandleEvents);
+
let mut failed_channels = Vec::new();
let mut per_peer_state = self.per_peer_state.write().unwrap();
let remove_peer = {
return Err(());
}
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+ let mut res = Ok(());
- // If we have too many peers connected which don't have funded channels, disconnect the
- // peer immediately (as long as it doesn't have funded channels). If we have a bunch of
- // unfunded channels taking up space in memory for disconnected peers, we still let new
- // peers connect, but we'll reject new channels from them.
- let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
- let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
+ PersistenceNotifierGuard::optionally_notify(self, || {
+ // If we have too many peers connected which don't have funded channels, disconnect the
+ // peer immediately (as long as it doesn't have funded channels). If we have a bunch of
+ // unfunded channels taking up space in memory for disconnected peers, we still let new
+ // peers connect, but we'll reject new channels from them.
+ let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
+ let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
- {
- let mut peer_state_lock = self.per_peer_state.write().unwrap();
- match peer_state_lock.entry(counterparty_node_id.clone()) {
- hash_map::Entry::Vacant(e) => {
- if inbound_peer_limited {
- return Err(());
- }
- e.insert(Mutex::new(PeerState {
- channel_by_id: HashMap::new(),
- inbound_channel_request_by_id: HashMap::new(),
- latest_features: init_msg.features.clone(),
- pending_msg_events: Vec::new(),
- in_flight_monitor_updates: BTreeMap::new(),
- monitor_update_blocked_actions: BTreeMap::new(),
- actions_blocking_raa_monitor_updates: BTreeMap::new(),
- is_connected: true,
- }));
- },
- hash_map::Entry::Occupied(e) => {
- let mut peer_state = e.get().lock().unwrap();
- peer_state.latest_features = init_msg.features.clone();
-
- let best_block_height = self.best_block.read().unwrap().height();
- if inbound_peer_limited &&
- Self::unfunded_channel_count(&*peer_state, best_block_height) ==
- peer_state.channel_by_id.len()
- {
- return Err(());
- }
+ {
+ let mut peer_state_lock = self.per_peer_state.write().unwrap();
+ match peer_state_lock.entry(counterparty_node_id.clone()) {
+ hash_map::Entry::Vacant(e) => {
+ if inbound_peer_limited {
+ res = Err(());
+ return NotifyOption::SkipPersistNoEvents;
+ }
+ e.insert(Mutex::new(PeerState {
+ channel_by_id: HashMap::new(),
+ inbound_channel_request_by_id: HashMap::new(),
+ latest_features: init_msg.features.clone(),
+ pending_msg_events: Vec::new(),
+ in_flight_monitor_updates: BTreeMap::new(),
+ monitor_update_blocked_actions: BTreeMap::new(),
+ actions_blocking_raa_monitor_updates: BTreeMap::new(),
+ is_connected: true,
+ }));
+ },
+ hash_map::Entry::Occupied(e) => {
+ let mut peer_state = e.get().lock().unwrap();
+ peer_state.latest_features = init_msg.features.clone();
+
+ let best_block_height = self.best_block.read().unwrap().height();
+ if inbound_peer_limited &&
+ Self::unfunded_channel_count(&*peer_state, best_block_height) ==
+ peer_state.channel_by_id.len()
+ {
+ res = Err(());
+ return NotifyOption::SkipPersistNoEvents;
+ }
- debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
- peer_state.is_connected = true;
- },
+ debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
+ peer_state.is_connected = true;
+ },
+ }
}
- }
- log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
+ log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
- let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- let pending_msg_events = &mut peer_state.pending_msg_events;
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ let pending_msg_events = &mut peer_state.pending_msg_events;
- peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
- if let ChannelPhase::Funded(chan) = phase { Some(chan) } else {
- // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
- // (so won't be recovered after a crash), they shouldn't exist here and we would never need to
- // worry about closing and removing them.
- debug_assert!(false);
- None
- }
- ).for_each(|chan| {
- pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
- node_id: chan.context.get_counterparty_node_id(),
- msg: chan.get_channel_reestablish(&self.logger),
+ peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
+ if let ChannelPhase::Funded(chan) = phase { Some(chan) } else {
+ // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
+ // (so won't be recovered after a crash), they shouldn't exist here and we would never need to
+ // worry about closing and removing them.
+ debug_assert!(false);
+ None
+ }
+ ).for_each(|chan| {
+ pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+ node_id: chan.context.get_counterparty_node_id(),
+ msg: chan.get_channel_reestablish(&self.logger),
+ });
});
- });
- }
- //TODO: Also re-broadcast announcement_signatures
- Ok(())
+ }
+
+ //TODO: Also re-broadcast announcement_signatures
+ return NotifyOption::SkipPersistHandleEvents;
+ });
+ res
}
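// The control flow peer_connected now uses, reduced to its shape: `optionally_notify`
// only consumes the closure's NotifyOption, so the method's real Result is smuggled out
// through a captured local. A stub `optionally_notify` stands in for the guard here.
fn optionally_notify<F: FnOnce() -> NotifyOption>(f: F) { let _ = f(); /* wake as needed */ }

fn guarded_connect(inbound_peer_limited: bool) -> Result<(), ()> {
	let mut res = Ok(());
	optionally_notify(|| {
		if inbound_peer_limited {
			res = Err(());
			// We changed nothing and queued nothing, so skip both persist and events.
			return NotifyOption::SkipPersistNoEvents;
		}
		// channel_reestablish messages were queued, but no persisted state changed.
		NotifyOption::SkipPersistHandleEvents
	});
	res
}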
fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
// downstream chan is closed (because we don't have a
// channel_id -> peer map entry).
counterparty_opt.is_none(),
+ counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
monitor.get_funding_txo().0))
} else { None }
} else {
pending_background_events: Mutex::new(pending_background_events),
total_consistency_lock: RwLock::new(()),
background_events_processed_since_startup: AtomicBool::new(false),
- persistence_notifier: Notifier::new(),
+
+ event_persist_notifier: Notifier::new(),
+ needs_persist_flag: AtomicBool::new(false),
entropy_source: args.entropy_source,
node_signer: args.node_signer,
channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
- for (source, preimage, downstream_value, downstream_closed, downstream_funding) in pending_claims_to_replay {
+ for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding) in pending_claims_to_replay {
// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
// channel is closed we just assume that it probably came from an on-chain claim.
channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
- downstream_closed, downstream_funding);
+ downstream_closed, downstream_node_id, downstream_funding);
}
//TODO: Broadcast channel update for closed channels, but only after we've made a
// All nodes start with a persistable update pending as `create_network` connects each node
// with all other nodes to make most tests simpler.
- assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
- assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
- assert!(nodes[2].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+ assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
+ assert!(nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);
&nodes[0].node.get_our_node_id()).pop().unwrap();
// The first two nodes (which opened a channel) should now require fresh persistence
- assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
- assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+ assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
// ... but the last node should not.
- assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
// After persisting the first two nodes they should no longer need fresh persistence.
- assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
- assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+ assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
// Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
// about the channel.
nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0);
nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1);
- assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
// The nodes which are a party to the channel should also ignore messages from unrelated
// parties.
nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
- assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
- assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+ assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
// At this point the channel info given by peers should still be the same.
assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
// persisted and that its channel info remains the same.
nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
- assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
- assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+ assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
// the channel info has updated.
nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
- assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
- assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
+ assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
}
// To start (1), send a regular payment but don't claim it.
let expected_route = [&nodes[1]];
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &expected_route, 100_000);
// Next, attempt a keysend payment and make sure it fails.
let route_params = RouteParameters::from_payment_params_and_value(
use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, PaymentFailureReason};
use crate::events::bump_transaction::{BumpTransactionEventHandler, Wallet, WalletSource};
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash, PaymentSecret};
-use crate::ln::channelmanager::{self, AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA};
+use crate::ln::channelmanager::{AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA};
use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate};
use crate::routing::router::{self, PaymentParameters, Route, RouteParameters};
use crate::ln::features::InitFeatures;
let height = node.best_block_info().1 + 1;
confirm_transactions_at(node, txn, height);
}
+/// Mine a single block containing the given transaction without extra consistency checks which may
+/// impact ChannelManager state.
+pub fn mine_transaction_without_consistency_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction) {
+ let height = node.best_block_info().1 + 1;
+ let mut block = Block {
+ header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: height, bits: 42, nonce: 42 },
+ txdata: Vec::new(),
+ };
+ for _ in 0..*node.network_chan_count.borrow() { // Make sure we don't end up with channels at the same short id by offsetting by chan_count
+ block.txdata.push(Transaction { version: 0, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: Vec::new() });
+ }
+ block.txdata.push((*tx).clone());
+ do_connect_block_without_consistency_checks(node, block, false);
+}
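// Hypothetical test usage of the new helper: confirm `tx` without the claimable-balance
// sweep and background-event processing, which would themselves touch ChannelManager state
// the test intentionally leaves inconsistent. `nodes` and `tx` come from the surrounding test.
//
//   mine_transaction_without_consistency_checks(&nodes[1], &tx);
//   connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); // build depth with normal checks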
/// Mine the given transaction at the given height, mining blocks as required to build to that
/// height
///
assert!(depth >= 1);
for i in 1..depth {
let prev_blockhash = block.header.block_hash();
- do_connect_block(node, block, skip_intermediaries);
+ do_connect_block_with_consistency_checks(node, block, skip_intermediaries);
block = create_dummy_block(prev_blockhash, height + i, Vec::new());
}
let hash = block.header.block_hash();
- do_connect_block(node, block, false);
+ do_connect_block_with_consistency_checks(node, block, false);
hash
}
pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block) {
- do_connect_block(node, block.clone(), false);
+ do_connect_block_with_consistency_checks(node, block.clone(), false);
}
fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
}
}
-fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
+fn do_connect_block_with_consistency_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
call_claimable_balances(node);
+ do_connect_block_without_consistency_checks(node, block, skip_intermediaries);
+ call_claimable_balances(node);
+ node.node.test_process_background_events();
+}
+
+fn do_connect_block_without_consistency_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: Block, skip_intermediaries: bool) {
let height = node.best_block_info().1 + 1;
#[cfg(feature = "std")] {
eprintln!("Connecting block using Block Connection Style: {:?}", *node.connect_style.borrow());
}
}
}
- call_claimable_balances(node);
- node.node.test_process_background_events();
for tx in &block.txdata {
for input in &tx.input {
check_added_monitors!(node_a, 1);
// If this commitment signed dance was due to a claim, don't check for an RAA monitor update.
- let got_claim = node_a.node.pending_events.lock().unwrap().iter().any(|(ev, action)| {
- let matching_action = if let Some(channelmanager::EventCompletionAction::ReleaseRAAChannelMonitorUpdate
- { channel_funding_outpoint, counterparty_node_id }) = action
- {
- if channel_funding_outpoint.to_channel_id() == commitment_signed.channel_id {
- assert_eq!(*counterparty_node_id, node_b.node.get_our_node_id());
- true
- } else { false }
- } else { false };
- if matching_action {
- if let Event::PaymentSent { .. } = ev {} else { panic!(); }
- }
- matching_action
- });
+ let got_claim = node_a.node.test_raa_monitor_updates_held(node_b.node.get_our_node_id(), commitment_signed.channel_id);
if fail_backwards { assert!(!got_claim); }
commitment_signed_dance!(node_a, node_b, (), fail_backwards, true, false, got_claim);
}
}
+pub fn expect_payment_forwarded<CM: AChannelManager, H: NodeHolder<CM=CM>>(
+ event: Event, node: &H, prev_node: &H, next_node: &H, expected_fee: Option<u64>,
+ upstream_force_closed: bool, downstream_force_closed: bool
+) {
+ match event {
+ Event::PaymentForwarded {
+ fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id,
+ outbound_amount_forwarded_msat: _
+ } => {
+ assert_eq!(fee_earned_msat, expected_fee);
+ if !upstream_force_closed {
+ // Is the event prev_channel_id in one of the channels between the two nodes?
+ assert!(node.node().list_channels().iter().any(|x| x.counterparty.node_id == prev_node.node().get_our_node_id() && x.channel_id == prev_channel_id.unwrap()));
+ }
+ // We check for force closures since a force closed channel is removed from the
+ // node's channel list
+ if !downstream_force_closed {
+ assert!(node.node().list_channels().iter().any(|x| x.counterparty.node_id == next_node.node().get_our_node_id() && x.channel_id == next_channel_id.unwrap()));
+ }
+ assert_eq!(claim_from_onchain_tx, downstream_force_closed);
+ },
+ _ => panic!("Unexpected event"),
+ }
+}
+
macro_rules! expect_payment_forwarded {
($node: expr, $prev_node: expr, $next_node: expr, $expected_fee: expr, $upstream_force_closed: expr, $downstream_force_closed: expr) => {
- let events = $node.node.get_and_clear_pending_events();
+ let mut events = $node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
- match events[0] {
- Event::PaymentForwarded {
- fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id,
- outbound_amount_forwarded_msat: _
- } => {
- assert_eq!(fee_earned_msat, $expected_fee);
- if fee_earned_msat.is_some() {
- // Is the event prev_channel_id in one of the channels between the two nodes?
- assert!($node.node.list_channels().iter().any(|x| x.counterparty.node_id == $prev_node.node.get_our_node_id() && x.channel_id == prev_channel_id.unwrap()));
- }
- // We check for force closures since a force closed channel is removed from the
- // node's channel list
- if !$downstream_force_closed {
- assert!($node.node.list_channels().iter().any(|x| x.counterparty.node_id == $next_node.node.get_our_node_id() && x.channel_id == next_channel_id.unwrap()));
- }
- assert_eq!(claim_from_onchain_tx, $downstream_force_closed);
- },
- _ => panic!("Unexpected event"),
- }
+ $crate::ln::functional_test_utils::expect_payment_forwarded(
+ events.pop().unwrap(), &$node, &$prev_node, &$next_node, $expected_fee,
+ $upstream_force_closed, $downstream_force_closed);
}
}
}
};
if $idx == 1 { fee += expected_extra_fees[i]; }
- expect_payment_forwarded!($node, $next_node, $prev_node, Some(fee as u64), false, false);
+ expect_payment_forwarded!(*$node, $next_node, $prev_node, Some(fee as u64), false, false);
expected_total_fee_msat += fee as u64;
check_added_monitors!($node, 1);
let new_next_msgs = if $new_msgs {
pub const TEST_FINAL_CLTV: u32 = 70;
-pub fn route_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret) {
+pub fn route_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret, PaymentId) {
let payment_params = PaymentParameters::from_node_id(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV)
.with_bolt11_features(expected_route.last().unwrap().node.invoice_features()).unwrap();
let route_params = RouteParameters::from_payment_params_and_value(payment_params, recv_value);
}
let res = send_along_route(origin_node, route, expected_route, recv_value);
- (res.0, res.1, res.2)
+ (res.0, res.1, res.2, res.3)
}
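// Call sites that ignored the old 3-tuple keep working via `..`, while new tests can bind
// the extra PaymentId, e.g. (illustrative):
//
//   let (preimage, payment_hash, _, payment_id) = route_payment(&nodes[0], &[&nodes[1]], 100_000);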
pub fn route_over_limit<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) {
assert!(err.contains("Cannot send value that would put us over the max HTLC value in flight our peer will accept")));
}
-pub fn send_payment<'a, 'b, 'c>(origin: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret) {
+pub fn send_payment<'a, 'b, 'c>(origin: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret, PaymentId) {
let res = route_payment(&origin, expected_route, recv_value);
claim_payment(&origin, expected_route, res.0);
res
pub node_a: &'a Node<'b, 'c, 'd>,
pub node_b: &'a Node<'b, 'c, 'd>,
pub send_channel_ready: (bool, bool),
- pub pending_htlc_adds: (i64, i64),
+ pub pending_responding_commitment_signed: (bool, bool),
+ /// Indicates that the pending responding `commitment_signed` will be a dup for the recipient,
+ /// and no monitor update is expected
+ pub pending_responding_commitment_signed_dup_monitor: (bool, bool),
+ pub pending_htlc_adds: (usize, usize),
pub pending_htlc_claims: (usize, usize),
pub pending_htlc_fails: (usize, usize),
pub pending_cell_htlc_claims: (usize, usize),
node_a,
node_b,
send_channel_ready: (false, false),
+ pending_responding_commitment_signed: (false, false),
+ pending_responding_commitment_signed_dup_monitor: (false, false),
pending_htlc_adds: (0, 0),
pending_htlc_claims: (0, 0),
pending_htlc_fails: (0, 0),
pub fn reconnect_nodes<'a, 'b, 'c, 'd>(args: ReconnectArgs<'a, 'b, 'c, 'd>) {
let ReconnectArgs {
node_a, node_b, send_channel_ready, pending_htlc_adds, pending_htlc_claims, pending_htlc_fails,
- pending_cell_htlc_claims, pending_cell_htlc_fails, pending_raa
+ pending_cell_htlc_claims, pending_cell_htlc_fails, pending_raa,
+ pending_responding_commitment_signed, pending_responding_commitment_signed_dup_monitor,
} = args;
node_a.node.peer_connected(&node_b.node.get_our_node_id(), &msgs::Init {
features: node_b.node.init_features(), networks: None, remote_network_address: None
} else {
assert!(chan_msgs.1.is_none());
}
- if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
+ if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 ||
+ pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 ||
+ pending_responding_commitment_signed.0
+ {
let commitment_update = chan_msgs.2.unwrap();
- if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
- assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize);
- } else {
- assert!(commitment_update.update_add_htlcs.is_empty());
- }
+ assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0);
assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0);
assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.0 + pending_cell_htlc_fails.0);
assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail);
}
- if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
+ if !pending_responding_commitment_signed.0 {
commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false);
} else {
node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed);
// No commitment_signed so get_event_msg's assert(len == 1) passes
node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack);
assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
- check_added_monitors!(node_b, 1);
+ check_added_monitors!(node_b, if pending_responding_commitment_signed_dup_monitor.0 { 0 } else { 1 });
}
} else {
assert!(chan_msgs.2.is_none());
} else {
assert!(chan_msgs.1.is_none());
}
- if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
+ if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 ||
+ pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 ||
+ pending_responding_commitment_signed.1
+ {
let commitment_update = chan_msgs.2.unwrap();
- if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
- assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize);
- }
+ assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1);
assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1 + pending_cell_htlc_claims.1);
assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.1 + pending_cell_htlc_fails.1);
assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail);
}
- if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
+ if !pending_responding_commitment_signed.1 {
commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
} else {
node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed);
// No commitment_signed so get_event_msg's assert(len == 1) passes
node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack);
assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
- check_added_monitors!(node_a, 1);
+ check_added_monitors!(node_a, if pending_responding_commitment_signed_dup_monitor.1 { 0 } else { 1 });
}
} else {
assert!(chan_msgs.2.is_none());
use crate::chain::channelmonitor;
use crate::chain::channelmonitor::{CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use crate::chain::transaction::OutPoint;
-use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource, SignerProvider};
+use crate::sign::{EcdsaChannelSigner, EntropySource, SignerProvider};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
create_announced_chan_between_nodes(&nodes, 3, 4);
create_announced_chan_between_nodes(&nodes, 3, 5);
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
*nodes[0].network_payment_count.borrow_mut() -= 1;
assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
// balancing
send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000);
let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000);
// Sending exactly enough to hit the reserve amount should be accepted
for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
- let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
+ route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
}
// However one more HTLC should be significantly over the reserve amount and fail.
// Send four HTLCs to cover the initial push_msat buffer we're required to include
for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
- let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
+ route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
}
let (mut route, payment_hash, _, payment_secret) =
// In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
// reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
// commitment transaction fee.
- let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], dust_amt);
+ route_payment(&nodes[1], &[&nodes[0]], dust_amt);
// Send four HTLCs to cover the initial push_msat buffer we're required to include
for _ in 0..MIN_AFFORDABLE_HTLC_COUNT {
- let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
+ route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
}
// One more than the dust amt should fail, however.
let payment_amt = 46000; // Dust amount
// In the previous code, these first four payments would succeed.
- let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
- let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
- let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
- let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
// Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
- let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
- let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
- let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
- let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
- let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
// And this last payment previously resulted in nodes[1] closing on its inbound-channel
// counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment
// transaction fee and therefore perceived this next payment as a channel reserve violation.
- let (_, _, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+ route_payment(&nodes[0], &[&nodes[1]], payment_amt);
}
#[test]
let b_chan_values = get_channel_value_stat!(nodes[1], nodes[0], chan_1.2);
// Route the first two HTLCs.
let payment_value_1 = b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000;
- let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1);
- let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], 20_000);
+ let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_value_1);
+ let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 20_000);
// Start routing the third HTLC (this is just used to get everyone in the right state).
let (route, payment_hash_3, payment_preimage_3, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
// One pending HTLC is discarded by the force-close:
- let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
+ let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2], &nodes[3]], 3_000_000);
// Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not
// broadcasted until we reach the timelock time).
let chan_3_mon = nodes[3].chain_monitor.chain_monitor.remove_monitor(&OutPoint { txid: chan_3.3.txid(), index: 0 });
// One pending HTLC to time out:
- let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000);
+ let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[3], &[&nodes[4]], 3_000_000);
// CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
// buffer space).
send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
// node[0] is gonna to revoke an old state thus node[1] should be able to claim both offered/received HTLC outputs on top of commitment tx
let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
- let (_payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
+ let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
// Get the will-be-revoked local txn from node[0]
let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
// node[0] is gonna to revoke an old state thus node[1] should be able to claim both offered/received HTLC outputs on top of commitment tx, but this
// time as two different claim transactions as we're gonna to timeout htlc with given a high current height
let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
- let (_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
+ let (_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
// Get the will-be-revoked local txn from node[0]
let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
- let (our_payment_preimage, payment_hash_1, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
- let (our_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
+ let (our_payment_preimage, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
+ let (our_payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
// Broadcast legit commitment tx from C on B's chain
// Broadcast HTLC Success transaction by C on received output from C's commitment tx on B's chain
send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
- let (_payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
+ let (_payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
// Broadcast legit commitment tx from C on B's chain
let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
create_announced_chan_between_nodes(&nodes, 0, 1);
let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
- let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
+ let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
// Get the will-be-revoked local txn from nodes[2]
let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
// Revoke the old state
claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
- let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
+ let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
mine_transaction(&nodes[1], &revoked_local_txn[0]);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
create_announced_chan_between_nodes(&nodes, 0, 1);
let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
- let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
+ let (payment_preimage, _payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
// Get the will-be-revoked local txn from nodes[2]
let revoked_local_txn = get_local_commitment_txn!(nodes[2], chan_2.2);
assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
.unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000
} else { 3000000 };
- let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
- let (_, second_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
- let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
+ let (_, first_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
+ let (_, second_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
+ let (_, third_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
nodes[2].node.fail_htlc_backwards(&first_payment_hash);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1);
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
nodes[1].node.claim_funds(payment_preimage);
expect_payment_claimed!(nodes[1], payment_hash, 1_000_000);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
- let (payment_preimage_3, payment_hash_3, _) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
+ let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000);
let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
} else if messages_delivered == 3 {
// nodes[0] still wants its RAA + commitment_signed
let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
- reconnect_args.pending_htlc_adds.0 = -1;
+ reconnect_args.pending_responding_commitment_signed.0 = true;
reconnect_args.pending_raa.0 = true;
reconnect_nodes(reconnect_args);
} else if messages_delivered == 4 {
// nodes[0] still wants its commitment_signed
let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
- reconnect_args.pending_htlc_adds.0 = -1;
+ reconnect_args.pending_responding_commitment_signed.0 = true;
reconnect_nodes(reconnect_args);
} else if messages_delivered == 5 {
// nodes[1] still wants its final RAA
} else if messages_delivered == 2 {
// nodes[0] still wants its RAA + commitment_signed
let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
- reconnect_args.pending_htlc_adds.1 = -1;
+ reconnect_args.pending_responding_commitment_signed.1 = true;
reconnect_args.pending_raa.1 = true;
reconnect_nodes(reconnect_args);
} else if messages_delivered == 3 {
// nodes[0] still wants its commitment_signed
let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
- reconnect_args.pending_htlc_adds.1 = -1;
+ reconnect_args.pending_responding_commitment_signed.1 = true;
reconnect_nodes(reconnect_args);
} else if messages_delivered == 4 {
// nodes[1] still wants its final RAA
nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
}
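// A hedged sketch of the ReconnectArgs change exercised above: the old API apparently
// used pending_htlc_adds = -1 as a sentinel meaning "a responding commitment_signed is
// still owed", while the new API names that state with a dedicated per-direction bool.
// The struct below is an illustrative stand-in, not LDK's real ReconnectArgs.
#[derive(Default)]
struct ReconnectArgsSketch {
    pending_htlc_adds: (usize, usize),                  // real HTLC re-adds only, no sentinels
    pending_responding_commitment_signed: (bool, bool), // replaces the old -1 sentinel
    pending_raa: (bool, bool),
}

fn main() {
    let mut args = ReconnectArgsSketch::default();
    args.pending_responding_commitment_signed.0 = true; // nodes[0] still wants its commitment_signed
    args.pending_raa.0 = true;                          // ...and its RAA
    assert_eq!(args.pending_htlc_adds, (0, 0));
}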
+#[test]
+fn test_channel_monitor_skipping_block_when_channel_manager_is_leading() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Let channel_manager get ahead of chain_monitor by 1 block.
+ // This emulates a race condition where a newly added channel_monitor skips processing a block:
+ // the client calls block_connected on channel_manager first and only then on chain_monitor.
+ let height_1 = nodes[0].best_block_info().1 + 1;
+ let block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
+
+ nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
+ nodes[0].node.block_connected(&block_1, height_1);
+
+ // Create channel, and it gets added to chain_monitor in funding_created.
+ let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
+
+ // Now the newly added channel_monitor in chain_monitor hasn't processed block_1, but its
+ // best_block is block_1, since that was populated by channel_manager, and channel_manager
+ // was running ahead of chain_monitor at the time of funding_created.
+ // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
+ // Hence this channel's channel_monitor skipped block_1 and goes straight to processing subsequent blocks.
+ confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
+ connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
+
+ // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
+ let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
+}
+
+#[test]
+fn test_channel_monitor_skipping_block_when_channel_manager_is_lagging() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Let chain_monitor get ahead of channel_manager by 1 block.
+ // This emulates a race condition where a newly added channel_monitor skips processing a block:
+ // the client calls block_connected on chain_monitor first and only then on channel_manager.
+ let height_1 = nodes[0].best_block_info().1 + 1;
+ let block_1 = create_dummy_block(nodes[0].best_block_hash(), height_1, Vec::new());
+
+ nodes[0].blocks.lock().unwrap().push((block_1.clone(), height_1));
+ nodes[0].chain_monitor.chain_monitor.block_connected(&block_1, height_1);
+
+ // Create channel, and it gets added to chain_monitor in funding_created.
+ let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
+
+ // channel_manager can't really skip block_1; it should get it eventually.
+ nodes[0].node.block_connected(&block_1, height_1);
+
+ // Now the newly added channel_monitor in chain_monitor hasn't processed block_1; its best_block
+ // is the block before block_1, since that was populated by channel_manager, and channel_manager
+ // was running behind at the time of funding_created.
+ // Later on, subsequent blocks are connected to both channel_manager and chain_monitor.
+ // Hence this channel's channel_monitor skipped block_1 and goes straight to processing subsequent blocks.
+ confirm_transaction_at(&nodes[0], &funding_tx, nodes[0].best_block_info().1 + 1);
+ connect_blocks(&nodes[0], CHAN_CONFIRM_DEPTH);
+
+ // Ensure nodes[0] generates a channel_ready after the transactions_confirmed
+ let as_channel_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
+}
+
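// A minimal standalone model of the race both tests above exercise (illustrative names,
// not LDK APIs): a monitor created while the manager is at height H starts with its
// best block at H without ever having processed block H itself, and must still accept
// subsequent blocks on top of that "skipped" block.
struct MonitorModel { best_height: u32, processed: Vec<u32> }
impl MonitorModel {
    fn new_at(best_height: u32) -> Self { Self { best_height, processed: Vec::new() } }
    fn block_connected(&mut self, height: u32) {
        assert_eq!(height, self.best_height + 1, "blocks must connect in order");
        self.processed.push(height);
        self.best_height = height;
    }
}
fn main() {
    let mut monitor = MonitorModel::new_at(101); // created after block 101 connected elsewhere
    monitor.block_connected(102);                // block 101 itself is never processed
    assert_eq!(monitor.processed, vec![102]);
}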
#[test]
fn test_drop_messages_peer_disconnect_dual_htlc() {
// Test that we can handle reconnecting when both sides of a channel have pending
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1);
- let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
// Now try to send a second payment which will fail to send
let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
// Create some initial channels
let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
assert_eq!(commitment_tx[0].input.len(), 1);
// Rebalance the network a bit by relaying one payment through all the channels ...
send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
- let (_, our_payment_hash, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);
+ let (_, our_payment_hash, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3_000_000);
let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
assert_eq!(commitment_tx[0].input.len(), 1);
send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
- let (payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
let commitment_tx = get_local_commitment_txn!(nodes[2], chan_2.2);
check_spends!(commitment_tx[0], chan_2.3);
nodes[2].node.claim_funds(payment_preimage);
connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
connect_blocks(&nodes[3], node_max_height - nodes[3].best_block_info().1);
- let (our_payment_preimage, duplicate_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);
+ let (our_payment_preimage, duplicate_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000);
let payment_secret = nodes[3].node.create_inbound_payment_for_hash(duplicate_payment_hash, None, 7200, None).unwrap();
// We reduce the final CLTV here by a somewhat arbitrary constant to keep it under the one-byte
// Create some initial channels
let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
assert_eq!(local_txn.len(), 1);
assert_eq!(local_txn[0].input.len(), 1);
let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
.unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis;
// 0th HTLC:
- let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
+ let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
// 1st HTLC:
- let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
+ let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
// 2nd HTLC:
send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_1, nodes[5].node.create_inbound_payment_for_hash(payment_hash_1, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
// 3rd HTLC:
send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_2, nodes[5].node.create_inbound_payment_for_hash(payment_hash_2, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
// 4th HTLC:
- let (_, payment_hash_3, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
+ let (_, payment_hash_3, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
// 5th HTLC:
- let (_, payment_hash_4, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
+ let (_, payment_hash_4, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
// 6th HTLC:
send_along_route_with_secret(&nodes[1], route.clone(), &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_3, nodes[5].node.create_inbound_payment_for_hash(payment_hash_3, None, 7200, None).unwrap());
send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_4, nodes[5].node.create_inbound_payment_for_hash(payment_hash_4, None, 7200, None).unwrap());
// 8th HTLC:
- let (_, payment_hash_5, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
+ let (_, payment_hash_5, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
// 9th HTLC:
let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], ds_dust_limit*1000);
send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], ds_dust_limit*1000, payment_hash_5, nodes[5].node.create_inbound_payment_for_hash(payment_hash_5, None, 7200, None).unwrap()); // not added < dust limit + HTLC tx fee
// 10th HTLC:
- let (_, payment_hash_6, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
+ let (_, payment_hash_6, ..) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
// 11th HTLC:
let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[5], 1000000);
send_along_route_with_secret(&nodes[1], route, &[&[&nodes[2], &nodes[3], &nodes[5]]], 1000000, payment_hash_6, nodes[5].node.create_inbound_payment_for_hash(payment_hash_6, None, 7200, None).unwrap());
// Create some initial channels
let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
- let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
+ let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
assert_eq!(local_txn[0].input.len(), 1);
check_spends!(local_txn[0], chan_1.3);
connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
- let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
+ let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000);
let local_txn_0 = get_local_commitment_txn!(nodes[0], chan_0.2);
let local_txn_1 = get_local_commitment_txn!(nodes[0], chan_1.2);
assert_eq!(local_txn_1[0].input.len(), 1);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 });
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3_000_000 });
// Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
// present in B's local commitment transaction, but none of A's commitment transactions.
// Also optionally test that we *don't* fail the channel in case the commitment transaction was
// actually revoked.
let htlc_value = if use_dust { 50000 } else { 3000000 };
- let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
+ let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
nodes[1].node.fail_htlc_backwards(&our_payment_hash);
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
check_added_monitors!(nodes[1], 1);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1);
- let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
+ let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
nodes[1].node.claim_funds(our_payment_preimage);
check_added_monitors!(nodes[1], 1);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1);
- let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
+ let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
nodes[1].node.claim_funds(our_payment_preimage);
check_added_monitors!(nodes[1], 1);
.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
// We route 2 dust-HTLCs between A and B
- let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
- let (_, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
+ let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
+ let (_, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
route_payment(&nodes[0], &[&nodes[1]], 1000000);
// Cache one local commitment tx as previous
let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
.unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
- let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
- let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+ let (_payment_preimage_1, dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
+ let (_payment_preimage_2, non_dust_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
let as_commitment_tx = get_local_commitment_txn!(nodes[0], chan.2);
let bs_commitment_tx = get_local_commitment_txn!(nodes[1], chan.2);
// We revoked bs_commitment_tx
if revoked {
- let (payment_preimage_3, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+ let (payment_preimage_3, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
}
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
// Remote commitment txn with 4 outputs: to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
// Lock HTLC in both directions
- let (payment_preimage_1, _, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
- let (_, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
+ let (payment_preimage_1, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
+ let (_, payment_hash_2, ..) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
assert_eq!(revoked_local_txn[0].input.len(), 1);
send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
// Route a HTLC from node 0 to node 1 (but don't settle)
- let (preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
+ let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000);
// Copy ChainMonitor to simulate a watchtower and update the block height of node 0 until its ChannelMonitor times out the HTLC onchain
let chain_source = test_utils::TestChainSource::new(Network::Testnet);
let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001);
send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000);
- let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
+ let (_, our_payment_hash, ..) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
assert_eq!(local_txn[0].input.len(), 1);
assert_eq!(local_txn[0].output.len(), 3);
// Steps (1) and (2):
// Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
- let (payment_preimage, payment_hash, _payment_secret) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000);
// Check that Alice's commitment transaction now contains an output for this HTLC.
let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
- expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false, false);
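+ // Once the channel has gone on-chain no fee can be expected in the forwarding event,
+ // and the macro's on-chain flag (previously hardcoded to false here) must be set to match.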
+ let went_onchain = go_onchain_before_fulfill || force_closing_node == 1;
+ expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], if went_onchain { None } else { Some(1000) }, went_onchain, false);
// If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
if !go_onchain_before_fulfill && broadcast_alice {
let events = nodes[1].node.get_and_clear_pending_msg_events();
create_announced_chan_between_nodes(&nodes, 0, 1);
let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
- let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+ let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
send_payment(&nodes[0], &[&nodes[1]], 1_000_000);
send_payment(&nodes[0], &[&nodes[2]], 1_000_000);
- let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
- let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);
+ let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000);
nodes[1].node.claim_funds(our_payment_preimage);
check_added_monitors!(nodes[1], 1);
assert_eq!(funding_outpoint.to_channel_id(), chan_id);
// This HTLC is immediately claimed, giving node B the preimage
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
// This HTLC is allowed to time out, letting A claim it. However, in order to test claimable
// balances more fully we also give B the preimage for this HTLC.
- let (timeout_payment_preimage, timeout_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 4_000_000);
+ let (timeout_payment_preimage, timeout_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 4_000_000);
// This HTLC will be dust, and not be claimable at all:
- let (dust_payment_preimage, dust_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 3_000);
+ let (dust_payment_preimage, dust_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000);
let htlc_cltv_timeout = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1; // Note ChannelManager adds one to CLTV timeouts for safety
&nodes, 0, 1, 1_000_000, 500_000_000
).2;
route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000);
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]);
check_added_monitors!(nodes[1], 1);
commitment_signed_dance!(nodes[1], nodes[2], htlc_fulfill_updates.commitment_signed, false);
- expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, false, false);
+ expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, false);
if confirm_before_reload {
let best_block = nodes[0].blocks.lock().unwrap().last().unwrap().clone();
// Route a payment, but force-close the channel before the HTLC fulfill message arrives at
// nodes[0].
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 10_000_000);
nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
// The simplest way to get a failure after a fulfill is to reload nodes[1] from a state
// pre-fulfill, which we do by serializing it here.
let (_, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2);
// Send and claim the payment. Inflight HTLCs should be empty.
- let payment_hash = send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000).1;
+ let (_, payment_hash, _, payment_id) = send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000);
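+ // Also capture the PaymentId: the RecentPaymentDetails assertion below matches on it.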
let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
{
let mut node_0_per_peer_lock;
}
let pending_payments = nodes[0].node.list_recent_payments();
assert_eq!(pending_payments.len(), 1);
- assert_eq!(pending_payments[0], RecentPaymentDetails::Fulfilled { payment_hash: Some(payment_hash) });
+ assert_eq!(pending_payments[0], RecentPaymentDetails::Fulfilled { payment_hash: Some(payment_hash), payment_id });
// Remove fulfilled payment
for _ in 0..=IDEMPOTENCY_TIMEOUT_TICKS {
}
// Send the payment, but do not claim it. Our inflight HTLCs should contain the pending payment.
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000);
+ let (payment_preimage, payment_hash, _, payment_id) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 500000);
let inflight_htlcs = node_chanmgrs[0].compute_inflight_htlcs();
{
let mut node_0_per_peer_lock;
}
let pending_payments = nodes[0].node.list_recent_payments();
assert_eq!(pending_payments.len(), 1);
- assert_eq!(pending_payments[0], RecentPaymentDetails::Pending { payment_hash, total_msat: 500000 });
+ assert_eq!(pending_payments[0], RecentPaymentDetails::Pending { payment_id, payment_hash, total_msat: 500000 });
// Now, let's claim the payment. This should result in the used liquidity to return `None`.
claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
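// A hedged sketch of the RecentPaymentDetails shape implied by the two assertions above:
// both Pending and Fulfilled now carry the PaymentId returned by route_payment/send_payment,
// letting callers correlate list_recent_payments() entries with the payment that created
// them. Field types below are illustrative stand-ins, not LDK's real definitions.
#[derive(Debug, PartialEq)]
enum RecentPaymentDetailsSketch {
    Pending { payment_id: u64, payment_hash: [u8; 32], total_msat: u64 },
    Fulfilled { payment_hash: Option<[u8; 32]>, payment_id: u64 },
}

fn main() {
    let (payment_id, payment_hash) = (1u64, [0u8; 32]); // stand-ins for LDK's newtypes
    let entry = RecentPaymentDetailsSketch::Pending { payment_id, payment_hash, total_msat: 500_000 };
    assert_eq!(
        entry,
        RecentPaymentDetailsSketch::Pending { payment_id: 1, payment_hash: [0u8; 32], total_msat: 500_000 }
    );
}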
nodes_0_serialized = nodes[0].node.encode();
}
- let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
if persist_manager_with_payment {
nodes_0_serialized = nodes[0].node.encode();
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
- let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
- let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+ let (our_payment_preimage, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+ let (_, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
node_0_stale_monitors_serialized.push(writer.0);
}
- let (our_payment_preimage, _, _) = route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 1000000);
+ let (our_payment_preimage, ..) = route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 1000000);
// Serialize the ChannelManager here, but keep the monitor up-to-date
let nodes_0_serialized = nodes[0].node.encode();
let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
- let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+ let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
let node_encoded = nodes[1].node.encode();
connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
- let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+ let (our_payment_preimage, our_payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
// Provide preimage to node 2 by claiming payment
nodes[2].node.claim_funds(our_payment_preimage);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000);
- let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
- let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
+ let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
+ let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
// Remote commitment txn with 4 outputs: to_local, to_remote, 2 outgoing HTLCs
let remote_txn = get_local_commitment_txn!(nodes[1], chan.2);
let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
let _chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
- let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+ let (payment_preimage_0, payment_hash_0, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown);
expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NotShuttingDown);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
+ let (payment_preimage, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000);
nodes[0].node.close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
let keys_manager = test_utils::TestKeysInterface::new(&[0u8; 32], Network::Testnet);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
- let (payment_preimage_0, payment_hash_0, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+ let (payment_preimage_0, payment_hash_0, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
nodes[0].node.close_channel(&chan_1.2, &nodes[1].node.get_our_node_id()).unwrap();
let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
- let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
nodes[1].node.close_channel(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
for stored_key in kv_store.list(
CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE)?
{
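+ // A valid key holds a 64-character hex txid plus a separator and at least one
+ // index digit, so anything shorter than 66 bytes would panic in split_at(64) below.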
+ if stored_key.len() < 66 {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "Stored key has invalid length"));
+ }
+
let txid = Txid::from_hex(stored_key.split_at(64).0).map_err(|_| {
io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
})?;
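// A standalone sketch of the key shape the new guard assumes: monitor keys look like
// "<64-hex-txid>_<index>" (format assumed from the split_at(64) parse above), so a
// well-formed key is at least 66 bytes. Without the guard, split_at(64) panics on any
// shorter key; with it, corrupt entries surface as an io::Error instead.
fn key_is_plausible(stored_key: &str) -> bool {
    if stored_key.len() < 66 { return false; }
    let (txid_hex, rest) = stored_key.split_at(64);
    txid_hex.bytes().all(|b| b.is_ascii_hexdigit()) && rest.starts_with('_')
}

fn main() {
    let good = format!("{}_{}", "ab".repeat(32), 0);
    assert!(key_is_plausible(&good));
    assert!(!key_is_plausible("deadbeef")); // split_at(64) would have panicked on this
}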
use crate::io;
use crate::prelude::*;
use core::cell::RefCell;
-use core::ops::Deref;
use core::time::Duration;
use crate::sync::{Mutex, Arc};
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
}
impl WatchtowerPersister {
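+ // These watchtower helpers are only exercised from the crate's tests, so they are
+ // gated with #[cfg(test)] (presumably to avoid dead-code warnings in other builds).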
+ #[cfg(test)]
pub(crate) fn new(destination_script: Script) -> Self {
WatchtowerPersister {
persister: TestPersister::new(),
}
}
+ #[cfg(test)]
pub(crate) fn justice_tx(&self, funding_txo: OutPoint, commitment_txid: &Txid)
-> Option<Transaction> {
self.watchtower_state.lock().unwrap().get(&funding_txo).unwrap().get(commitment_txid).cloned()