+
+#[test]
+fn test_pre_lockin_no_chan_closed_update() {
+ // Test that if a peer closes a channel in response to a funding_created message we don't
+ // generate a channel update (as the channel cannot appear on chain without a funding_signed
+ // message).
+ //
+ // Doing so would imply a channel monitor update before the initial channel monitor
+ // registration, violating our API guarantees.
+ //
+ // Previously, full_stack_target managed to hit this case by opening then closing a channel,
+ // then opening a second channel with the same funding output as the first (which is not
+ // rejected because the first channel does not exist in the ChannelManager) and closing it
+ // before receiving funding_signed.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Create an initial channel
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
+ let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_chan_msg);
+ let accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg);
+
+ // Move the first channel through the funding flow...
+ let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], 100000, 42);
+
+ nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
+ // No monitor may be registered yet - we have not received funding_signed.
+ check_added_monitors!(nodes[0], 0);
+
+ let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+ let channel_id = ::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
+ // The peer now errors against the funding-txid-based channel_id before funding_signed.
+ // Closing the channel here must not register (or update) any channel monitor.
+ nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
+ assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
+}
+
+#[test]
+fn test_htlc_no_detection() {
+ // This test is a mutation to underscore the detection logic bug we had
+ // before #653. HTLC value routed is above the remaining balance, thus
+ // inverting HTLC and `to_remote` output. HTLC will come second and
+ // it wouldn't be seen by pre-#653 detection as we were enumerate()'ing
+ // on a watched outputs vector (Vec<TxOut>) thus implicitly relying on
+ // outputs order detection for correct spending children filtering.
+
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Create some initial channels
+ let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 1_000_000);
+ let (_, our_payment_hash, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 2_000_000);
+ // The local commitment should carry three outputs: to_local, to_remote and the HTLC.
+ let local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
+ assert_eq!(local_txn[0].input.len(), 1);
+ assert_eq!(local_txn[0].output.len(), 3);
+ check_spends!(local_txn[0], chan_1.3);
+
+ // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ connect_block(&nodes[0], &Block { header, txdata: vec![local_txn[0].clone()] });
+ // We deliberately connect the local tx twice as this should provoke a failure calling
+ // this test before #653 fix.
+ chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1);
+ check_closed_broadcast!(nodes[0], true);
+ check_added_monitors!(nodes[0], 1);
+ connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1);
+
+ // Grab the broadcast HTLC-Timeout: it must spend the confirmed commitment tx and carry
+ // an offered-HTLC witness.
+ let htlc_timeout = {
+  let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+  assert_eq!(node_txn[1].input.len(), 1);
+  assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+  check_spends!(node_txn[1], local_txn[0]);
+  node_txn[1].clone()
+ };
+
+ // Confirm the HTLC-Timeout and wait out the anti-reorg delay; the payment then fails back.
+ let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ connect_block(&nodes[0], &Block { header: header_201, txdata: vec![htlc_timeout.clone()] });
+ connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+ expect_payment_failed!(nodes[0], our_payment_hash, true);
+}
+
+fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain_before_fulfill: bool) {
+ // If we route an HTLC, then learn the HTLC's preimage after the upstream channel has been
+ // force-closed, we must claim that HTLC on-chain. (Given an HTLC forwarded from Alice --> Bob -->
+ // Carol, Alice would be the upstream node, and Carol the downstream.)
+ //
+ // Steps of the test:
+ // 1) Alice sends a HTLC to Carol through Bob.
+ // 2) Carol doesn't settle the HTLC.
+ // 3) If broadcast_alice is true, Alice force-closes her channel with Bob. Else Bob force closes.
+ // Steps 4 and 5 may be reordered depending on go_onchain_before_fulfill.
+ // 4) Bob sees Alice's commitment on his chain or vice versa. An offered output is present
+ // but can't be claimed as Bob doesn't have yet knowledge of the preimage.
+ // 5) Carol releases the preimage to Bob off-chain.
+ // 6) Bob claims the offered output on the broadcasted commitment.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ // Create some initial channels
+ let chan_ab = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+ // Steps (1) and (2):
+ // Send an HTLC Alice --> Bob --> Carol, but Carol doesn't settle the HTLC back.
+ let (payment_preimage, _payment_hash, _payment_secret) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3_000_000);
+
+ // Check that Alice's commitment transaction now contains an output for this HTLC.
+ let alice_txn = get_local_commitment_txn!(nodes[0], chan_ab.2);
+ check_spends!(alice_txn[0], chan_ab.3);
+ assert_eq!(alice_txn[0].output.len(), 2);
+ check_spends!(alice_txn[1], alice_txn[0]); // 2nd transaction is a non-final HTLC-timeout
+ assert_eq!(alice_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ assert_eq!(alice_txn.len(), 2);
+
+ // Steps (3) and (4):
+ // If `go_onchain_before_fulfill`, broadcast the relevant commitment transaction and check that Bob
+ // responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
+ let mut force_closing_node = 0; // Alice force-closes
+ if !broadcast_alice { force_closing_node = 1; } // Bob force-closes
+ nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap();
+ check_closed_broadcast!(nodes[force_closing_node], true);
+ check_added_monitors!(nodes[force_closing_node], 1);
+ if go_onchain_before_fulfill {
+  let txn_to_broadcast = match broadcast_alice {
+   true => alice_txn.clone(),
+   false => get_local_commitment_txn!(nodes[1], chan_ab.2)
+  };
+  let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+  connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
+  let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+  if broadcast_alice {
+   check_closed_broadcast!(nodes[1], true);
+   check_added_monitors!(nodes[1], 1);
+  }
+  assert_eq!(bob_txn.len(), 1);
+  check_spends!(bob_txn[0], chan_ab.3);
+ }
+
+ // Step (5):
+ // Carol then claims the funds and sends an update_fulfill message to Bob, and they go through the
+ // process of removing the HTLC from their commitment transactions.
+ assert!(nodes[2].node.claim_funds(payment_preimage));
+ check_added_monitors!(nodes[2], 1);
+ let carol_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert!(carol_updates.update_add_htlcs.is_empty());
+ assert!(carol_updates.update_fail_htlcs.is_empty());
+ assert!(carol_updates.update_fail_malformed_htlcs.is_empty());
+ assert!(carol_updates.update_fee.is_none());
+ assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);
+
+ nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
+ // If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
+ if !go_onchain_before_fulfill && broadcast_alice {
+  let events = nodes[1].node.get_and_clear_pending_msg_events();
+  assert_eq!(events.len(), 1);
+  match events[0] {
+   MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
+    assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+   },
+   _ => panic!("Unexpected event"),
+  };
+ }
+ nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &carol_updates.commitment_signed);
+ // One monitor update for the preimage to update the Bob<->Alice channel, one monitor update
+ // Carol<->Bob's updated commitment transaction info.
+ check_added_monitors!(nodes[1], 2);
+
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ let bob_revocation = match events[0] {
+  MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+   assert_eq!(*node_id, nodes[2].node.get_our_node_id());
+   (*msg).clone()
+  },
+  _ => panic!("Unexpected event"),
+ };
+ let bob_updates = match events[1] {
+  MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
+   assert_eq!(*node_id, nodes[2].node.get_our_node_id());
+   (*updates).clone()
+  },
+  _ => panic!("Unexpected event"),
+ };
+
+ nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revocation);
+ check_added_monitors!(nodes[2], 1);
+ nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_updates.commitment_signed);
+ check_added_monitors!(nodes[2], 1);
+
+ let events = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let carol_revocation = match events[0] {
+  MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+   assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+   (*msg).clone()
+  },
+  _ => panic!("Unexpected event"),
+ };
+ nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &carol_revocation);
+ check_added_monitors!(nodes[1], 1);
+
+ // If this test requires the force-closed channel to not be on-chain until after the fulfill,
+ // here's where we put said channel's commitment tx on-chain.
+ let mut txn_to_broadcast = alice_txn.clone();
+ if !broadcast_alice { txn_to_broadcast = get_local_commitment_txn!(nodes[1], chan_ab.2); }
+ if !go_onchain_before_fulfill {
+  let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+  connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
+  // If Bob was the one to force-close, he will have already passed these checks earlier.
+  if broadcast_alice {
+   check_closed_broadcast!(nodes[1], true);
+   check_added_monitors!(nodes[1], 1);
+  }
+  let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+  if broadcast_alice {
+   // In `connect_block()`, the ChainMonitor and ChannelManager are separately notified about a
+   // new block being connected. The ChannelManager being notified triggers a monitor update,
+   // which triggers broadcasting our commitment tx and an HTLC-claiming tx. The ChainMonitor
+   // being notified triggers the HTLC-claiming tx redundantly, resulting in 3 total txs being
+   // broadcasted.
+   assert_eq!(bob_txn.len(), 3);
+   check_spends!(bob_txn[1], chan_ab.3);
+  } else {
+   assert_eq!(bob_txn.len(), 2);
+   check_spends!(bob_txn[0], chan_ab.3);
+  }
+ }
+
+ // Step (6):
+ // Finally, check that Bob broadcasted a preimage-claiming transaction for the HTLC output on the
+ // broadcasted commitment transaction.
+ {
+  let bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+  if go_onchain_before_fulfill {
+   // Bob should now have an extra broadcasted tx, for the preimage-claiming transaction.
+   assert_eq!(bob_txn.len(), 2);
+  }
+  let script_weight = match broadcast_alice {
+   true => OFFERED_HTLC_SCRIPT_WEIGHT,
+   false => ACCEPTED_HTLC_SCRIPT_WEIGHT
+  };
+  // If Alice force-closed and Bob didn't receive her commitment transaction until after he
+  // received Carol's fulfill, he broadcasts the HTLC-output-claiming transaction first. Else if
+  // Bob force closed or if he found out about Alice's commitment tx before receiving Carol's
+  // fulfill, then he broadcasts the HTLC-output-claiming transaction second.
+  if broadcast_alice && !go_onchain_before_fulfill {
+   check_spends!(bob_txn[0], txn_to_broadcast[0]);
+   assert_eq!(bob_txn[0].input[0].witness.last().unwrap().len(), script_weight);
+  } else {
+   check_spends!(bob_txn[1], txn_to_broadcast[0]);
+   assert_eq!(bob_txn[1].input[0].witness.last().unwrap().len(), script_weight);
+  }
+ }
+}
+
+#[test]
+fn test_onchain_htlc_settlement_after_close() {
+ // Exercise all four combinations of who force-closes (Alice vs Bob) and whether the
+ // commitment transaction confirms before or after Bob learns the preimage. (The
+ // broadcast_alice=false / go_onchain_before_fulfill=true case is technically redundant,
+ // but cheap to run.)
+ for &go_onchain_before_fulfill in &[true, false] {
+  for &broadcast_alice in &[true, false] {
+   do_test_onchain_htlc_settlement_after_close(broadcast_alice, go_onchain_before_fulfill);
+  }
+ }
+}
+
+#[test]
+fn test_duplicate_chan_id() {
+ // Test that if a given peer tries to open a channel with the same channel_id as one that is
+ // already open we reject it and keep the old channel.
+ //
+ // Previously, full_stack_target managed to figure out that if you tried to open two channels
+ // with the same funding output (ie post-funding channel_id), we'd create a monitor update for
+ // the existing channel when we detect the duplicate new channel, screwing up our monitor
+ // updating logic for the existing channel.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Create an initial channel
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
+ let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_chan_msg);
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
+
+ // Try to create a second channel with the same temporary_channel_id as the first and check
+ // that it is rejected.
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_chan_msg);
+ {
+  let events = nodes[1].node.get_and_clear_pending_msg_events();
+  assert_eq!(events.len(), 1);
+  match events[0] {
+   MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
+    // Technically, at this point, nodes[1] would be justified in thinking both the
+    // first (valid) and second (invalid) channels are closed, given they both have
+    // the same non-temporary channel_id. However, currently we do not, so we just
+    // move forward with it.
+    assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
+    assert_eq!(node_id, nodes[0].node.get_our_node_id());
+   },
+   _ => panic!("Unexpected event"),
+  }
+ }
+
+ // Move the first channel through the funding flow...
+ let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 42);
+
+ nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
+ check_added_monitors!(nodes[0], 0);
+
+ let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+ {
+  let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
+  assert_eq!(added_monitors.len(), 1);
+  assert_eq!(added_monitors[0].0, funding_output);
+  added_monitors.clear();
+ }
+ let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+
+ let funding_outpoint = ::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
+ let channel_id = funding_outpoint.to_channel_id();
+
+ // Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
+ // temporary one).
+
+ // First try to open a second channel with a temporary channel id equal to the txid-based one.
+ // Technically this is allowed by the spec, but we don't support it and there's little reason
+ // to. Still, it shouldn't cause any other issues.
+ open_chan_msg.temporary_channel_id = channel_id;
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_chan_msg);
+ {
+  let events = nodes[1].node.get_and_clear_pending_msg_events();
+  assert_eq!(events.len(), 1);
+  match events[0] {
+   MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
+    // Technically, at this point, nodes[1] would be justified in thinking both
+    // channels are closed, but currently we do not, so we just move forward with it.
+    assert_eq!(msg.channel_id, open_chan_msg.temporary_channel_id);
+    assert_eq!(node_id, nodes[0].node.get_our_node_id());
+   },
+   _ => panic!("Unexpected event"),
+  }
+ }
+
+ // Now try to create a second channel which has a duplicate funding output.
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
+ let open_chan_2_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_chan_2_msg);
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
+ create_funding_transaction(&nodes[0], 100000, 42); // Get and check the FundingGenerationReady event
+
+ // Hand-craft a funding_created for the second channel which reuses the first channel's
+ // funding outpoint (and thus its txid-based channel_id) by reaching into the channel state
+ // directly rather than going through funding_transaction_generated.
+ let funding_created = {
+  let mut a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
+  let mut as_chan = a_channel_lock.by_id.get_mut(&open_chan_2_msg.temporary_channel_id).unwrap();
+  let logger = test_utils::TestLogger::new();
+  as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap()
+ };
+ check_added_monitors!(nodes[0], 0);
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
+ // At this point we'll try to add a duplicate channel monitor, which will be rejected, but
+ // still needs to be cleared here.
+ check_added_monitors!(nodes[1], 1);
+
+ // ...still, nodes[1] will reject the duplicate channel.
+ {
+  let events = nodes[1].node.get_and_clear_pending_msg_events();
+  assert_eq!(events.len(), 1);
+  match events[0] {
+   MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
+    // Technically, at this point, nodes[1] would be justified in thinking both
+    // channels are closed, but currently we do not, so we just move forward with it.
+    assert_eq!(msg.channel_id, channel_id);
+    assert_eq!(node_id, nodes[0].node.get_our_node_id());
+   },
+   _ => panic!("Unexpected event"),
+  }
+ }
+
+ // finally, finish creating the original channel and send a payment over it to make sure
+ // everything is functional.
+ nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg);
+ {
+  let mut added_monitors = nodes[0].chain_monitor.added_monitors.lock().unwrap();
+  assert_eq!(added_monitors.len(), 1);
+  assert_eq!(added_monitors[0].0, funding_output);
+  added_monitors.clear();
+ }
+
+ // funding_signed produces no user-facing events, but does trigger broadcast of the funding tx.
+ let events_4 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_4.len(), 0);
+ assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+ assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].txid(), funding_output.txid);
+
+ let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
+ let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
+ update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
+ send_payment(&nodes[0], &[&nodes[1]], 8000000);
+}
+
+#[test]
+fn test_error_chans_closed() {
+ // Test that we properly handle error messages, closing appropriate channels.
+ //
+ // Prior to #787 we'd allow a peer to make us force-close a channel we had with a different
+ // peer. The "real" fix for that is to index channels with peers_ids, however in the mean time
+ // we can test various edge cases around it to ensure we don't regress.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ // Create some initial channels
+ let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+ let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+ let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
+ assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
+ assert_eq!(nodes[2].node.list_usable_channels().len(), 1);
+
+ // Closing a channel from a different peer has no effect (nodes[1] names the channel we
+ // have with nodes[2], so nothing should close)
+ nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
+
+ // Closing one channel doesn't impact others
+ nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
+ check_added_monitors!(nodes[0], 1);
+ check_closed_broadcast!(nodes[0], false);
+ assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
+ assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
+ assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);
+
+ // A null channel ID should close all channels with the sending peer (here chan_1 and
+ // chan_4, both with nodes[1]; chan_3 is with nodes[2] and must survive)
+ let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+ nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
+ check_added_monitors!(nodes[0], 2);
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+  MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+   assert_eq!(msg.contents.flags & 2, 2);
+  },
+  _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+  MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+   assert_eq!(msg.contents.flags & 2, 2);
+  },
+  _ => panic!("Unexpected event"),
+ }
+ // Note that at this point users of a standard PeerHandler will end up calling
+ // peer_disconnected with no_connection_possible set to false, duplicating the
+ // close-all-channels logic. That's OK, we don't want to end up not force-closing channels for
+ // users with their own peer handling logic. We duplicate the call here, however.
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
+ assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
+ assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
+}
+
+#[test]
+fn test_invalid_funding_tx() {
+ // Test that we properly handle invalid funding transactions sent to us from a peer.
+ //
+ // Previously, all other major lightning implementations had failed to properly sanitize
+ // funding transactions from their counterparties, leading to a multi-implementation critical
+ // security vulnerability (though we always sanitized properly, we've previously had
+ // un-released crashes in the sanitization process).
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 10_000, 42, None).unwrap();
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
+
+ let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], 100_000, 42);
+ for output in tx.output.iter_mut() {
+  // Make the confirmed funding transaction have a bogus script_pubkey
+  output.script_pubkey = bitcoin::Script::new();
+ }
+
+ // Use the _unchecked variant so nodes[0] accepts the mangled tx; nodes[1] must be the one
+ // to reject it, and only once it confirms.
+ nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, tx.clone(), 0).unwrap();
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
+ check_added_monitors!(nodes[0], 1);
+
+ let events_1 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_1.len(), 0);
+
+ assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+ assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
+ nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+
+ // Once the bogus funding tx confirms, nodes[1] notices the script mismatch and force-closes.
+ confirm_transaction_at(&nodes[1], &tx, 1);
+ check_added_monitors!(nodes[1], 1);
+ let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_2.len(), 1);
+ if let MessageSendEvent::HandleError { node_id, action } = &events_2[0] {
+  assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+  if let msgs::ErrorAction::SendErrorMessage { msg } = action {
+   assert_eq!(msg.data, "funding tx had wrong script/value or output index");
+  } else { panic!(); }
+ } else { panic!(); }
+ assert_eq!(nodes[1].node.list_channels().len(), 0);
+}
+
+fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
+ // In the first version of the chain::Confirm interface, after a refactor was made to not
+ // broadcast CSV-locked transactions until their CSV lock is up, we wouldn't reliably broadcast
+ // transactions after a `transactions_confirmed` call. Specifically, if the chain, provided via
+ // `best_block_updated` is at height N, and a transaction output which we wish to spend at
+ // height N-1 (due to a CSV to height N-1) is provided at height N, we will not broadcast the
+ // spending transaction until height N+1 (or greater). This was due to the way
+ // `ChannelMonitor::transactions_confirmed` worked, only checking if we should broadcast a
+ // spending transaction at the height the input transaction was confirmed at, not whether we
+ // should broadcast a spending transaction at the current height.
+ // A second, similar, issue involved failing HTLCs backwards - because we only provided the
+ // height at which transactions were confirmed to `OnchainTx::update_claims_view`, it wasn't
+ // aware that the anti-reorg-delay had, in fact, already expired, waiting to fail-backwards
+ // until we learned about an additional block.
+ //
+ // As an additional check, if `test_height_before_timelock` is set, we instead test that we
+ // aren't broadcasting transactions too early (ie not broadcasting them at all).
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks;
+
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+ let (_, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+ nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
+ nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+ nodes[1].node.force_close_channel(&channel_id).unwrap();
+ check_closed_broadcast!(nodes[1], true);
+ check_added_monitors!(nodes[1], 1);
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(node_txn.len(), 1);
+
+ let conf_height = nodes[1].best_block_info().1;
+ if !test_height_before_timelock {
+  // Advance the chain well past the relevant timelocks before confirming the commitment tx
+  // below, so that spends should be broadcastable immediately upon confirmation.
+  connect_blocks(&nodes[1], 24 * 6);
+ }
+ nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
+  &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height);
+ if test_height_before_timelock {
+  // If we confirmed the close transaction, but timelocks have not yet expired, we should not
+  // generate any events or broadcast any transactions
+  assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty());
+  assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+ } else {
+  // We should broadcast an HTLC transaction spending our funding transaction first
+  let spending_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+  assert_eq!(spending_txn.len(), 2);
+  assert_eq!(spending_txn[0], node_txn[0]);
+  check_spends!(spending_txn[1], node_txn[0]);
+  // We should also generate a SpendableOutputs event with the to_self output (as its
+  // timelock is up).
+  let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
+  assert_eq!(descriptor_spend_txn.len(), 1);
+
+  // If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we
+  // should immediately fail-backwards the HTLC to the previous hop, without waiting for an
+  // additional block built on top of the current chain.
+  nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
+   &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
+  expect_pending_htlcs_forwardable!(nodes[1]);
+  check_added_monitors!(nodes[1], 1);
+
+  let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+  assert!(updates.update_add_htlcs.is_empty());
+  assert!(updates.update_fulfill_htlcs.is_empty());
+  assert_eq!(updates.update_fail_htlcs.len(), 1);
+  assert!(updates.update_fail_malformed_htlcs.is_empty());
+  assert!(updates.update_fee.is_none());
+  nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+  commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+  expect_payment_failed!(nodes[0], payment_hash, false);
+  expect_payment_failure_chan_update!(nodes[0], chan_announce.contents.short_channel_id, true);
+ }
+}
+
+#[test]
+fn test_tx_confirmed_skipping_blocks_immediate_broadcast() {
+ // Run both variants: timelocks already expired (false) and not yet expired (true).
+ for &test_height_before_timelock in &[false, true] {
+  do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock);
+ }
+}
+
+#[test]
+fn test_keysend_payments_to_public_node() {
+ // Send a spontaneous (keysend) payment over a public, announced channel, routed via the
+ // network graph, and verify it can be passed along and claimed with the sender-chosen
+ // preimage.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+ let payer_pubkey = nodes[0].node.get_our_node_id();
+ let payee_pubkey = nodes[1].node.get_our_node_id();
+ let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
+ let route = get_route(&payer_pubkey, &network_graph, &payee_pubkey, None,
+                 None, &vec![], 10000, 40,
+                 nodes[0].logger).unwrap();
+
+ let preimage = PaymentPreimage([42; 32]);
+ let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(preimage)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let send_event = events.remove(0);
+ let hop = vec![&nodes[1]];
+ pass_along_path(&nodes[0], &hop, 10000, payment_hash, None, send_event, true, Some(preimage));
+ claim_payment(&nodes[0], &hop, preimage);
+}
+
+#[test]
+fn test_keysend_payments_to_private_node() {
+ // Send a spontaneous (keysend) payment over an unannounced channel, routing via the
+ // payer's first-hop channel list rather than the public network graph, then pass it
+ // along and claim it with the sender-chosen preimage.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let payer_pubkey = nodes[0].node.get_our_node_id();
+ let payee_pubkey = nodes[1].node.get_our_node_id();
+ nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
+ nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+
+ let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+ let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
+ let first_hops = nodes[0].node.list_usable_channels();
+ let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey,
+                 Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
+                 nodes[0].logger).unwrap();
+
+ let preimage = PaymentPreimage([42; 32]);
+ let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(preimage)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let send_event = events.remove(0);
+ let hop = vec![&nodes[1]];
+ pass_along_path(&nodes[0], &hop, 10000, payment_hash, None, send_event, true, Some(preimage));
+ claim_payment(&nodes[0], &hop, preimage);
+}