X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=c83b19e7e571e95569c7e11d1acaaa09a03d65cd;hb=f70c113fd813cff4d31481a9200e5f7121224a81;hp=5d8cc59978eb1d8f01379b47be6be41900aa8cd1;hpb=0dbf17b78116f175742ad10d2f50cc3dc0277864;p=rust-lightning diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 5d8cc599..c83b19e7 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -768,7 +768,7 @@ fn test_update_fee_that_funder_cannot_afford() { //check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve) //Should produce and error. nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg); - nodes[1].logger.assert_log("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee".to_string(), 1); + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3); check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") }, @@ -871,8 +871,8 @@ fn test_update_fee_with_fundee_update_add_htlc() { send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000); send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); } #[test] @@ -985,8 +985,8 @@ fn test_update_fee() { assert_eq!(get_feerate!(nodes[0], nodes[1], channel_id), feerate + 30); assert_eq!(get_feerate!(nodes[1], nodes[0], channel_id), feerate + 30); close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); } #[test] @@ -1104,17 +1104,17 @@ fn fake_network_test() { // Close down the channels... 
close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true); - check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000); - check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000); + check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true); - check_closed_event!(nodes[2], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000); - check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[2].node.get_our_node_id()], 100000); + check_closed_event!(nodes[2], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000); + check_closed_event!(nodes[3], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[2].node.get_our_node_id()], 100000); close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[3].node.get_our_node_id()], 100000); - check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[3].node.get_our_node_id()], 100000); + check_closed_event!(nodes[3], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); } #[test] @@ -1617,7 +1617,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg); // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd. - nodes[0].logger.assert_log("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1); + nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value", 3); assert_eq!(nodes[0].node.list_channels().len(), 0); let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value"); @@ -1796,7 +1796,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg); // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd. 
- nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value".to_string(), 1); + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value", 3); assert_eq!(nodes[1].node.list_channels().len(), 1); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); @@ -2889,8 +2889,10 @@ fn test_htlc_on_chain_success() { } let chan_id = Some(chan_1.2); match forwarded_events[1] { - Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => { - assert_eq!(fee_earned_msat, Some(1000)); + Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx, + next_channel_id, outbound_amount_forwarded_msat, .. + } => { + assert_eq!(total_fee_earned_msat, Some(1000)); assert_eq!(prev_channel_id, chan_id); assert_eq!(claim_from_onchain_tx, true); assert_eq!(next_channel_id, Some(chan_2.2)); @@ -2899,8 +2901,10 @@ fn test_htlc_on_chain_success() { _ => panic!() } match forwarded_events[2] { - Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => { - assert_eq!(fee_earned_msat, Some(1000)); + Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx, + next_channel_id, outbound_amount_forwarded_msat, .. + } => { + assert_eq!(total_fee_earned_msat, Some(1000)); assert_eq!(prev_channel_id, chan_id); assert_eq!(claim_from_onchain_tx, true); assert_eq!(next_channel_id, Some(chan_2.2)); @@ -3328,22 +3332,18 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() }); - match events[0] { - Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { }, - _ => panic!("Unexepected event"), - } - match events[1] { - Event::PaymentPathFailed { ref payment_hash, .. } => { - assert_eq!(*payment_hash, fourth_payment_hash); - }, - _ => panic!("Unexpected event"), - } - match events[2] { - Event::PaymentFailed { ref payment_hash, .. } => { - assert_eq!(*payment_hash, fourth_payment_hash); - }, - _ => panic!("Unexpected event"), - } + assert!(events.iter().any(|ev| matches!( + ev, + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } + ))); + assert!(events.iter().any(|ev| matches!( + ev, + Event::PaymentPathFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash + ))); + assert!(events.iter().any(|ev| matches!( + ev, + Event::PaymentFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash + ))); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); @@ -4916,8 +4916,10 @@ fn test_onchain_to_onchain_claim() { _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentForwarded { fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id, outbound_amount_forwarded_msat } => { - assert_eq!(fee_earned_msat, Some(1000)); + Event::PaymentForwarded { total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx, + next_channel_id, outbound_amount_forwarded_msat, .. 
+ } => { + assert_eq!(total_fee_earned_msat, Some(1000)); assert_eq!(prev_channel_id, Some(chan_1.2)); assert_eq!(claim_from_onchain_tx, true); assert_eq!(next_channel_id, Some(chan_2.2)); @@ -5537,8 +5539,9 @@ fn test_key_derivation_params() { let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &chanmon_cfgs[0].logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); - let router = test_utils::TestRouter::new(network_graph.clone(), &scorer); - let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) }; + let router = test_utils::TestRouter::new(network_graph.clone(), &chanmon_cfgs[0].logger, &scorer); + let message_router = test_utils::TestMessageRouter::new(network_graph.clone()); + let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, router, message_router, chain_monitor, keys_manager: &keys_manager, network_graph, node_seed: seed, override_init_features: alloc::rc::Rc::new(core::cell::RefCell::new(None)) }; let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs); node_cfgs.remove(0); node_cfgs.insert(0, node); @@ -5622,7 +5625,7 @@ fn test_static_output_closing_tx() { let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2; mine_transaction(&nodes[0], &closing_tx); - check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[0], node_cfgs[0].keys_manager); @@ -5630,7 +5633,7 @@ fn test_static_output_closing_tx() { check_spends!(spend_txn[0], closing_tx); mine_transaction(&nodes[1], &closing_tx); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); let spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); @@ -6299,7 +6302,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { updates.update_add_htlcs[0].amount_msat = 0; nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC".to_string(), 1); + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3); check_closed_broadcast!(nodes[1], true).unwrap(); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() }, @@ -8216,8 +8219,10 @@ fn test_onion_value_mpp_set_calculation() { 
RecipientOnionFields::secret_only(our_payment_secret), height + 1, &None).unwrap(); // Edit amt_to_forward to simulate the sender having set // the final amount and the routing node taking less fee - if let msgs::OutboundOnionPayload::Receive { ref mut amt_msat, .. } = onion_payloads[1] { - *amt_msat = 99_000; + if let msgs::OutboundOnionPayload::Receive { + ref mut sender_intended_htlc_amt_msat, .. + } = onion_payloads[1] { + *sender_intended_htlc_amt_msat = 99_000; } else { panic!() } let new_onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &our_payment_hash).unwrap(); payment_event.msgs[0].onion_routing_packet = new_onion_packet; @@ -8685,7 +8690,7 @@ fn test_pre_lockin_no_chan_closed_update() { check_added_monitors!(nodes[0], 0); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); - let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id(); + let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }); nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() }); assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty()); check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true, @@ -8981,6 +8986,149 @@ fn test_duplicate_temporary_channel_id_from_different_peers() { } } +#[test] +fn test_peer_funding_sidechannel() { + // Test that if a peer somehow learns which txid we'll use for our channel funding before we + // receive `funding_transaction_generated` the peer cannot cause us to crash. We'd previously + // assumed that LDK would receive `funding_transaction_generated` prior to our peer learning + // the txid and panicked if the peer tried to open a redundant channel to us with the same + // funding outpoint. + // + // While this assumption is generally safe, some users may have out-of-band protocols where + // they notify their LSP about a funding outpoint first, or this may be violated in the future + // with collaborative transaction construction protocols, i.e. dual-funding. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let temp_chan_id_ab = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0); + let temp_chan_id_ca = exchange_open_accept_chan(&nodes[2], &nodes[0], 1_000_000, 0); + + let (_, tx, funding_output) = + create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); + + let cs_funding_events = nodes[2].node.get_and_clear_pending_events(); + assert_eq!(cs_funding_events.len(), 1); + match cs_funding_events[0] { + Event::FundingGenerationReady { .. 
} => {} + _ => panic!("Unexpected event {:?}", cs_funding_events), + } + + nodes[2].node.funding_transaction_generated_unchecked(&temp_chan_id_ca, &nodes[0].node.get_our_node_id(), tx.clone(), funding_output.index).unwrap(); + let funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg); + get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, nodes[2].node.get_our_node_id()); + expect_channel_pending_event(&nodes[0], &nodes[2].node.get_our_node_id()); + check_added_monitors!(nodes[0], 1); + + let res = nodes[0].node.funding_transaction_generated(&temp_chan_id_ab, &nodes[1].node.get_our_node_id(), tx.clone()); + let err_msg = format!("{:?}", res.unwrap_err()); + assert!(err_msg.contains("An existing channel using outpoint ")); + assert!(err_msg.contains(" is open with peer")); + // Even though the last funding_transaction_generated errored, it still generated a + // SendFundingCreated. However, when the peer responds with a funding_signed it will send the + // appropriate error message. + let as_funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &as_funding_created); + check_added_monitors!(nodes[1], 1); + expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + let reason = ClosureReason::ProcessingError { err: format!("An existing channel using outpoint {} is open with peer {}", funding_output, nodes[2].node.get_our_node_id()), }; + check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(ChannelId::v1_from_funding_outpoint(funding_output), true, reason)]); + + let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed); + get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()); +} + +#[test] +fn test_duplicate_conflicting_funding_from_second_peer() { + // Test that if a user tries to fund a channel with a funding outpoint they'd previously used + // we don't try to remove the previous ChannelMonitor. This is largely a test to ensure we + // don't regress in the fuzzer, as such funding getting passed our outpoint-matches checks + // implies the user (and our counterparty) has reused cryptographic keys across channels, which + // we require the user not do. + let chanmon_cfgs = create_chanmon_cfgs(4); + let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + + let temp_chan_id = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0); + + let (_, tx, funding_output) = + create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); + + // Now that we have a funding outpoint, create a dummy `ChannelMonitor` and insert it into + // nodes[0]'s ChainMonitor so that the initial `ChannelMonitor` write fails. 
+ let dummy_chan_id = create_chan_between_nodes(&nodes[2], &nodes[3]).3; + let dummy_monitor = get_monitor!(nodes[2], dummy_chan_id).clone(); + nodes[0].chain_monitor.chain_monitor.watch_channel(funding_output, dummy_monitor).unwrap(); + + nodes[0].node.funding_transaction_generated(&temp_chan_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); + + let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); + let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + check_added_monitors!(nodes[1], 1); + expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + + nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg); + // At this point, the channel should be closed, after having generated one monitor write (the + // watch_channel call which failed), but zero monitor updates. + check_added_monitors!(nodes[0], 1); + get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()); + let err_reason = ClosureReason::ProcessingError { err: "Channel funding outpoint was a duplicate".to_owned() }; + check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_signed_msg.channel_id, true, err_reason)]); +} + +#[test] +fn test_duplicate_funding_err_in_funding() { + // Test that if we have a live channel with one peer, then another peer comes along and tries + // to create a second channel with the same txid we'll fail and not overwrite the + // outpoint_to_peer map in `ChannelManager`. + // + // This was previously broken. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]); + let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 }; + assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id); + + nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap(); + let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + let node_c_temp_chan_id = open_chan_msg.temporary_channel_id; + open_chan_msg.temporary_channel_id = real_channel_id; + nodes[1].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg); + let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[2].node.get_our_node_id()); + accept_chan_msg.temporary_channel_id = node_c_temp_chan_id; + nodes[2].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg); + + // Now that we have a second channel with the same funding txo, send a bogus funding message + // and let nodes[1] remove the inbound channel. 
+ let (_, funding_tx, _) = create_funding_transaction(&nodes[2], &nodes[1].node.get_our_node_id(), 100_000, 42); + + nodes[2].node.funding_transaction_generated(&node_c_temp_chan_id, &nodes[1].node.get_our_node_id(), funding_tx).unwrap(); + + let mut funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + funding_created_msg.temporary_channel_id = real_channel_id; + // Make the signature invalid by changing the funding output + funding_created_msg.funding_output_index += 10; + nodes[1].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg); + get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id()); + let err = "Invalid funding_created signature from peer".to_owned(); + let reason = ClosureReason::ProcessingError { err }; + let expected_closing = ExpectedCloseEvent::from_id_reason(real_channel_id, false, reason); + check_closed_events(&nodes[1], &[expected_closing]); + + assert_eq!( + *nodes[1].node.outpoint_to_peer.lock().unwrap().get(&real_chan_funding_txo).unwrap(), + nodes[0].node.get_our_node_id() + ); +} + #[test] fn test_duplicate_chan_id() { // Test that if a given peer tries to open a channel with the same channel_id as one that is @@ -9039,7 +9187,7 @@ fn test_duplicate_chan_id() { let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }; - let channel_id = funding_outpoint.to_channel_id(); + let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint); // Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a // temporary one). @@ -9083,14 +9231,20 @@ fn test_duplicate_chan_id() { chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap() }, _ => panic!("Unexpected ChannelPhase variant"), - } + }.unwrap() }; check_added_monitors!(nodes[0], 0); - nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created.unwrap()); + nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created); // At this point we'll look up if the channel_id is present and immediately fail the channel // without trying to persist the `ChannelMonitor`. check_added_monitors!(nodes[1], 0); + check_closed_events(&nodes[1], &[ + ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, ClosureReason::ProcessingError { + err: "Already had channel with the new channel_id".to_owned() + }) + ]); + // ...still, nodes[1] will reject the duplicate channel. { let events = nodes[1].node.get_and_clear_pending_msg_events(); @@ -10487,7 +10641,7 @@ fn test_batch_channel_open() { // Complete the persistence of the monitor. nodes[0].chain_monitor.complete_sole_pending_chan_update( - &OutPoint { txid: tx.txid(), index: 1 }.to_channel_id() + &ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 }) ); let events = nodes[0].node.get_and_clear_pending_events(); @@ -10542,17 +10696,23 @@ fn test_disconnect_in_funding_batch() { nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id()); // The channels in the batch will close immediately. 
- let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(); - let channel_id_2 = OutPoint { txid: tx.txid(), index: 1 }.to_channel_id(); + let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 }; + let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 }; + let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1); + let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2); check_closed_events(&nodes[0], &[ ExpectedCloseEvent { channel_id: Some(channel_id_1), discard_funding: true, + channel_funding_txo: Some(funding_txo_1), + user_channel_id: Some(42), ..Default::default() }, ExpectedCloseEvent { channel_id: Some(channel_id_2), discard_funding: true, + channel_funding_txo: Some(funding_txo_2), + user_channel_id: Some(43), ..Default::default() }, ]); @@ -10610,8 +10770,10 @@ fn test_batch_funding_close_after_funding_signed() { assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0); // Force-close the channel for which we've completed the initial monitor. - let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(); - let channel_id_2 = OutPoint { txid: tx.txid(), index: 1 }.to_channel_id(); + let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 }; + let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 }; + let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1); + let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2); nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap(); check_added_monitors(&nodes[0], 2); { @@ -10643,11 +10805,15 @@ fn test_batch_funding_close_after_funding_signed() { ExpectedCloseEvent { channel_id: Some(channel_id_1), discard_funding: true, + channel_funding_txo: Some(funding_txo_1), + user_channel_id: Some(42), ..Default::default() }, ExpectedCloseEvent { channel_id: Some(channel_id_2), discard_funding: true, + channel_funding_txo: Some(funding_txo_2), + user_channel_id: Some(43), ..Default::default() }, ]); @@ -10667,7 +10833,7 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0); - let chan_id = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 }.to_channel_id(); + let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 }); assert_eq!(nodes[0].node.list_channels().len(), 1); assert_eq!(nodes[1].node.list_channels().len(), 1);
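Throughout the patch, `OutPoint { .. }.to_channel_id()` calls are replaced with `ChannelId::v1_from_funding_outpoint(..)`. For readers unfamiliar with what that computes, here is a minimal sketch of the BOLT 2 "v1" channel_id derivation — an illustration assuming the 32 txid bytes are taken in the order LDK serializes them, not LDK's actual code:

    // BOLT 2 "v1" channel_id: the funding txid with the big-endian funding
    // output index XORed into its last two bytes.
    fn v1_channel_id(funding_txid: [u8; 32], funding_output_index: u16) -> [u8; 32] {
        let mut id = funding_txid;
        id[30] ^= (funding_output_index >> 8) as u8;
        id[31] ^= (funding_output_index & 0xff) as u8;
        id
    }

This is why the batch-funding tests above can fund two channels from a single transaction (output index 0 and index 1) and still get distinct `channel_id_1` and `channel_id_2`: the two IDs differ only in the final byte.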
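`test_peer_funding_sidechannel` exercises the case where a peer learns our funding outpoint out-of-band and reuses it before we call `funding_transaction_generated`, and it asserts the "An existing channel using outpoint ... is open with peer" error rather than the panic LDK previously hit. A hypothetical sketch of that defensive shape — the type aliases and map name below are illustrative stand-ins, not LDK's real API:

    use std::collections::HashMap;

    type FundingOutpoint = ([u8; 32], u16); // (funding txid bytes, output index)
    type PeerId = [u8; 33];

    // Refuse a funding outpoint that a live channel already claims, returning
    // an error for the caller to surface instead of asserting on uniqueness.
    fn claim_funding_outpoint(
        outpoint_to_peer: &mut HashMap<FundingOutpoint, PeerId>,
        outpoint: FundingOutpoint,
        peer: PeerId,
    ) -> Result<(), String> {
        if outpoint_to_peer.contains_key(&outpoint) {
            return Err("An existing channel using this outpoint is already open".to_owned());
        }
        outpoint_to_peer.insert(outpoint, peer);
        Ok(())
    }

`test_duplicate_funding_err_in_funding` checks the complementary property: a rejected duplicate must not overwrite the existing entry, which is why that test ends by asserting `outpoint_to_peer` still maps the real funding outpoint to nodes[0].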
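The `PaymentForwarded` match arms are updated for the `fee_earned_msat` to `total_fee_earned_msat` rename and now end with `..`. A toy example of why the rest pattern makes such assertions robust — a standalone illustration, not the real event type:

    struct PaymentForwarded {
        total_fee_earned_msat: Option<u64>,
        claim_from_onchain_tx: bool,
    }

    fn assert_forward_fee(ev: &PaymentForwarded) {
        // `..` skips fields the assertion does not care about, so adding a
        // field to the struct later will not break this pattern.
        let PaymentForwarded { total_fee_earned_msat, .. } = ev;
        assert_eq!(*total_fee_earned_msat, Some(1000));
    }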