X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=77b791e0136d759f7d4784c2b9d83f100fa80115;hb=d873e7278946e12ed591b23f1f4014adc09f5770;hp=e4d7a33c8c16c610974c2390c09a235847d89e78;hpb=142b0d624e8ef71aea4aeb1d3591c6a5b59a771d;p=rust-lightning

diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index e4d7a33c..77b791e0 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -3400,7 +3400,7 @@ fn test_htlc_ignore_latest_remote_commitment() {
 create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 route_payment(&nodes[0], &[&nodes[1]], 10000000);
 
- nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id);
+ nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
 check_closed_broadcast!(nodes[0], false);
 check_added_monitors!(nodes[0], 1);
 
@@ -3461,7 +3461,7 @@ fn test_force_close_fail_back() {
 // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
 // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
 
- nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id);
+ nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id).unwrap();
 check_closed_broadcast!(nodes[2], false);
 check_added_monitors!(nodes[2], 1);
 let tx = {
@@ -4783,7 +4783,7 @@ fn test_claim_sizeable_push_msat() {
 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known());
 
- nodes[1].node.force_close_channel(&chan.2);
+ nodes[1].node.force_close_channel(&chan.2).unwrap();
 check_closed_broadcast!(nodes[1], false);
 check_added_monitors!(nodes[1], 1);
 let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@ -4810,7 +4810,7 @@ fn test_claim_on_remote_sizeable_push_msat() {
 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known());
 
- nodes[0].node.force_close_channel(&chan.2);
+ nodes[0].node.force_close_channel(&chan.2).unwrap();
 check_closed_broadcast!(nodes[0], false);
 check_added_monitors!(nodes[0], 1);
 
@@ -8553,7 +8553,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 // responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
 let mut force_closing_node = 0; // Alice force-closes
 if !broadcast_alice { force_closing_node = 1; } // Bob force-closes
- nodes[force_closing_node].node.force_close_channel(&chan_ab.2);
+ nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap();
 check_closed_broadcast!(nodes[force_closing_node], false);
 check_added_monitors!(nodes[force_closing_node], 1);
 if go_onchain_before_fulfill {
@@ -8836,3 +8836,66 @@ fn test_duplicate_chan_id() {
 update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
 send_payment(&nodes[0], &[&nodes[1]], 8000000, 8_000_000);
 }
+
+#[test]
+fn test_error_chans_closed() {
+ // Test that we properly handle error messages, closing appropriate channels.
+ //
+ // Prior to #787 we'd allow a peer to make us force-close a channel we had with a different
+ // peer. The "real" fix for that is to index channels by peer ID; in the meantime
+ // we can test various edge cases around it to ensure we don't regress.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ // Create some initial channels
+ let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+ let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+ let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
+ assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
+ assert_eq!(nodes[2].node.list_usable_channels().len(), 1);
+
+ // An error from nodes[1] about chan_3, which is with a different peer (nodes[2]), has no effect
+ nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
+
+ // Closing one channel doesn't impact others
+ nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
+ check_added_monitors!(nodes[0], 1);
+ check_closed_broadcast!(nodes[0], false);
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
+ assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
+ assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);
+
+ // An all-zeros channel ID should close all channels with the sending peer (but no others)
+ let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+ nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
+ check_added_monitors!(nodes[0], 2);
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+  MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+   assert_eq!(msg.contents.flags & 2, 2);
+  },
+  _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+  MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+   assert_eq!(msg.contents.flags & 2, 2);
+  },
+  _ => panic!("Unexpected event"),
+ }
+ // Note that at this point users of a standard PeerHandler will end up calling
+ // peer_disconnected with no_connection_possible set to true, duplicating the
+ // close-all-channels logic. That's fine: better to force-close twice than not at
+ // all for users with their own peer-handling logic, so we duplicate the call here.
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
+ assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
+ assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
+}
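
The .unwrap() calls added in the first five hunks reflect force_close_channel now returning a Result rather than (). A minimal sketch of how a caller outside the test harness might handle the failure; this is illustrative only, assuming the unknown-channel case surfaces as APIError::ChannelUnavailable, and using a closure in place of a fully initialized ChannelManager:

use lightning::util::errors::APIError;

// Sketch only: `force_close` stands in for `|id| channel_manager.force_close_channel(id)`
// on a real, fully initialized node.
fn force_close_or_log<F>(force_close: F, channel_id: &[u8; 32])
    where F: FnOnce(&[u8; 32]) -> Result<(), APIError>
{
    match force_close(channel_id) {
        // On success the latest local commitment transaction is queued for broadcast
        // and a ChannelMonitor update is registered -- the same effects the tests
        // above assert via check_closed_broadcast!/check_added_monitors!.
        Ok(()) => println!("channel force-closed"),
        // Assumed variant for "no channel with this ID" (see hedge in the lead-in).
        Err(APIError::ChannelUnavailable { err }) => eprintln!("cannot force-close: {}", err),
        Err(e) => eprintln!("unexpected error: {:?}", e),
    }
}

Taking a closure keeps the sketch self-contained; the point is simply that call sites can no longer ignore a failed force-close, which is why every test call site gained an .unwrap().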
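The new test pins down the rule the comments describe: an incoming error message may only force-close channels we have with the peer that sent it, and an all-zeros channel_id selects every such channel. A standalone model of that selection rule follows; the types and names here are hypothetical, not rust-lightning internals:

// Illustrative model of the channel-selection rule test_error_chans_closed exercises.
struct Chan {
    counterparty: [u8; 33], // node_id of the peer on the other end
    channel_id: [u8; 32],
}

fn channels_to_force_close(chans: &[Chan], sender: &[u8; 33], err_channel_id: &[u8; 32]) -> Vec<[u8; 32]> {
    chans.iter()
        // The #787 fix: only channels with the sending peer are candidates.
        .filter(|c| c.counterparty[..] == sender[..])
        // An all-zeros ID matches every channel with that peer; otherwise match exactly.
        .filter(|c| *err_channel_id == [0u8; 32] || c.channel_id == *err_channel_id)
        .map(|c| c.channel_id)
        .collect()
}

Under this rule, chan_2's ID sent by nodes[1] selects only chan_2, while [0; 32] from nodes[1] selects every channel with nodes[1] but leaves chan_3 (held with nodes[2]) untouched, matching the test's assertions.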