X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannel.rs;h=374d74f6c18b578b8123e4f2344375164abff704;hb=f1c7fd2ab9b4df5f4b7cad855501d1178b2eb1c6;hp=81762923fda224f83874ff4413dc3e4d7605ef3c;hpb=33b7c906f2ec1522e466a42ca70b82806d89d8a6;p=rust-lightning

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 81762923..374d74f6 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -1138,8 +1138,11 @@ impl Channel {
 	}
 
 	/// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
-	/// In such cases we debug_assert!(false) and return an IgnoreError. Thus, will always return
-	/// Ok(_) if debug assertions are turned on and preconditions are met.
+	/// In such cases we debug_assert!(false) and return a ChannelError::Ignore. Thus, will always
+	/// return Ok(_) if debug assertions are turned on or preconditions are met.
+	///
+	/// Note that it is still possible to hit these assertions in case we find a preimage on-chain
+	/// but then have a reorg which settles on an HTLC-failure on chain.
 	fn get_update_fulfill_htlc(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage) -> Result<(Option<msgs::UpdateFulfillHTLC>, Option<ChannelMonitorUpdate>), ChannelError> {
 		// Either ChannelFunded got set (which means it won't be unset) or there is no way any
 		// caller thought we could have something claimed (cause we wouldn't have accepted in an
@@ -1167,6 +1170,7 @@ impl Channel {
 						} else {
 							log_warn!(self, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id()));
 						}
+						debug_assert!(false, "Tried to fulfill an HTLC that was already fail/fulfilled");
 						return Ok((None, None));
 					},
 					_ => {
@@ -1200,6 +1204,9 @@ impl Channel {
 				match pending_update {
 					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
 						if htlc_id_arg == htlc_id {
+							// Make sure we don't leave latest_monitor_update_id incremented here:
+							self.latest_monitor_update_id -= 1;
+							debug_assert!(false, "Tried to fulfill an HTLC that was already fulfilled");
 							return Ok((None, None));
 						}
 					},
@@ -1208,6 +1215,7 @@ impl Channel {
 							log_warn!(self, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.channel_id()));
 							// TODO: We may actually be able to switch to a fulfill here, though its
 							// rare enough it may not be worth the complexity burden.
+							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
 							return Ok((None, Some(monitor_update)));
 						}
 					},
@@ -1259,8 +1267,11 @@ impl Channel {
 	}
 
 	/// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
-	/// In such cases we debug_assert!(false) and return an IgnoreError. Thus, will always return
-	/// Ok(_) if debug assertions are turned on and preconditions are met.
+	/// In such cases we debug_assert!(false) and return a ChannelError::Ignore. Thus, will always
+	/// return Ok(_) if debug assertions are turned on or preconditions are met.
+	///
+	/// Note that it is still possible to hit these assertions in case we find a preimage on-chain
+	/// but then have a reorg which settles on an HTLC-failure on chain.
 	pub fn get_update_fail_htlc(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket) -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> {
 		if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
 			panic!("Was asked to fail an HTLC when channel was not in an operational state");
@@ -1277,6 +1288,7 @@ impl Channel {
 				match htlc.state {
 					InboundHTLCState::Committed => {},
 					InboundHTLCState::LocalRemoved(_) => {
+						debug_assert!(false, "Tried to fail an HTLC that was already fail/fulfilled");
 						return Ok(None);
 					},
 					_ => {
@@ -1297,11 +1309,13 @@ impl Channel {
 				match pending_update {
 					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
 						if htlc_id_arg == htlc_id {
+							debug_assert!(false, "Tried to fail an HTLC that was already fulfilled");
 							return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID"));
 						}
 					},
 					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
 						if htlc_id_arg == htlc_id {
+							debug_assert!(false, "Tried to fail an HTLC that was already failed");
 							return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID"));
 						}
 					},
@@ -3760,7 +3774,7 @@ impl Channel {
 	/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
 	/// Also returns the list of payment_hashes for channels which we can safely fail backwards
 	/// immediately (others we will have to allow to time out).
-	pub fn force_shutdown(&mut self) -> (Vec<Transaction>, Vec<(HTLCSource, PaymentHash)>) {
+	pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>) {
 		assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
 
 		// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
@@ -3783,12 +3797,11 @@ impl Channel {
 		self.channel_state = ChannelState::ShutdownComplete as u32;
 		self.update_time_counter += 1;
-		if self.channel_monitor.is_some() {
-			(self.channel_monitor.as_mut().unwrap().get_latest_local_commitment_txn(), dropped_outbound_htlcs)
-		} else {
-			// We aren't even signed funding yet, so can't broadcast anything
-			(Vec::new(), dropped_outbound_htlcs)
-		}
+		self.latest_monitor_update_id += 1;
+		(self.funding_txo.clone(), ChannelMonitorUpdate {
+			update_id: self.latest_monitor_update_id,
+			updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
+		}, dropped_outbound_htlcs)
 	}
 }
@@ -4255,22 +4268,28 @@ impl ReadableArgs> for Channel
+		let logger = Arc::new(test_utils::TestLogger::new());
+		let secp_ctx = Secp256k1::new();
+		let mut seed = [0; 32];
+		let mut rng = thread_rng();
+		rng.fill_bytes(&mut seed);
+		let network = Network::Testnet;
+		let keys_provider = test_utils::TestKeysInterface::new(&seed, network, logger.clone() as Arc<Logger>);
+
+		// Go through the flow of opening a channel between two nodes.
+
+		// Create Node A's channel
+		let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+		let config = UserConfig::default();
+		let mut node_a_chan = Channel::::new_outbound(&&feeest, &&keys_provider, node_a_node_id, 10000000, 100000, 42, Arc::clone(&logger), &config).unwrap();
+
+		// Create Node B's channel by receiving Node A's open_channel message
+		let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.bitcoin_hash(), &&feeest);
+		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
+		let mut node_b_chan = Channel::::new_from_req(&&feeest, &&keys_provider, node_b_node_id, InitFeatures::supported(), &open_channel_msg, 7, logger, &config).unwrap();
+
+		// Node B --> Node A: accept channel
+		let accept_channel_msg = node_b_chan.get_accept_channel();
+		node_a_chan.accept_channel(&accept_channel_msg, &config, InitFeatures::supported()).unwrap();
+
+		// Node A --> Node B: funding created
+		let output_script = node_a_chan.get_funding_redeemscript();
+		let tx = Transaction { version: 1, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+			value: 10000000, script_pubkey: output_script.clone(),
+		}]};
+		let funding_outpoint = OutPoint::new(tx.txid(), 0);
+		let (funding_created_msg, _) = node_a_chan.get_outbound_funding_created(funding_outpoint).unwrap();
+		let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg).unwrap();
+
+		// Node B --> Node A: funding signed
+		let _ = node_a_chan.funding_signed(&funding_signed_msg);
+
+		// Now disconnect the two nodes and check that the commitment point in
+		// Node B's channel_reestablish message is sane.
+		node_b_chan.remove_uncommitted_htlcs_and_mark_paused();
+		let expected_commitment_point = PublicKey::from_secret_key(&secp_ctx, &node_b_chan.build_local_commitment_secret(node_b_chan.cur_local_commitment_transaction_number + 1));
+		let msg = node_b_chan.get_channel_reestablish();
+		match msg.data_loss_protect {
+			OptionalField::Present(DataLossProtect { my_current_per_commitment_point, .. }) => {
+				assert_eq!(expected_commitment_point, my_current_per_commitment_point);
+			},
+			_ => panic!()
+		}
+
+		// Check that the commitment point in Node A's channel_reestablish message
+		// is sane.
+		node_a_chan.remove_uncommitted_htlcs_and_mark_paused();
+		let expected_commitment_point = PublicKey::from_secret_key(&secp_ctx, &node_a_chan.build_local_commitment_secret(node_a_chan.cur_local_commitment_transaction_number + 1));
+		let msg = node_a_chan.get_channel_reestablish();
+		match msg.data_loss_protect {
+			OptionalField::Present(DataLossProtect { my_current_per_commitment_point, .. }) => {
+				assert_eq!(expected_commitment_point, my_current_per_commitment_point);
+			},
+			_ => panic!()
+		}
+	}
+
 	#[test]
 	fn outbound_commitment_test() {
 		// Test vectors from BOLT 3 Appendix C:
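
For readers skimming the hunks above: the recurring shape in get_update_fulfill_htlc and get_update_fail_htlc is a debug_assert!(false, ...) followed by a graceful return, so a duplicate settle attempt is flagged as a bug in debug builds but degrades to a no-op in release builds, where a reorg can legitimately replay it. Below is a minimal, self-contained sketch of that pattern; HtlcState and settle_once are hypothetical stand-ins, not names from this file.

#[derive(Debug, PartialEq)]
enum HtlcState { Committed, LocalRemoved }

fn settle_once(state: &mut HtlcState) -> Result<Option<&'static str>, ()> {
    if *state == HtlcState::LocalRemoved {
        // Already settled: a bug under normal operation, but reachable after a reorg,
        // so only assert in debug builds and otherwise return a harmless no-op.
        debug_assert!(false, "Tried to settle an HTLC that was already settled");
        return Ok(None);
    }
    *state = HtlcState::LocalRemoved;
    Ok(Some("update_fulfill_htlc"))
}

fn main() {
    let mut state = HtlcState::Committed;
    assert_eq!(settle_once(&mut state), Ok(Some("update_fulfill_htlc")));
    // Calling settle_once again returns Ok(None) in release builds and panics under
    // debug_assertions, mirroring the behaviour the patch adds above.
}

The same idea drives the holding-cell checks in the patch, which additionally roll back latest_monitor_update_id before bailing out so the duplicate attempt leaves no gap in monitor update IDs.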