Merge pull request #241 from TheBlueMatt/2018-10-peer-free-panic
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Sat, 3 Nov 2018 03:59:59 +0000 (23:59 -0400)
committer GitHub <noreply@github.com>
Sat, 3 Nov 2018 03:59:59 +0000 (23:59 -0400)
Fix pre-noise outbound peer disconnect panic found by fuzzer

src/ln/channel.rs
src/ln/channelmanager.rs
src/ln/msgs.rs
src/util/events.rs

index 1fbc605bb3baa34491bf0d4d034f0a3de805503f..b9a81f6ebc0465cf34396eb44073b790fd7c3925 100644 (file)
@@ -1553,6 +1553,12 @@ impl Channel {
 
                //TODO: Check msg.cltv_expiry further? Do this in channel manager?
 
+               if self.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
+                       if let PendingHTLCStatus::Forward(_) = pending_forward_state {
+                               panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
+                       }
+               }
+
                // Now update local state:
                self.next_remote_htlc_id += 1;
                self.pending_inbound_htlcs.push(InboundHTLCOutput {
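For context, PendingHTLCStatus is what gates the new panic: only HTLCs we intend to forward would require a future commitment update, so only the Forward variant is invalid once we have sent shutdown. A minimal sketch of the enum's assumed shape:

    // Assumed shape of the enum matched above; Fail-variant HTLCs are still
    // fine to receive post-shutdown, since they are failed straight back.
    enum PendingHTLCStatus {
        Forward(PendingForwardHTLCInfo),
        Fail(HTLCFailureMsg),
    }
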
@@ -1632,13 +1638,16 @@ impl Channel {
                self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))
        }
 
-       pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>, ChannelMonitor), HandleError> {
+       pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned, fee_estimator: &FeeEstimator) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>, Option<msgs::ClosingSigned>, ChannelMonitor), HandleError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
                        return Err(HandleError{err: "Got commitment signed message when channel was not in an operational state", action: None});
                }
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
                        return Err(HandleError{err: "Peer sent commitment_signed when we needed a channel_reestablish", action: Some(msgs::ErrorAction::SendErrorMessage{msg: msgs::ErrorMessage{data: "Peer sent commitment_signed when we needed a channel_reestablish".to_string(), channel_id: msg.channel_id}})});
                }
+               if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.last_sent_closing_fee.is_some() {
+                       return Err(HandleError{err: "Peer sent commitment_signed after we'd started exchanging closing_signeds", action: Some(msgs::ErrorAction::SendErrorMessage{msg: msgs::ErrorMessage{data: "Peer sent commitment_signed after we'd started exchanging closing_signeds".to_string(), channel_id: msg.channel_id}})});
+               }
 
                let funding_script = self.get_funding_redeemscript();
 
@@ -1730,19 +1739,21 @@ impl Channel {
                        return Err(HandleError{err: "Previous monitor update failure prevented generation of RAA", action: Some(ErrorAction::IgnoreError)});
                }
 
-               let (our_commitment_signed, monitor_update) = if need_our_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+               let (our_commitment_signed, monitor_update, closing_signed) = if need_our_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
                        // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
                        // we'll send one right away when we get the revoke_and_ack when we
                        // free_holding_cell_htlcs().
                        let (msg, monitor) = self.send_commitment_no_status_check()?;
-                       (Some(msg), monitor)
-               } else { (None, self.channel_monitor.clone()) };
+                       (Some(msg), monitor, None)
+               } else if !need_our_commitment {
+                       (None, self.channel_monitor.clone(), self.maybe_propose_first_closing_signed(fee_estimator))
+               } else { (None, self.channel_monitor.clone(), None) };
 
                Ok((msgs::RevokeAndACK {
                        channel_id: self.channel_id,
                        per_commitment_secret: per_commitment_secret,
                        next_per_commitment_point: next_per_commitment_point,
-               }, our_commitment_signed, monitor_update))
+               }, our_commitment_signed, closing_signed, monitor_update))
        }
 
        /// Used to fulfill holding_cell_htlcs when we get a remote ack (or implicitly get it by them
@@ -1843,13 +1854,16 @@ impl Channel {
        /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
        /// generating an appropriate error *after* the channel state has been updated based on the
        /// revoke_and_ack message.
-       pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK) -> Result<(Option<msgs::CommitmentUpdate>, Vec<(PendingForwardHTLCInfo, u64)>, Vec<(HTLCSource, [u8; 32], HTLCFailReason)>, ChannelMonitor), HandleError> {
+       pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK, fee_estimator: &FeeEstimator) -> Result<(Option<msgs::CommitmentUpdate>, Vec<(PendingForwardHTLCInfo, u64)>, Vec<(HTLCSource, [u8; 32], HTLCFailReason)>, Option<msgs::ClosingSigned>, ChannelMonitor), HandleError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
                        return Err(HandleError{err: "Got revoke/ACK message when channel was not in an operational state", action: None});
                }
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
                        return Err(HandleError{err: "Peer sent revoke_and_ack when we needed a channel_reestablish", action: Some(msgs::ErrorAction::SendErrorMessage{msg: msgs::ErrorMessage{data: "Peer sent revoke_and_ack when we needed a channel_reestablish".to_string(), channel_id: msg.channel_id}})});
                }
+               if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.last_sent_closing_fee.is_some() {
+                       return Err(HandleError{err: "Peer sent revoke_and_ack after we'd started exchanging closing_signeds", action: Some(msgs::ErrorAction::SendErrorMessage{msg: msgs::ErrorMessage{data: "Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_string(), channel_id: msg.channel_id}})});
+               }
 
                if let Some(their_prev_commitment_point) = self.their_prev_commitment_point {
                        if PublicKey::from_secret_key(&self.secp_ctx, &secp_call!(SecretKey::from_slice(&self.secp_ctx, &msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret", self.channel_id())) != their_prev_commitment_point {
@@ -1971,7 +1985,7 @@ impl Channel {
                        }
                        self.monitor_pending_forwards.append(&mut to_forward_infos);
                        self.monitor_pending_failures.append(&mut revoked_htlcs);
-                       return Ok((None, Vec::new(), Vec::new(), self.channel_monitor.clone()));
+                       return Ok((None, Vec::new(), Vec::new(), None, self.channel_monitor.clone()));
                }
 
                match self.free_holding_cell_htlcs()? {
@@ -1984,7 +1998,7 @@ impl Channel {
                                for fail_msg in update_fail_malformed_htlcs.drain(..) {
                                        commitment_update.0.update_fail_malformed_htlcs.push(fail_msg);
                                }
-                               Ok((Some(commitment_update.0), to_forward_infos, revoked_htlcs, commitment_update.1))
+                               Ok((Some(commitment_update.0), to_forward_infos, revoked_htlcs, None, commitment_update.1))
                        },
                        None => {
                                if require_commitment {
@@ -1996,9 +2010,9 @@ impl Channel {
                                                update_fail_malformed_htlcs,
                                                update_fee: None,
                                                commitment_signed
-                                       }), to_forward_infos, revoked_htlcs, monitor_update))
+                                       }), to_forward_infos, revoked_htlcs, None, monitor_update))
                                } else {
-                                       Ok((None, to_forward_infos, revoked_htlcs, self.channel_monitor.clone()))
+                                       Ok((None, to_forward_infos, revoked_htlcs, self.maybe_propose_first_closing_signed(fee_estimator), self.channel_monitor.clone()))
                                }
                        }
                }
@@ -2057,6 +2071,9 @@ impl Channel {
                        self.channel_state = ChannelState::ShutdownComplete as u32;
                        return outbound_drops;
                }
+               // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
+               // will be retransmitted.
+               self.last_sent_closing_fee = None;
 
                let mut inbound_drop_count = 0;
                self.pending_inbound_htlcs.retain(|htlc| {
@@ -2244,7 +2261,7 @@ impl Channel {
 
        /// May panic if some calls other than message-handling calls (which will all Err immediately)
        /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
-       pub fn channel_reestablish(&mut self, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, Option<ChannelMonitor>, RAACommitmentOrder), ChannelError> {
+       pub fn channel_reestablish(&mut self, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, Option<ChannelMonitor>, RAACommitmentOrder, Option<msgs::Shutdown>), ChannelError> {
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
                        // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
                        // almost certainly indicates we are going to end up out-of-sync in some way, so we
@@ -2260,9 +2277,16 @@ impl Channel {
                // remaining cases either succeed or ErrorMessage-fail).
                self.channel_state &= !(ChannelState::PeerDisconnected as u32);
 
+               let shutdown_msg = if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
+                       Some(msgs::Shutdown {
+                               channel_id: self.channel_id,
+                               scriptpubkey: self.get_closing_scriptpubkey(),
+                       })
+               } else { None };
+
                if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) == ChannelState::FundingSent as u32 {
                        // Short circuit the whole handler as there is nothing we can resend them
-                       return Ok((None, None, None, None, RAACommitmentOrder::CommitmentFirst));
+                       return Ok((None, None, None, None, RAACommitmentOrder::CommitmentFirst, shutdown_msg));
                }
 
                if msg.next_local_commitment_number == 0 || msg.next_remote_commitment_number == 0 {
@@ -2275,7 +2299,7 @@ impl Channel {
                        return Ok((Some(msgs::FundingLocked {
                                channel_id: self.channel_id(),
                                next_per_commitment_point: next_per_commitment_point,
-                       }), None, None, None, RAACommitmentOrder::CommitmentFirst));
+                       }), None, None, None, RAACommitmentOrder::CommitmentFirst, shutdown_msg));
                }
 
                let required_revoke = if msg.next_remote_commitment_number == INITIAL_COMMITMENT_NUMBER - self.cur_local_commitment_transaction_number {
@@ -2338,11 +2362,11 @@ impl Channel {
                                                        panic!("Got non-channel-failing result from free_holding_cell_htlcs");
                                                }
                                        },
-                                       Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor), order)),
-                                       Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None, order)),
+                                       Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor), order, shutdown_msg)),
+                                       Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None, order, shutdown_msg)),
                                }
                        } else {
-                               return Ok((resend_funding_locked, required_revoke, None, None, order));
+                               return Ok((resend_funding_locked, required_revoke, None, None, order, shutdown_msg));
                        }
                } else if msg.next_local_commitment_number == our_next_remote_commitment_number - 1 {
                        if required_revoke.is_some() {
@@ -2356,71 +2380,78 @@ impl Channel {
 
                        if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
                                self.monitor_pending_commitment_signed = true;
-                               return Ok((resend_funding_locked, None, None, None, order));
+                               return Ok((resend_funding_locked, None, None, None, order, shutdown_msg));
                        }
 
-                       return Ok((resend_funding_locked, required_revoke, Some(self.get_last_commitment_update()), None, order));
+                       return Ok((resend_funding_locked, required_revoke, Some(self.get_last_commitment_update()), None, order, shutdown_msg));
                } else {
                        return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction"));
                }
        }
 
-       pub fn shutdown(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>, Vec<(HTLCSource, [u8; 32])>), HandleError> {
+       fn maybe_propose_first_closing_signed(&mut self, fee_estimator: &FeeEstimator) -> Option<msgs::ClosingSigned> {
+               if !self.channel_outbound || !self.pending_inbound_htlcs.is_empty() || !self.pending_outbound_htlcs.is_empty() ||
+                               self.channel_state & (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32) != BOTH_SIDES_SHUTDOWN_MASK ||
+                               self.last_sent_closing_fee.is_some() ||
+                               self.cur_remote_commitment_transaction_number != self.cur_local_commitment_transaction_number {
+                       return None;
+               }
+
+               let mut proposed_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
+               if self.feerate_per_kw > proposed_feerate {
+                       proposed_feerate = self.feerate_per_kw;
+               }
+               let tx_weight = Self::get_closing_transaction_weight(&self.get_closing_scriptpubkey(), self.their_shutdown_scriptpubkey.as_ref().unwrap());
+               let proposed_total_fee_satoshis = proposed_feerate * tx_weight / 1000;
+
+               let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(proposed_total_fee_satoshis, false);
+               let funding_redeemscript = self.get_funding_redeemscript();
+               let sighash = Message::from_slice(&bip143::SighashComponents::new(&closing_tx).sighash_all(&closing_tx.input[0], &funding_redeemscript, self.channel_value_satoshis)[..]).unwrap();
+
+               self.last_sent_closing_fee = Some((proposed_feerate, total_fee_satoshis));
+               Some(msgs::ClosingSigned {
+                       channel_id: self.channel_id,
+                       fee_satoshis: total_fee_satoshis,
+                       signature: self.secp_ctx.sign(&sighash, &self.local_keys.funding_key),
+               })
+       }
+
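The fee proposed above is the channel's current feerate or the background estimate, whichever is higher, scaled by the closing transaction's weight. A worked sketch with hypothetical numbers:

    // Illustrative arithmetic only (hypothetical feerates and weight):
    let background_est = 253u64;   // sat per 1000 weight units
    let channel_feerate = 1000u64; // current feerate_per_kw
    let proposed_feerate = std::cmp::max(background_est, channel_feerate);
    let tx_weight = 672u64;        // weight of the closing tx
    let fee_sat = proposed_feerate * tx_weight / 1000;
    assert_eq!(fee_sat, 672);      // 1000 sat/kW * 672 WU / 1000
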
+       pub fn shutdown(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>, Vec<(HTLCSource, [u8; 32])>), ChannelError> {
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err(HandleError{err: "Peer sent shutdown when we needed a channel_reestablish", action: Some(msgs::ErrorAction::SendErrorMessage{msg: msgs::ErrorMessage{data: "Peer sent shutdown when we needed a channel_reestablish".to_string(), channel_id: msg.channel_id}})});
+                       return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish"));
                }
                if self.channel_state < ChannelState::FundingSent as u32 {
-                       self.channel_state = ChannelState::ShutdownComplete as u32;
-                       self.channel_update_count += 1;
-                       return Ok((None, None, Vec::new()));
+                       // The spec says we should fail the connection, not the channel, but that's nonsense:
+                       // there are plenty of reasons you may want to fail a channel pre-funding, and the spec
+                       // says you can do that via an error message without a connection failure anyway...
+                       return Err(ChannelError::Close("Peer sent shutdown pre-funding generation"));
                }
                for htlc in self.pending_inbound_htlcs.iter() {
                        if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
-                               return Err(HandleError{err: "Got shutdown with remote pending HTLCs", action: None});
+                               return Err(ChannelError::Close("Got shutdown with remote pending HTLCs"));
                        }
                }
-               if (self.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
-                       return Err(HandleError{err: "Remote peer sent duplicate shutdown message", action: None});
-               }
                assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
 
                // BOLT 2 says we must only send a scriptpubkey of certain standard forms, which are up to
                // 34 bytes in length, so don't let the remote peer feed us some super fee-heavy script.
                if self.channel_outbound && msg.scriptpubkey.len() > 34 {
-                       return Err(HandleError{err: "Got shutdown_scriptpubkey of absurd length from remote peer", action: None});
+                       return Err(ChannelError::Close("Got shutdown_scriptpubkey of absurd length from remote peer"));
                }
 
                // Check shutdown_scriptpubkey form, as the BOLT says we must
-               if !(msg.scriptpubkey.is_p2pkh()) && !(msg.scriptpubkey.is_p2sh())
-                       && !(msg.scriptpubkey.is_v0_p2wpkh()) && !(msg.scriptpubkey.is_v0_p2wsh()){
-                       return Err(HandleError{err: "Got an invalid scriptpubkey from remote peer", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })});
+               if !msg.scriptpubkey.is_p2pkh() && !msg.scriptpubkey.is_p2sh() && !msg.scriptpubkey.is_v0_p2wpkh() && !msg.scriptpubkey.is_v0_p2wsh() {
+                       return Err(ChannelError::Close("Got a nonstandard scriptpubkey from remote peer"));
                }
 
                if self.their_shutdown_scriptpubkey.is_some() {
                        if Some(&msg.scriptpubkey) != self.their_shutdown_scriptpubkey.as_ref() {
-                               return Err(HandleError{err: "Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey", action: None});
+                               return Err(ChannelError::Close("Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey"));
                        }
                } else {
                        self.their_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
                }
 
-               let our_closing_script = self.get_closing_scriptpubkey();
-
-               let (proposed_feerate, proposed_fee, our_sig) = if self.channel_outbound && self.pending_inbound_htlcs.is_empty() && self.pending_outbound_htlcs.is_empty() {
-                       let mut proposed_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
-                       if self.feerate_per_kw > proposed_feerate {
-                               proposed_feerate = self.feerate_per_kw;
-                       }
-                       let tx_weight = Self::get_closing_transaction_weight(&our_closing_script, &msg.scriptpubkey);
-                       let proposed_total_fee_satoshis = proposed_feerate * tx_weight / 1000;
-
-                       let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(proposed_total_fee_satoshis, false);
-                       let funding_redeemscript = self.get_funding_redeemscript();
-                       let sighash = Message::from_slice(&bip143::SighashComponents::new(&closing_tx).sighash_all(&closing_tx.input[0], &funding_redeemscript, self.channel_value_satoshis)[..]).unwrap();
-
-                       (Some(proposed_feerate), Some(total_fee_satoshis), Some(self.secp_ctx.sign(&sighash, &self.local_keys.funding_key)))
-               } else { (None, None, None) };
-
                // From here on out, we may not fail!
 
                self.channel_state |= ChannelState::RemoteShutdownSent as u32;
@@ -2429,6 +2460,7 @@ impl Channel {
                // We can't send our shutdown until we've committed all of our pending HTLCs, but the
                // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
                // cell HTLCs and return them to fail the payment.
+               self.holding_cell_update_fee = None;
                let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
                self.holding_cell_htlc_updates.retain(|htlc_update| {
                        match htlc_update {
@@ -2439,35 +2471,22 @@ impl Channel {
                                _ => true
                        }
                });
-               for htlc in self.pending_outbound_htlcs.iter() {
-                       if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
-                               return Ok((None, None, dropped_outbound_htlcs));
-                       }
-               }
+               // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
+               // immediately after the commitment dance, but we can still send a Shutdown because we won't
+               // send any further commitment updates once we set LocalShutdownSent.
 
                let our_shutdown = if (self.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
                        None
                } else {
                        Some(msgs::Shutdown {
                                channel_id: self.channel_id,
-                               scriptpubkey: our_closing_script,
+                               scriptpubkey: self.get_closing_scriptpubkey(),
                        })
                };
 
                self.channel_state |= ChannelState::LocalShutdownSent as u32;
                self.channel_update_count += 1;
-               if self.pending_inbound_htlcs.is_empty() && self.pending_outbound_htlcs.is_empty() && self.channel_outbound {
-                       // There are no more HTLCs and we're the funder, this means we start the closing_signed
-                       // dance with an initial fee proposal!
-                       self.last_sent_closing_fee = Some((proposed_feerate.unwrap(), proposed_fee.unwrap()));
-                       Ok((our_shutdown, Some(msgs::ClosingSigned {
-                               channel_id: self.channel_id,
-                               fee_satoshis: proposed_fee.unwrap(),
-                               signature: our_sig.unwrap(),
-                       }), dropped_outbound_htlcs))
-               } else {
-                       Ok((our_shutdown, None, dropped_outbound_htlcs))
-               }
+               Ok((our_shutdown, self.maybe_propose_first_closing_signed(fee_estimator), dropped_outbound_htlcs))
        }
 
        pub fn closing_signed(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::ClosingSigned) -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), HandleError> {
@@ -3250,9 +3269,9 @@ impl Channel {
                }
                self.channel_update_count += 1;
 
-               // We can't send our shutdown until we've committed all of our pending HTLCs, but the
-               // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
-               // cell HTLCs and return them to fail the payment.
+               // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
+               // our shutdown until we've committed all of the pending changes.
+               self.holding_cell_update_fee = None;
                let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
                self.holding_cell_htlc_updates.retain(|htlc_update| {
                        match htlc_update {
index 51bf27045f8c21e21e68b30b2fbd32f99c19c1a4..1141d7dfbcd7c59be53fbfc3df6fdaa2f15fb67f 100644 (file)
@@ -1233,6 +1233,9 @@ impl ChannelManager {
 
        /// Call this upon creation of a funding transaction for the given channel.
        ///
+       /// Note that ALL inputs in the transaction pointed to by funding_txo MUST spend SegWit outputs
+       /// or your counterparty can steal your funds!
+       ///
        /// Panics if a funding transaction has already been provided for this channel.
        ///
        /// May panic if the funding_txo is duplicative with some other channel (note that this should
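The warning exists because a non-SegWit input lets the counterparty malleate the funding txid before confirmation, invalidating the pre-signed commitment transaction that is the only way to recover the funds. As an illustrative check only (a hypothetical helper, not part of this API): every input of a fully-signed, all-SegWit transaction must carry a non-empty witness:

    // Hypothetical helper for illustration: after signing, an all-SegWit
    // funding transaction has a non-empty witness on every input.
    fn all_inputs_have_witnesses(tx: &bitcoin::Transaction) -> bool {
        tx.input.iter().all(|input| !input.witness.is_empty())
    }
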
@@ -1832,7 +1835,7 @@ impl ChannelManager {
                                                //TODO: here and below MsgHandleErrInternal, #153 case
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                        }
-                                       let (shutdown, closing_signed, dropped_htlcs) = chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+                                       let (shutdown, closing_signed, dropped_htlcs) = chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
                                        if let Some(msg) = shutdown {
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
                                                        node_id: their_node_id.clone(),
@@ -1926,7 +1929,7 @@ impl ChannelManager {
                //encrypted with the same key. Its not immediately obvious how to usefully exploit that,
                //but we should prevent it anyway.
 
-               let (pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
+               let (mut pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
                let channel_state = channel_state_lock.borrow_parts();
 
                match channel_state.by_id.get_mut(&msg.channel_id) {
@@ -1936,7 +1939,16 @@ impl ChannelManager {
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                }
                                if !chan.is_usable() {
-                                       return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Channel not yet available for receiving HTLCs", action: Some(msgs::ErrorAction::IgnoreError)}));
+                                       // If the update_add is completely bogus, the call will Err and we will close,
+                                       // but if we've sent a shutdown and they haven't acknowledged it yet, we just
+                                       // want to reject the new HTLC and fail it backwards instead of forwarding.
+                                       if let PendingHTLCStatus::Forward(PendingForwardHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info {
+                                               pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+                                                       channel_id: msg.channel_id,
+                                                       htlc_id: msg.htlc_id,
+                                                       reason: ChannelManager::build_first_hop_failure_packet(&incoming_shared_secret, 0x1000|20, &self.get_channel_update(chan).unwrap().encode_with_len()[..]),
+                                               }));
+                                       }
                                }
                                chan.update_add_htlc(&msg, pending_forward_info).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))
                        },
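The failure code used above should be the BOLT 4 channel_disabled error: the UPDATE flag ORed with code 20, which carries the current channel_update so the origin can route around the closing channel:

    // BOLT 4 failure code, assuming the standard constants:
    const UPDATE: u16 = 0x1000;
    const CHANNEL_DISABLED: u16 = UPDATE | 20; // == 0x1014, i.e. 0x1000|20 above
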
@@ -2202,7 +2214,7 @@ impl ChannelManager {
                                        //TODO: here and below MsgHandleErrInternal, #153 case
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                }
-                               let (revoke_and_ack, commitment_signed, chan_monitor) = chan.commitment_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+                               let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) = chan.commitment_signed(&msg, &*self.fee_estimator).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
                                if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
                                        unimplemented!();
                                }
@@ -2223,6 +2235,12 @@ impl ChannelManager {
                                                },
                                        });
                                }
+                               if let Some(msg) = closing_signed {
+                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+                                               node_id: their_node_id.clone(),
+                                               msg,
+                                       });
+                               }
                                Ok(())
                        },
                        None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
@@ -2272,7 +2290,7 @@ impl ChannelManager {
                                                //TODO: here and below MsgHandleErrInternal, #153 case
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                        }
-                                       let (commitment_update, pending_forwards, pending_failures, chan_monitor) = chan.revoke_and_ack(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+                                       let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) = chan.revoke_and_ack(&msg, &*self.fee_estimator).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
                                        if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
                                                unimplemented!();
                                        }
@@ -2282,6 +2300,12 @@ impl ChannelManager {
                                                        updates,
                                                });
                                        }
+                                       if let Some(msg) = closing_signed {
+                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+                                                       node_id: their_node_id.clone(),
+                                                       msg,
+                                               });
+                                       }
                                        (pending_forwards, pending_failures, chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
                                },
                                None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
@@ -2359,7 +2383,7 @@ impl ChannelManager {
                                if chan.get_their_node_id() != *their_node_id {
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                }
-                               let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order) = chan.channel_reestablish(msg)
+                               let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order, shutdown) = chan.channel_reestablish(msg)
                                        .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
                                if let Some(monitor) = channel_monitor {
                                        if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
@@ -2398,6 +2422,12 @@ impl ChannelManager {
                                                send_raa!();
                                        },
                                }
+                               if let Some(msg) = shutdown {
+                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+                                               node_id: their_node_id.clone(),
+                                               msg,
+                                       });
+                               }
                                Ok(())
                        },
                        None => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
@@ -3616,6 +3646,30 @@ mod tests {
                }
        }
 
+       macro_rules! get_closing_signed_broadcast {
+               ($node: expr, $dest_pubkey: expr) => {
+                       {
+                               let events = $node.get_and_clear_pending_msg_events();
+                               assert!(events.len() == 1 || events.len() == 2);
+                               (match events[events.len() - 1] {
+                                       MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+                                               assert_eq!(msg.contents.flags & 2, 2);
+                                               msg.clone()
+                                       },
+                                       _ => panic!("Unexpected event"),
+                               }, if events.len() == 2 {
+                                       match events[0] {
+                                               MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
+                                                       assert_eq!(*node_id, $dest_pubkey);
+                                                       Some(msg.clone())
+                                               },
+                                               _ => panic!("Unexpected event"),
+                                       }
+                               } else { None })
+                       }
+               }
+       }
+
        fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate) {
                let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) };
                let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) };
@@ -3647,29 +3701,6 @@ mod tests {
                        })
                };
 
-               macro_rules! get_closing_signed_broadcast {
-                       ($node: expr, $dest_pubkey: expr) => {
-                               {
-                                       let events = $node.get_and_clear_pending_msg_events();
-                                       assert!(events.len() == 1 || events.len() == 2);
-                                       (match events[events.len() - 1] {
-                                               MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
-                                                       msg.clone()
-                                               },
-                                               _ => panic!("Unexpected event"),
-                                       }, if events.len() == 2 {
-                                               match events[0] {
-                                                       MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
-                                                               assert_eq!(*node_id, $dest_pubkey);
-                                                               Some(msg.clone())
-                                                       },
-                                                       _ => panic!("Unexpected event"),
-                                               }
-                                       } else { None })
-                               }
-                       }
-               }
-
                node_a.handle_shutdown(&node_b.get_our_node_id(), &shutdown_b).unwrap();
                let (as_update, bs_update) = if close_inbound_first {
                        assert!(node_a.get_and_clear_pending_msg_events().is_empty());
@@ -3738,35 +3769,41 @@ mod tests {
        }
 
        macro_rules! commitment_signed_dance {
-               ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => {
+               ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */) => {
                        {
                                check_added_monitors!($node_a, 0);
                                assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
                                $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap();
-                               let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id());
                                check_added_monitors!($node_a, 1);
+                               commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, false);
+                       }
+               };
+               ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => {
+                       {
+                               let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id());
                                check_added_monitors!($node_b, 0);
                                assert!($node_b.node.get_and_clear_pending_msg_events().is_empty());
                                $node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap();
                                assert!($node_b.node.get_and_clear_pending_msg_events().is_empty());
                                check_added_monitors!($node_b, 1);
                                $node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed).unwrap();
-                               let bs_revoke_and_ack = get_event_msg!($node_b, MessageSendEvent::SendRevokeAndACK, $node_a.node.get_our_node_id());
+                               let (bs_revoke_and_ack, extra_msg_option) = {
+                                       let events = $node_b.node.get_and_clear_pending_msg_events();
+                                       assert!(events.len() <= 2);
+                                       (match events[0] {
+                                               MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+                                                       assert_eq!(*node_id, $node_a.node.get_our_node_id());
+                                                       (*msg).clone()
+                                               },
+                                               _ => panic!("Unexpected event"),
+                                       }, events.get(1).map(|e| e.clone()))
+                               };
                                check_added_monitors!($node_b, 1);
                                if $fail_backwards {
                                        assert!($node_a.node.get_and_clear_pending_events().is_empty());
                                        assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
                                }
                                $node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
-                               if $fail_backwards {
-                                       let channel_state = $node_a.node.channel_state.lock().unwrap();
-                                       assert_eq!(channel_state.pending_msg_events.len(), 1);
-                                       if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = channel_state.pending_msg_events[0] {
-                                               assert_ne!(*node_id, $node_b.node.get_our_node_id());
-                                       } else { panic!("Unexpected event"); }
-                               } else {
-                                       assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
-                               }
                                {
                                        let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap();
                                        if $fail_backwards {
@@ -3777,6 +3814,26 @@ mod tests {
                                        }
                                        added_monitors.clear();
                                }
+                               extra_msg_option
+                       }
+               };
+               ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, false /* no extra message */) => {
+                       {
+                               assert!(commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true).is_none());
+                       }
+               };
+               ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => {
+                       {
+                               commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
+                               if $fail_backwards {
+                                       let channel_state = $node_a.node.channel_state.lock().unwrap();
+                                       assert_eq!(channel_state.pending_msg_events.len(), 1);
+                                       if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = channel_state.pending_msg_events[0] {
+                                               assert_ne!(*node_id, $node_b.node.get_our_node_id());
+                                       } else { panic!("Unexpected event"); }
+                               } else {
+                                       assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
+                               }
                        }
                }
        }
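The new trailing-bool arms let callers run the dance but capture the last message $node_b queues instead of asserting its queue is empty; a usage sketch matching the tests below:

    // Run the dance, keeping any extra message (e.g. a SendClosingSigned)
    // that nodes[0] queues after handling the final revoke_and_ack.
    let extra = commitment_signed_dance!(nodes[1], nodes[0], (), false, true, true);
    assert!(extra.is_some());
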
@@ -4622,6 +4679,375 @@ mod tests {
                close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
        }
 
+       #[test]
+       fn pre_funding_lock_shutdown_test() {
+               // Test sending a shutdown prior to funding_locked after funding generation
+               let nodes = create_network(2);
+               let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0);
+               let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+               nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]);
+               nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]);
+
+               nodes[0].node.close_channel(&OutPoint::new(tx.txid(), 0).to_channel_id()).unwrap();
+               let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
+               let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
+
+               let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
+               let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
+               let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+               assert!(node_0_none.is_none());
+
+               assert!(nodes[0].node.list_channels().is_empty());
+               assert!(nodes[1].node.list_channels().is_empty());
+       }
+
+       #[test]
+       fn updates_shutdown_wait() {
+               // Test sending a shutdown with outstanding updates pending
+               let mut nodes = create_network(3);
+               let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+               let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+               let route_1 = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
+               let route_2 = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
+
+               let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
+
+               nodes[0].node.close_channel(&chan_1.2).unwrap();
+               let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
+               let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
+
+               assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+               assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+               let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
+               if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route_1, payment_hash) {}
+               else { panic!("New sends should fail!") };
+               if let Err(APIError::ChannelUnavailable {..}) = nodes[1].node.send_payment(route_2, payment_hash) {}
+               else { panic!("New sends should fail!") };
+
+               assert!(nodes[2].node.claim_funds(our_payment_preimage));
+               check_added_monitors!(nodes[2], 1);
+               let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+               assert!(updates.update_add_htlcs.is_empty());
+               assert!(updates.update_fail_htlcs.is_empty());
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+               nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
+               check_added_monitors!(nodes[1], 1);
+               let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
+
+               assert!(updates_2.update_add_htlcs.is_empty());
+               assert!(updates_2.update_fail_htlcs.is_empty());
+               assert!(updates_2.update_fail_malformed_htlcs.is_empty());
+               assert!(updates_2.update_fee.is_none());
+               assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
+               nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap();
+               commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
+
+               let events = nodes[0].node.get_and_clear_pending_events();
+               assert_eq!(events.len(), 1);
+               match events[0] {
+                       Event::PaymentSent { ref payment_preimage } => {
+                               assert_eq!(our_payment_preimage, *payment_preimage);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+
+               let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
+               let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
+               let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+               assert!(node_0_none.is_none());
+
+               assert!(nodes[0].node.list_channels().is_empty());
+
+               assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+               nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+               close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
+               assert!(nodes[1].node.list_channels().is_empty());
+               assert!(nodes[2].node.list_channels().is_empty());
+       }
+
+       #[test]
+       fn htlc_fail_async_shutdown() {
+               // Test that HTLCs fail backwards when a shutdown starts, even if messages are delivered out-of-order
+               let mut nodes = create_network(3);
+               let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+               let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+
+               let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
+               let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+               nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+               assert_eq!(updates.update_add_htlcs.len(), 1);
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert!(updates.update_fail_htlcs.is_empty());
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+
+               nodes[1].node.close_channel(&chan_1.2).unwrap();
+               let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
+               let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
+               nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap();
+               check_added_monitors!(nodes[1], 1);
+               nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
+               commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
+
+               let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               assert!(updates_2.update_add_htlcs.is_empty());
+               assert!(updates_2.update_fulfill_htlcs.is_empty());
+               assert_eq!(updates_2.update_fail_htlcs.len(), 1);
+               assert!(updates_2.update_fail_malformed_htlcs.is_empty());
+               assert!(updates_2.update_fee.is_none());
+
+               nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]).unwrap();
+               commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
+
+               let events = nodes[0].node.get_and_clear_pending_events();
+               assert_eq!(events.len(), 1);
+               match events[0] {
+                       Event::PaymentFailed { ref payment_hash, ref rejected_by_dest } => {
+                               assert_eq!(our_payment_hash, *payment_hash);
+                               assert!(!rejected_by_dest);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+
+               assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+               let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+               nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
+               let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
+               let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+               assert!(node_0_none.is_none());
+
+               assert!(nodes[0].node.list_channels().is_empty());
+
+               assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+               nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+               close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
+               assert!(nodes[1].node.list_channels().is_empty());
+               assert!(nodes[2].node.list_channels().is_empty());
+       }
+
+       #[test]
+       fn update_fee_async_shutdown() {
+               // Test that update_fee is accepted after shutdown has begun if messages are delivered out-of-order
+               let nodes = create_network(2);
+               let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+               let starting_feerate = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().get_feerate();
+               nodes[0].node.update_fee(chan_1.2.clone(), starting_feerate + 20).unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+               assert!(updates.update_add_htlcs.is_empty());
+               assert!(updates.update_fulfill_htlcs.is_empty());
+               assert!(updates.update_fail_htlcs.is_empty());
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_some());
+
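+               // Start shutdown from nodes[1] while nodes[0]'s update_fee is still undelivered.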
+               nodes[1].node.close_channel(&chan_1.2).unwrap();
+               let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
+               // Note that we don't actually test normative behavior here. The spec indicates we could
+               // send a closing_signed here, but its wording is somewhat unclear and may be amended to
+               // require waiting for the full commitment dance before doing so (see
+               // https://github.com/lightningnetwork/lightning-rfc/issues/499). In any case, to avoid
+               // ambiguity, we wait until after the full commitment dance to send closing_signed.
+               let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+
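+               // Deliver the in-flight update_fee and commitment_signed after shutdown has begun.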
+               nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &updates.update_fee.unwrap()).unwrap();
+               nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap();
+               check_added_monitors!(nodes[1], 1);
+               nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
+               let node_0_closing_signed = commitment_signed_dance!(nodes[1], nodes[0], (), false, true, true);
+
+               assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+               nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), match node_0_closing_signed.unwrap() {
+                       MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
+                               assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+                               msg
+                       },
+                       _ => panic!("Unexpected event"),
+               }).unwrap();
+               let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+               nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
+               let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+               assert!(node_0_none.is_none());
+       }
+
+       fn do_test_shutdown_rebroadcast(recv_count: u8) {
+               // Test that shutdown/closing_signed is re-sent on reconnect, with recv_count controlling
+               // how many of those messages were delivered before the disconnect
+               let nodes = create_network(3);
+               let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+               let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+
+               let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
+
+               nodes[1].node.close_channel(&chan_1.2).unwrap();
+               let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+               if recv_count > 0 {
+                       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
+                       let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+                       if recv_count > 1 {
+                               nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
+                       }
+               }
+
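+               // Disconnect mid-shutdown; on reconnect each side should re-send shutdown as part of
+               // the channel_reestablish flow.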
+               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+               let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+               let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+               nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish).unwrap();
+               let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+               assert!(node_1_shutdown == node_1_2nd_shutdown);
+
+               nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish).unwrap();
+               let node_0_2nd_shutdown = if recv_count > 0 {
+                       let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+                       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap();
+                       node_0_2nd_shutdown
+               } else {
+                       assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+                       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap();
+                       get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())
+               };
+               nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_2nd_shutdown).unwrap();
+
+               assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+               assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
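+               // Claim the payment at nodes[2] and relay the fulfill back so chan_1 has no pending HTLCs.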
+               assert!(nodes[2].node.claim_funds(our_payment_preimage));
+               check_added_monitors!(nodes[2], 1);
+               let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+               assert!(updates.update_add_htlcs.is_empty());
+               assert!(updates.update_fail_htlcs.is_empty());
+               assert!(updates.update_fail_malformed_htlcs.is_empty());
+               assert!(updates.update_fee.is_none());
+               assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+               nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
+               check_added_monitors!(nodes[1], 1);
+               let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+               commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
+
+               assert!(updates_2.update_add_htlcs.is_empty());
+               assert!(updates_2.update_fail_htlcs.is_empty());
+               assert!(updates_2.update_fail_malformed_htlcs.is_empty());
+               assert!(updates_2.update_fee.is_none());
+               assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
+               nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap();
+               commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
+
+               let events = nodes[0].node.get_and_clear_pending_events();
+               assert_eq!(events.len(), 1);
+               match events[0] {
+                       Event::PaymentSent { ref payment_preimage } => {
+                               assert_eq!(our_payment_preimage, *payment_preimage);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+
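+               // With the HTLC resolved, nodes[0] moves on to the closing_signed exchange.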
+               let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+               if recv_count > 0 {
+                       nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
+                       let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+                       assert!(node_1_closing_signed.is_some());
+               }
+
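+               // Disconnect again, this time potentially mid-closing_signed, to test rebroadcast behavior.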
+               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+               nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+               let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+               nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+               if recv_count == 0 {
+                       // Since no closing_signed was delivered before the disconnect, we can simply resume
+                       // where we left off...
+                       let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+                       nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish).unwrap();
+                       let node_0_3rd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+                       assert!(node_0_2nd_shutdown == node_0_3rd_shutdown);
+
+                       nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish).unwrap();
+                       let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+                       assert!(node_1_3rd_shutdown == node_1_2nd_shutdown);
+
+                       nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_3rd_shutdown).unwrap();
+                       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+                       nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_3rd_shutdown).unwrap();
+                       let node_0_2nd_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+                       assert!(node_0_closing_signed == node_0_2nd_closing_signed);
+
+                       nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed).unwrap();
+                       let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+                       nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
+                       let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+                       assert!(node_0_none.is_none());
+               } else {
+                       // If one node, however, received and responded to an identical closing_signed, we end
+                       // up erroring and nodes[0] will try to broadcast its own latest commitment transaction.
+                       // There isn't really a simple alternative, but in the future we might explore storing a
+                       // set of recently-closed channels that got disconnected during closing_signed and avoid
+                       // broadcasting local commitment txn for some timeout, giving our counterparty enough
+                       // time to (potentially) broadcast a cooperative closing transaction.
+                       assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+                       if let Err(msgs::HandleError{action: Some(msgs::ErrorAction::SendErrorMessage{msg}), ..}) =
+                                       nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish) {
+                               nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg);
+                               let msgs::ErrorMessage {ref channel_id, ..} = msg;
+                               assert_eq!(*channel_id, chan_1.2);
+                       } else { panic!("Needed SendErrorMessage close"); }
+
+                       // get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and
+                       // checks it, but in this case nodes[0] never got a chance to receive a
+                       // closing_signed, so we check for the broadcast ourselves
+                       let events = nodes[0].node.get_and_clear_pending_msg_events();
+                       assert_eq!(events.len(), 1);
+                       match events[0] {
+                               MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+                                       assert_eq!(msg.contents.flags & 2, 2);
+                               },
+                               _ => panic!("Unexpected event"),
+                       }
+               }
+
+               assert!(nodes[0].node.list_channels().is_empty());
+
+               assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+               nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+               close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
+               assert!(nodes[1].node.list_channels().is_empty());
+               assert!(nodes[2].node.list_channels().is_empty());
+       }
+
+       #[test]
+       fn test_shutdown_rebroadcast() {
+               do_test_shutdown_rebroadcast(0);
+               do_test_shutdown_rebroadcast(1);
+               do_test_shutdown_rebroadcast(2);
+       }
+
        #[test]
        fn fake_network_test() {
                // Simple test which builds a network of ChannelManagers, connects them to each other, and
index 110e2f336adb7e112fe5104c67a35db8aedf22e7..5c5d32f39d51483a1a8c0776d40527565ccfbb78 100644 (file)
@@ -151,6 +151,7 @@ pub struct Init {
 }
 
 /// An error message to be sent or received from a peer
+#[derive(Clone)]
 pub struct ErrorMessage {
        pub(crate) channel_id: [u8; 32],
        pub(crate) data: String,
@@ -235,14 +236,14 @@ pub struct FundingLocked {
 }
 
 /// A shutdown message to be sent or received from a peer
-#[derive(Clone)]
+#[derive(Clone, PartialEq)]
 pub struct Shutdown {
        pub(crate) channel_id: [u8; 32],
        pub(crate) scriptpubkey: Script,
 }
 
 /// A closing_signed message to be sent or received from a peer
-#[derive(Clone)]
+#[derive(Clone, PartialEq)]
 pub struct ClosingSigned {
        pub(crate) channel_id: [u8; 32],
        pub(crate) fee_satoshis: u64,
@@ -448,6 +449,7 @@ pub struct ChannelUpdate {
 }
 
 /// Used to put an error message in a HandleError
+#[derive(Clone)]
 pub enum ErrorAction {
        /// The peer took some action which made us think they were useless. Disconnect them.
        DisconnectPeer {
@@ -486,6 +488,7 @@ pub struct CommitmentUpdate {
 /// The information we received from a peer along the route of a payment we originated. This is
 /// returned by ChannelMessageHandler::handle_update_fail_htlc to be passed into
 /// RoutingMessageHandler::handle_htlc_fail_channel_update to update our network map.
+#[derive(Clone)]
 pub enum HTLCFailChannelUpdate {
        /// We received an error which included a full ChannelUpdate message.
        ChannelUpdateMessage {
index 37cee2eded48778d581f1b808aafbe6ab5989c16..a113217943f9d268cea2cc3e8cf8a3056b3f6be6 100644 (file)
@@ -27,6 +27,8 @@ pub enum Event {
        /// Used to indicate that the client should generate a funding transaction with the given
        /// parameters and then call ChannelManager::funding_transaction_generated.
        /// Generated in ChannelManager message handling.
+       /// Note that *all inputs* in the funding transaction must spend SegWit outputs or your
+       /// counterparty can steal your funds!
        FundingGenerationReady {
                /// The random channel_id we picked which you'll need to pass into
                /// ChannelManager::funding_transaction_generated.
@@ -101,6 +103,7 @@ pub enum Event {
 /// An event generated by ChannelManager which indicates a message should be sent to a peer (or
 /// broadcast to most peers).
 /// These events are handled by PeerManager::process_events if you are using a PeerManager.
+#[derive(Clone)]
 pub enum MessageSendEvent {
        /// Used to indicate that we've accepted a channel open and should send the accept_channel
        /// message provided to the given peer.