Improve error messages.
author     joe.miyamoto <joe.miyamoto@bitcoinbank.co.jp>
Mon, 13 Jul 2020 04:16:32 +0000 (13:16 +0900)
committer  joe.miyamoto <joe.miyamoto@bitcoinbank.co.jp>
Wed, 22 Jul 2020 01:34:47 +0000 (10:34 +0900)
... for ChannelError and APIMisuseError
Before this commit, when rust-lightning returns an error, we don't know
the actual parameter which caused the error.
By returning a parameterised `String` instead of a predefined `&'static str`,
we can give the caller an improved error message.
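
For example, the funding-value check in channel.rs (taken from the diff below) changes from a fixed message to one that reports the offending values:

    // Before: a fixed message with no context about the offending value.
    return Err(APIError::APIMisuseError{err: "funding value > 2^24"});

    // After: the message carries the actual parameters involved in the check.
    return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than {}, it was {}", MAX_FUNDING_SATOSHIS, channel_value_satoshis)});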

TestLogger now has two additional methods:
1. `assert_log_contains`, which checks how many logged entries include
  the specified string as a substring.
2. `assert_log_regex`, which is mostly the same as `assert_log_contains`
  but more flexible: the caller specifies a regex which has to be
  satisfied instead of just a substring.
For the regex support, the tests now include `regex` as a dev-dependency.
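
The test_utils.rs hunk is not part of this excerpt, so the exact signatures are not reproduced here; the following is only an illustrative sketch, with assumed method shapes and field names, of how such helpers can be built over a captured list of log lines:

    use regex::Regex;

    // Hypothetical stand-in for the real TestLogger: only the captured lines matter here.
    struct TestLogger { lines: Vec<String> }

    impl TestLogger {
        // Assert that exactly `expected` captured lines contain `needle` as a substring.
        fn assert_log_contains(&self, needle: &str, expected: usize) {
            let count = self.lines.iter().filter(|l| l.contains(needle)).count();
            assert_eq!(count, expected);
        }

        // Same idea, but the caller supplies a regex each matching line must satisfy,
        // which is handy now that messages embed runtime values.
        fn assert_log_regex(&self, pattern: &Regex, expected: usize) {
            let count = self.lines.iter().filter(|l| pattern.is_match(l.as_str())).count();
            assert_eq!(count, expected);
        }
    }

    // e.g., against the parameterised messages introduced by this commit:
    // logger.assert_log_regex(&Regex::new(r"Peer's feerate much too low\. Actual: \d+").unwrap(), 1);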

13 files changed:
lightning/Cargo.toml
lightning/src/lib.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs
lightning/src/ln/msgs.rs
lightning/src/ln/onion_utils.rs
lightning/src/ln/peer_channel_encryptor.rs
lightning/src/routing/network_graph.rs
lightning/src/routing/router.rs
lightning/src/util/errors.rs
lightning/src/util/test_utils.rs

lightning/Cargo.toml
index 3826123a0fbbefc26677d334d9c2a38c7c9c26c2..e1b9462668b1ac8f9348a479f918f3be11581a51 100644
@@ -28,3 +28,4 @@ features = ["bitcoinconsensus"]
 
 [dev-dependencies]
 hex = "0.3"
+regex = "0.1.80"
lightning/src/lib.rs
index 72017e46553b476f3d01f6bda97d7d5c0058d96b..96000a753fef4e28b6bf31ba1a3556f19c78d401 100644
@@ -20,6 +20,7 @@
 
 extern crate bitcoin;
 #[cfg(test)] extern crate hex;
+#[cfg(test)] extern crate regex;
 
 #[macro_use]
 pub mod util;
lightning/src/ln/channel.rs
index c95d6e73fe25227e2062f8c865b2a5db61583618..a0879d4e7a554895a5636d57a4e6485bcc1bb086 100644
@@ -34,6 +34,7 @@ use std;
 use std::default::Default;
 use std::{cmp,mem,fmt};
 use std::ops::Deref;
+use bitcoin::hashes::hex::ToHex;
 
 #[cfg(test)]
 pub struct ChannelValueStat {
@@ -409,17 +410,17 @@ pub const MAX_FUNDING_SATOSHIS: u64 = 1 << 24;
 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
 /// channel_id in ChannelManager.
 pub(super) enum ChannelError {
-       Ignore(&'static str),
-       Close(&'static str),
-       CloseDelayBroadcast(&'static str),
+       Ignore(String),
+       Close(String),
+       CloseDelayBroadcast(String),
 }
 
 impl fmt::Debug for ChannelError {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                match self {
-                       &ChannelError::Ignore(e) => write!(f, "Ignore : {}", e),
-                       &ChannelError::Close(e) => write!(f, "Close : {}", e),
-                       &ChannelError::CloseDelayBroadcast(e) => write!(f, "CloseDelayBroadcast : {}", e)
+                       &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
+                       &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
+                       &ChannelError::CloseDelayBroadcast(ref e) => write!(f, "CloseDelayBroadcast : {}", e)
                }
        }
 }
@@ -460,17 +461,15 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                let chan_keys = keys_provider.get_channel_keys(false, channel_value_satoshis);
 
                if channel_value_satoshis >= MAX_FUNDING_SATOSHIS {
-                       return Err(APIError::APIMisuseError{err: "funding value > 2^24"});
+                       return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than {}, it was {}", MAX_FUNDING_SATOSHIS, channel_value_satoshis)});
                }
-
-               if push_msat > channel_value_satoshis * 1000 {
-                       return Err(APIError::APIMisuseError{err: "push value > channel value"});
+               let channel_value_msat = channel_value_satoshis * 1000;
+               if push_msat > channel_value_msat {
+                       return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
                }
                if config.own_channel_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
-                       return Err(APIError::APIMisuseError{err: "Configured with an unreasonable our_to_self_delay putting user funds at risks"});
+                       return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", config.own_channel_config.our_to_self_delay)});
                }
-
-
                let background_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
                if Channel::<ChanSigner>::get_remote_channel_reserve_satoshis(channel_value_satoshis) < Channel::<ChanSigner>::derive_our_dust_limit_satoshis(background_feerate) {
                        return Err(APIError::FeeRateTooHigh{err: format!("Not enough reserve above dust limit can be found at current fee rate({})", background_feerate), feerate: background_feerate});
@@ -558,11 +557,13 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        fn check_remote_fee<F: Deref>(fee_estimator: &F, feerate_per_kw: u32) -> Result<(), ChannelError>
                where F::Target: FeeEstimator
        {
-               if feerate_per_kw < fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background) {
-                       return Err(ChannelError::Close("Peer's feerate much too low"));
+               let lower_limit = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
+               if feerate_per_kw < lower_limit {
+                       return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
                }
-               if feerate_per_kw as u64 > fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64  * 2 {
-                       return Err(ChannelError::Close("Peer's feerate much too high"));
+               let upper_limit = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64  * 2;
+               if feerate_per_kw as u64 > upper_limit {
+                       return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
                }
                Ok(())
        }
@@ -585,61 +586,64 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                let mut local_config = (*config).channel_options.clone();
 
                if config.own_channel_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
-                       return Err(ChannelError::Close("Configured with an unreasonable our_to_self_delay putting user funds at risks"));
+                       return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.own_channel_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
                }
 
                // Check sanity of message fields:
                if msg.funding_satoshis >= MAX_FUNDING_SATOSHIS {
-                       return Err(ChannelError::Close("funding value > 2^24"));
+                       return Err(ChannelError::Close(format!("Funding must be smaller than {}. It was {}", MAX_FUNDING_SATOSHIS, msg.funding_satoshis)));
                }
                if msg.channel_reserve_satoshis > msg.funding_satoshis {
-                       return Err(ChannelError::Close("Bogus channel_reserve_satoshis"));
+                       return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
                }
-               if msg.push_msat > (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 {
-                       return Err(ChannelError::Close("push_msat larger than funding value"));
+               let funding_value = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
+               if msg.push_msat > funding_value {
+                       return Err(ChannelError::Close(format!("push_msat {} was larger than funding value {}", msg.push_msat, funding_value)));
                }
                if msg.dust_limit_satoshis > msg.funding_satoshis {
-                       return Err(ChannelError::Close("Peer never wants payout outputs?"));
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
                }
                if msg.dust_limit_satoshis > msg.channel_reserve_satoshis {
-                       return Err(ChannelError::Close("Bogus; channel reserve is less than dust limit"));
+                       return Err(ChannelError::Close(format!("Bogus; channel reserve ({}) is less than dust limit ({})", msg.channel_reserve_satoshis, msg.dust_limit_satoshis)));
                }
-               if msg.htlc_minimum_msat >= (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 {
-                       return Err(ChannelError::Close("Minimum htlc value is full channel value"));
+               let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
+               if msg.htlc_minimum_msat >= full_channel_value_msat {
+                       return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
                }
                Channel::<ChanSigner>::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;
 
-               if msg.to_self_delay > config.peer_channel_config_limits.their_to_self_delay || msg.to_self_delay > MAX_LOCAL_BREAKDOWN_TIMEOUT {
-                       return Err(ChannelError::Close("They wanted our payments to be delayed by a needlessly long period"));
+               let max_to_self_delay = u16::min(config.peer_channel_config_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+               if msg.to_self_delay > max_to_self_delay {
+                       return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_to_self_delay, msg.to_self_delay)));
                }
                if msg.max_accepted_htlcs < 1 {
-                       return Err(ChannelError::Close("0 max_accpted_htlcs makes for a useless channel"));
+                       return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
                }
                if msg.max_accepted_htlcs > 483 {
-                       return Err(ChannelError::Close("max_accpted_htlcs > 483"));
+                       return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than 483", msg.max_accepted_htlcs)));
                }
 
                // Now check against optional parameters as set by config...
                if msg.funding_satoshis < config.peer_channel_config_limits.min_funding_satoshis {
-                       return Err(ChannelError::Close("funding satoshis is less than the user specified limit"));
+                       return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.peer_channel_config_limits.min_funding_satoshis)));
                }
                if msg.htlc_minimum_msat > config.peer_channel_config_limits.max_htlc_minimum_msat {
-                       return Err(ChannelError::Close("htlc minimum msat is higher than the user specified limit"));
+                       return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat,  config.peer_channel_config_limits.max_htlc_minimum_msat)));
                }
                if msg.max_htlc_value_in_flight_msat < config.peer_channel_config_limits.min_max_htlc_value_in_flight_msat {
-                       return Err(ChannelError::Close("max htlc value in flight msat is less than the user specified limit"));
+                       return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.peer_channel_config_limits.min_max_htlc_value_in_flight_msat)));
                }
                if msg.channel_reserve_satoshis > config.peer_channel_config_limits.max_channel_reserve_satoshis {
-                       return Err(ChannelError::Close("channel reserve satoshis is higher than the user specified limit"));
+                       return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.peer_channel_config_limits.max_channel_reserve_satoshis)));
                }
                if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs {
-                       return Err(ChannelError::Close("max accepted htlcs is less than the user specified limit"));
+                       return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.peer_channel_config_limits.min_max_accepted_htlcs)));
                }
                if msg.dust_limit_satoshis < config.peer_channel_config_limits.min_dust_limit_satoshis {
-                       return Err(ChannelError::Close("dust limit satoshis is less than the user specified limit"));
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the user specified limit ({})", msg.dust_limit_satoshis, config.peer_channel_config_limits.min_dust_limit_satoshis)));
                }
                if msg.dust_limit_satoshis > config.peer_channel_config_limits.max_dust_limit_satoshis {
-                       return Err(ChannelError::Close("dust limit satoshis is greater than the user specified limit"));
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the user specified limit ({})", msg.dust_limit_satoshis, config.peer_channel_config_limits.max_dust_limit_satoshis)));
                }
 
                // Convert things into internal flags and prep our state:
@@ -647,7 +651,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                let their_announce = if (msg.channel_flags & 1) == 1 { true } else { false };
                if config.peer_channel_config_limits.force_announced_channel_preference {
                        if local_config.announced_channel != their_announce {
-                               return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours"));
+                               return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
                        }
                }
                // we either accept their preference or the preferences match
@@ -658,26 +662,27 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                let our_dust_limit_satoshis = Channel::<ChanSigner>::derive_our_dust_limit_satoshis(background_feerate);
                let remote_channel_reserve_satoshis = Channel::<ChanSigner>::get_remote_channel_reserve_satoshis(msg.funding_satoshis);
                if remote_channel_reserve_satoshis < our_dust_limit_satoshis {
-                       return Err(ChannelError::Close("Suitable channel reserve not found. aborting"));
+                       return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). our_dust_limit_satoshis is ({}).", remote_channel_reserve_satoshis, our_dust_limit_satoshis)));
                }
                if msg.channel_reserve_satoshis < our_dust_limit_satoshis {
-                       return Err(ChannelError::Close("channel_reserve_satoshis too small"));
+                       return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is smaller than our dust limit ({})", msg.channel_reserve_satoshis, our_dust_limit_satoshis)));
                }
                if remote_channel_reserve_satoshis < msg.dust_limit_satoshis {
-                       return Err(ChannelError::Close("Dust limit too high for the channel reserve we require the remote to keep"));
+                       return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, remote_channel_reserve_satoshis)));
                }
 
                // check if the funder's amount for the initial commitment tx is sufficient
                // for full fee payment
                let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
-               if funders_amount_msat < background_feerate as u64 * COMMITMENT_TX_BASE_WEIGHT {
-                       return Err(ChannelError::Close("Insufficient funding amount for initial commitment"));
+               let lower_limit = background_feerate as u64 * COMMITMENT_TX_BASE_WEIGHT;
+               if funders_amount_msat < lower_limit {
+                       return Err(ChannelError::Close(format!("Insufficient funding amount ({}) for initial commitment. Must be at least {}", funders_amount_msat, lower_limit)));
                }
 
                let to_local_msat = msg.push_msat;
                let to_remote_msat = funders_amount_msat - background_feerate as u64 * COMMITMENT_TX_BASE_WEIGHT;
                if to_local_msat <= msg.channel_reserve_satoshis * 1000 && to_remote_msat <= remote_channel_reserve_satoshis * 1000 {
-                       return Err(ChannelError::Close("Insufficient funding amount for initial commitment"));
+                       return Err(ChannelError::Close("Insufficient funding amount for initial commitment".to_owned()));
                }
 
                let their_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
@@ -691,12 +696,12 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                                None
                                        // Peer is signaling upfront_shutdown and has provided a non-accepted scriptpubkey format. Fail the channel
                                        } else {
-                                               return Err(ChannelError::Close("Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format"));
+                                               return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format. script: ({})", script.to_bytes().to_hex())));
                                        }
                                },
                                // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
                                &OptionalField::Absent => {
-                                       return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out"));
+                                       return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
                                }
                        }
                } else { None };
@@ -1123,7 +1128,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                let htlc_basepoint = &self.local_keys.pubkeys().htlc_basepoint;
                let their_pubkeys = self.their_pubkeys.as_ref().unwrap();
 
-               Ok(secp_check!(TxCreationKeys::new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &their_pubkeys.revocation_basepoint, &their_pubkeys.htlc_basepoint), "Local tx keys generation got bogus keys"))
+               Ok(secp_check!(TxCreationKeys::new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &their_pubkeys.revocation_basepoint, &their_pubkeys.htlc_basepoint), "Local tx keys generation got bogus keys".to_owned()))
        }
 
        #[inline]
@@ -1137,7 +1142,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                let htlc_basepoint = &self.local_keys.pubkeys().htlc_basepoint;
                let their_pubkeys = self.their_pubkeys.as_ref().unwrap();
 
-               Ok(secp_check!(TxCreationKeys::new(&self.secp_ctx, &self.their_cur_commitment_point.unwrap(), &their_pubkeys.delayed_payment_basepoint, &their_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint), "Remote tx keys generation got bogus keys"))
+               Ok(secp_check!(TxCreationKeys::new(&self.secp_ctx, &self.their_cur_commitment_point.unwrap(), &their_pubkeys.delayed_payment_basepoint, &their_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint), "Remote tx keys generation got bogus keys".to_owned()))
        }
 
        /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
@@ -1200,7 +1205,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        }
                }
                if pending_idx == std::usize::MAX {
-                       return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID"));
+                       return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
                }
 
                // Now update local state:
@@ -1310,14 +1315,14 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                        },
                                        _ => {
                                                debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
-                                               return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID"));
+                                               return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
                                        }
                                }
                                pending_idx = idx;
                        }
                }
                if pending_idx == std::usize::MAX {
-                       return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID"));
+                       return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
                }
 
                // Now update local state:
@@ -1327,13 +1332,13 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                        &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
                                                if htlc_id_arg == htlc_id {
                                                        debug_assert!(false, "Tried to fail an HTLC that was already fulfilled");
-                                                       return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID"));
+                                                       return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
                                                }
                                        },
                                        &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
                                                if htlc_id_arg == htlc_id {
                                                        debug_assert!(false, "Tried to fail an HTLC that was already failed");
-                                                       return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID"));
+                                                       return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
                                                }
                                        },
                                        _ => {}
@@ -1363,60 +1368,63 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, config: &UserConfig, their_features: InitFeatures) -> Result<(), ChannelError> {
                // Check sanity of message fields:
                if !self.channel_outbound {
-                       return Err(ChannelError::Close("Got an accept_channel message from an inbound peer"));
+                       return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
                }
                if self.channel_state != ChannelState::OurInitSent as u32 {
-                       return Err(ChannelError::Close("Got an accept_channel message at a strange time"));
+                       return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
                }
                if msg.dust_limit_satoshis > 21000000 * 100000000 {
-                       return Err(ChannelError::Close("Peer never wants payout outputs?"));
+                       return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
                }
                if msg.channel_reserve_satoshis > self.channel_value_satoshis {
-                       return Err(ChannelError::Close("Bogus channel_reserve_satoshis"));
+                       return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.channel_value_satoshis)));
                }
                if msg.dust_limit_satoshis > msg.channel_reserve_satoshis {
-                       return Err(ChannelError::Close("Bogus channel_reserve and dust_limit"));
+                       return Err(ChannelError::Close(format!("Bogus channel_reserve ({}) and dust_limit ({})", msg.channel_reserve_satoshis, msg.dust_limit_satoshis)));
                }
                if msg.channel_reserve_satoshis < self.our_dust_limit_satoshis {
-                       return Err(ChannelError::Close("Peer never wants payout outputs?"));
+                       return Err(ChannelError::Close(format!("Peer never wants payout outputs? channel_reserve_satoshis was ({}). our_dust_limit is ({})", msg.channel_reserve_satoshis, self.our_dust_limit_satoshis)));
                }
-               if msg.dust_limit_satoshis > Channel::<ChanSigner>::get_remote_channel_reserve_satoshis(self.channel_value_satoshis) {
-                       return Err(ChannelError::Close("Dust limit is bigger than our channel reverse"));
+               let remote_reserve = Channel::<ChanSigner>::get_remote_channel_reserve_satoshis(self.channel_value_satoshis);
+               if msg.dust_limit_satoshis > remote_reserve {
+                       return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, remote_reserve)));
                }
-               if msg.htlc_minimum_msat >= (self.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000 {
-                       return Err(ChannelError::Close("Minimum htlc value is full channel value"));
+               let full_channel_value_msat = (self.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
+               if msg.htlc_minimum_msat >= full_channel_value_msat {
+                       return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
                }
-               if msg.to_self_delay > config.peer_channel_config_limits.their_to_self_delay || msg.to_self_delay > MAX_LOCAL_BREAKDOWN_TIMEOUT {
-                       return Err(ChannelError::Close("They wanted our payments to be delayed by a needlessly long period"));
+               let max_delay_acceptable = u16::min(config.peer_channel_config_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+               if msg.to_self_delay > max_delay_acceptable {
+                       return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
                }
                if msg.max_accepted_htlcs < 1 {
-                       return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel"));
+                       return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
                }
                if msg.max_accepted_htlcs > 483 {
-                       return Err(ChannelError::Close("max_accepted_htlcs > 483"));
+                       return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than 483", msg.max_accepted_htlcs)));
                }
 
                // Now check against optional parameters as set by config...
                if msg.htlc_minimum_msat > config.peer_channel_config_limits.max_htlc_minimum_msat {
-                       return Err(ChannelError::Close("htlc minimum msat is higher than the user specified limit"));
+                       return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.peer_channel_config_limits.max_htlc_minimum_msat)));
                }
                if msg.max_htlc_value_in_flight_msat < config.peer_channel_config_limits.min_max_htlc_value_in_flight_msat {
-                       return Err(ChannelError::Close("max htlc value in flight msat is less than the user specified limit"));
+                       return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.peer_channel_config_limits.min_max_htlc_value_in_flight_msat)));
                }
                if msg.channel_reserve_satoshis > config.peer_channel_config_limits.max_channel_reserve_satoshis {
-                       return Err(ChannelError::Close("channel reserve satoshis is higher than the user specified limit"));
+                       return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.peer_channel_config_limits.max_channel_reserve_satoshis)));
                }
                if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs {
-                       return Err(ChannelError::Close("max accepted htlcs is less than the user specified limit"));
+                       return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.peer_channel_config_limits.min_max_accepted_htlcs)));
                }
                if msg.dust_limit_satoshis < config.peer_channel_config_limits.min_dust_limit_satoshis {
-                       return Err(ChannelError::Close("dust limit satoshis is less than the user specified limit"));
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the user specified limit ({})", msg.dust_limit_satoshis, config.peer_channel_config_limits.min_dust_limit_satoshis)));
                }
                if msg.dust_limit_satoshis > config.peer_channel_config_limits.max_dust_limit_satoshis {
-                       return Err(ChannelError::Close("dust limit satoshis is greater than the user specified limit"));
+                       return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the user specified limit ({})", msg.dust_limit_satoshis, config.peer_channel_config_limits.max_dust_limit_satoshis)));
                }
                if msg.minimum_depth > config.peer_channel_config_limits.max_minimum_depth {
-                       return Err(ChannelError::Close("We consider the minimum depth to be unreasonably large"));
+                       return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", config.peer_channel_config_limits.max_minimum_depth, msg.minimum_depth)));
                }
 
                let their_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
@@ -1430,12 +1438,12 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                                None
                                        // Peer is signaling upfront_shutdown and has provided a non-accepted scriptpubkey format. Fail the channel
                                        } else {
-                                               return Err(ChannelError::Close("Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format"));
+                                               return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format. scriptpubkey: ({})", script.to_bytes().to_hex())));
                                        }
                                },
                                // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
                                &OptionalField::Absent => {
-                                       return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out"));
+                                       return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
                                }
                        }
                } else { None };
@@ -1476,14 +1484,14 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
                // They sign the "local" commitment transaction...
                log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {}", log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.their_funding_pubkey().serialize()), encode::serialize_hex(&local_initial_commitment_tx), log_bytes!(local_sighash[..]), encode::serialize_hex(&funding_script));
-               secp_check!(self.secp_ctx.verify(&local_sighash, &sig, self.their_funding_pubkey()), "Invalid funding_created signature from peer");
+               secp_check!(self.secp_ctx.verify(&local_sighash, &sig, self.their_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
 
                let localtx = LocalCommitmentTransaction::new_missing_local_sig(local_initial_commitment_tx, sig.clone(), &self.local_keys.pubkeys().funding_pubkey, self.their_funding_pubkey(), local_keys, self.feerate_per_kw, Vec::new());
 
                let remote_keys = self.build_remote_transaction_keys()?;
                let remote_initial_commitment_tx = self.build_commitment_transaction(self.cur_remote_commitment_transaction_number, &remote_keys, false, false, self.feerate_per_kw, logger).0;
                let remote_signature = self.local_keys.sign_remote_commitment(self.feerate_per_kw, &remote_initial_commitment_tx, &remote_keys, &Vec::new(), self.our_to_self_delay, &self.secp_ctx)
-                               .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed"))?.0;
+                               .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
 
                // We sign the "remote" commitment transaction, allowing them to broadcast the tx if they wish.
                Ok((remote_initial_commitment_tx, localtx, remote_signature))
@@ -1495,13 +1503,13 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
        pub fn funding_created<L: Deref>(&mut self, msg: &msgs::FundingCreated, logger: &L) -> Result<(msgs::FundingSigned, ChannelMonitor<ChanSigner>), ChannelError> where L::Target: Logger {
                if self.channel_outbound {
-                       return Err(ChannelError::Close("Received funding_created for an outbound channel?"));
+                       return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
                }
                if self.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
                        // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
                        // remember the channel, so it's safe to just send an error_message here and drop the
                        // channel.
-                       return Err(ChannelError::Close("Received funding_created after we got the channel!"));
+                       return Err(ChannelError::Close("Received funding_created after we got the channel!".to_owned()));
                }
                if self.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
                                self.cur_remote_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
@@ -1558,10 +1566,10 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        /// If this call is successful, broadcast the funding transaction (and not before!)
        pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, logger: &L) -> Result<ChannelMonitor<ChanSigner>, ChannelError> where L::Target: Logger {
                if !self.channel_outbound {
-                       return Err(ChannelError::Close("Received funding_signed for an inbound channel?"));
+                       return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
                }
                if self.channel_state & !(ChannelState::MonitorUpdateFailed as u32) != ChannelState::FundingCreated as u32 {
-                       return Err(ChannelError::Close("Received funding_signed in strange state!"));
+                       return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
                }
                if self.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
                                self.cur_remote_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
@@ -1582,7 +1590,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
                // They sign the "local" commitment transaction, allowing us to broadcast the tx if we wish.
                if let Err(_) = self.secp_ctx.verify(&local_sighash, &msg.signature, their_funding_pubkey) {
-                       return Err(ChannelError::Close("Invalid funding_signed signature from peer"));
+                       return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
                }
 
                let their_pubkeys = self.their_pubkeys.as_ref().unwrap();
@@ -1619,7 +1627,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
        pub fn funding_locked(&mut self, msg: &msgs::FundingLocked) -> Result<(), ChannelError> {
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err(ChannelError::Close("Peer sent funding_locked when we needed a channel_reestablish"));
+                       return Err(ChannelError::Close("Peer sent funding_locked when we needed a channel_reestablish".to_owned()));
                }
 
                let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
@@ -1637,12 +1645,12 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                (self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) ==
                                                      (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32)) {
                        if self.their_cur_commitment_point != Some(msg.next_per_commitment_point) {
-                               return Err(ChannelError::Close("Peer sent a reconnect funding_locked with a different point"));
+                               return Err(ChannelError::Close("Peer sent a reconnect funding_locked with a different point".to_owned()));
                        }
                        // They probably disconnected/reconnected and re-sent the funding_locked, which is required
                        return Ok(());
                } else {
-                       return Err(ChannelError::Close("Peer sent a funding_locked at a strange time"));
+                       return Err(ChannelError::Close("Peer sent a funding_locked at a strange time".to_owned()));
                }
 
                self.their_prev_commitment_point = self.their_cur_commitment_point;
@@ -1765,28 +1773,28 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
                let remote_sent_shutdown = (self.channel_state & (ChannelState::ChannelFunded as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelFunded as u32);
                if remote_sent_shutdown {
-                       return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state"));
+                       return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
                }
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish"));
+                       return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
                }
                if msg.amount_msat > self.channel_value_satoshis * 1000 {
-                       return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel"));
+                       return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
                }
                if msg.amount_msat == 0 {
-                       return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC"));
+                       return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
                }
                if msg.amount_msat < self.our_htlc_minimum_msat {
-                       return Err(ChannelError::Close("Remote side tried to send less than our minimum HTLC value"));
+                       return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.our_htlc_minimum_msat, msg.amount_msat)));
                }
 
                let (inbound_htlc_count, htlc_inbound_value_msat) = self.get_inbound_pending_htlc_stats();
                if inbound_htlc_count + 1 > OUR_MAX_HTLCS as u32 {
-                       return Err(ChannelError::Close("Remote tried to push more than our max accepted HTLCs"));
+                       return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", OUR_MAX_HTLCS)));
                }
-               // Check our_max_htlc_value_in_flight_msat
-               if htlc_inbound_value_msat + msg.amount_msat > Channel::<ChanSigner>::get_our_max_htlc_value_in_flight_msat(self.channel_value_satoshis) {
-                       return Err(ChannelError::Close("Remote HTLC add would put them over our max HTLC value"));
+               let our_max_htlc_value_in_flight_msat = Channel::<ChanSigner>::get_our_max_htlc_value_in_flight_msat(self.channel_value_satoshis);
+               if htlc_inbound_value_msat + msg.amount_msat > our_max_htlc_value_in_flight_msat {
+                       return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", our_max_htlc_value_in_flight_msat)));
                }
                // Check remote_channel_reserve_satoshis (we're getting paid, so they have to at least meet
                // the reserve_satoshis we told them to always have as direct payment so that they lose
@@ -1814,7 +1822,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                let pending_remote_value_msat =
                        self.channel_value_satoshis * 1000 - pending_value_to_self_msat;
                if pending_remote_value_msat < msg.amount_msat {
-                       return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds"));
+                       return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
                }
 
                // Check that the remote can afford to pay for this HTLC on-chain at the current
@@ -1824,13 +1832,13 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        self.next_remote_commit_tx_fee_msat(1)
                };
                if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat {
-                       return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees"));
+                       return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
                };
 
                let chan_reserve_msat =
                        Channel::<ChanSigner>::get_remote_channel_reserve_satoshis(self.channel_value_satoshis) * 1000;
                if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < chan_reserve_msat {
-                       return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value"));
+                       return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
                }
 
                if !self.channel_outbound {
@@ -1854,15 +1862,15 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        // +1 for this HTLC.
                        let local_commit_tx_fee_msat = self.next_local_commit_tx_fee_msat(1);
                        if self.value_to_self_msat < self.local_channel_reserve_satoshis * 1000 + local_commit_tx_fee_msat {
-                               return Err(ChannelError::Close("Cannot receive value that would put us under local channel reserve value"));
+                               return Err(ChannelError::Close("Cannot receive value that would put us under local channel reserve value".to_owned()));
                        }
                }
 
                if self.next_remote_htlc_id != msg.htlc_id {
-                       return Err(ChannelError::Close("Remote skipped HTLC ID"));
+                       return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.next_remote_htlc_id)));
                }
                if msg.cltv_expiry >= 500000000 {
-                       return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height"));
+                       return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
                }
 
                if self.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
@@ -1892,30 +1900,30 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                        None => {},
                                        Some(payment_hash) =>
                                                if payment_hash != htlc.payment_hash {
-                                                       return Err(ChannelError::Close("Remote tried to fulfill HTLC with an incorrect preimage"));
+                                                       return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
                                                }
                                };
                                match htlc.state {
                                        OutboundHTLCState::LocalAnnounced(_) =>
-                                               return Err(ChannelError::Close("Remote tried to fulfill/fail HTLC before it had been committed")),
+                                               return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
                                        OutboundHTLCState::Committed => {
                                                htlc.state = OutboundHTLCState::RemoteRemoved(fail_reason);
                                        },
                                        OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
-                                               return Err(ChannelError::Close("Remote tried to fulfill/fail HTLC that they'd already fulfilled/failed")),
+                                               return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
                                }
                                return Ok(&htlc.source);
                        }
                }
-               Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find"))
+               Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
        }
 
        pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<HTLCSource, ChannelError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
-                       return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state"));
+                       return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
                }
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish"));
+                       return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
                }
 
                let payment_hash = PaymentHash(Sha256::hash(&msg.payment_preimage.0[..]).into_inner());
@@ -1924,10 +1932,10 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
        pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
-                       return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state"));
+                       return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
                }
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish"));
+                       return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
                }
 
                self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
@@ -1936,10 +1944,10 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
        pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
-                       return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state"));
+                       return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
                }
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish"));
+                       return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
                }
 
                self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
@@ -1951,13 +1959,13 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                L::Target: Logger
        {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
-                       return Err((None, ChannelError::Close("Got commitment signed message when channel was not in an operational state")));
+                       return Err((None, ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned())));
                }
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err((None, ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish")));
+                       return Err((None, ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned())));
                }
                if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.last_sent_closing_fee.is_some() {
-                       return Err((None, ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds")));
+                       return Err((None, ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned())));
                }
 
                let funding_script = self.get_funding_redeemscript();
@@ -1981,7 +1989,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                let local_sighash = hash_to_message!(&bip143::SighashComponents::new(&local_commitment_tx.0).sighash_all(&local_commitment_tx.0.input[0], &funding_script, self.channel_value_satoshis)[..]);
                log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {}", log_bytes!(msg.signature.serialize_compact()[..]), log_bytes!(self.their_funding_pubkey().serialize()), encode::serialize_hex(&local_commitment_tx.0), log_bytes!(local_sighash[..]), encode::serialize_hex(&funding_script));
                if let Err(_) = self.secp_ctx.verify(&local_sighash, &msg.signature, &self.their_funding_pubkey()) {
-                       return Err((None, ChannelError::Close("Invalid commitment tx signature from peer")));
+                       return Err((None, ChannelError::Close("Invalid commitment tx signature from peer".to_owned())));
                }
 
                //If channel fee was updated by funder confirm funder can afford the new fee rate when applied to the current local commitment transaction
@@ -1991,12 +1999,12 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
                        let remote_reserve_we_require = Channel::<ChanSigner>::get_remote_channel_reserve_satoshis(self.channel_value_satoshis);
                        if self.channel_value_satoshis - self.value_to_self_msat / 1000 < total_fee + remote_reserve_we_require {
-                               return Err((None, ChannelError::Close("Funding remote cannot afford proposed new fee")));
+                               return Err((None, ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned())));
                        }
                }
 
                if msg.htlc_signatures.len() != local_commitment_tx.1 {
-                       return Err((None, ChannelError::Close("Got wrong number of HTLC signatures from remote")));
+                       return Err((None, ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote, expected {}", msg.htlc_signatures.len(), local_commitment_tx.1))));
                }
 
                // TODO: Merge these two, sadly they are currently both required to be passed separately to
@@ -2010,7 +2018,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                let htlc_sighash = hash_to_message!(&bip143::SighashComponents::new(&htlc_tx).sighash_all(&htlc_tx.input[0], &htlc_redeemscript, htlc.amount_msat / 1000)[..]);
                                log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {}", log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(local_keys.b_htlc_key.serialize()), encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript));
                                if let Err(_) = self.secp_ctx.verify(&htlc_sighash, &msg.htlc_signatures[idx], &local_keys.b_htlc_key) {
-                                       return Err((None, ChannelError::Close("Invalid HTLC tx signature from peer")));
+                                       return Err((None, ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())));
                                }
                                htlcs_without_source.push((htlc.clone(), Some(msg.htlc_signatures[idx])));
                                htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source));
@@ -2090,7 +2098,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        }
                        // TODO: Call maybe_propose_first_closing_signed on restoration (or call it here and
                        // re-send the message on restoration)
-                       return Err((Some(monitor_update), ChannelError::Ignore("Previous monitor update failure prevented generation of RAA")));
+                       return Err((Some(monitor_update), ChannelError::Ignore("Previous monitor update failure prevented generation of RAA".to_owned())));
                }
 
                let (our_commitment_signed, closing_signed) = if need_our_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
@@ -2248,18 +2256,18 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                        L::Target: Logger,
        {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
-                       return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state"));
+                       return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
                }
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish"));
+                       return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
                }
                if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.last_sent_closing_fee.is_some() {
-                       return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds"));
+                       return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
                }
 
                if let Some(their_prev_commitment_point) = self.their_prev_commitment_point {
-                       if PublicKey::from_secret_key(&self.secp_ctx, &secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret")) != their_prev_commitment_point {
-                               return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey"));
+                       if PublicKey::from_secret_key(&self.secp_ctx, &secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned())) != their_prev_commitment_point {
+                               return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
                        }
                }
 
@@ -2271,11 +2279,11 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        // lot of work, and there's some chance this is all a misunderstanding anyway.
                        // We have to do *something*, though, since our signer may get mad at us for otherwise
                        // jumping a remote commitment number, so best to just force-close and move on.
-                       return Err(ChannelError::Close("Received an unexpected revoke_and_ack"));
+                       return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
                }
 
                self.commitment_secrets.provide_secret(self.cur_remote_commitment_transaction_number + 1, msg.per_commitment_secret)
-                       .map_err(|_| ChannelError::Close("Previous secrets did not match new one"))?;
+                       .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
                self.latest_monitor_update_id += 1;
                let mut monitor_update = ChannelMonitorUpdate {
                        update_id: self.latest_monitor_update_id,
@@ -2647,10 +2655,10 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                where F::Target: FeeEstimator
        {
                if self.channel_outbound {
-                       return Err(ChannelError::Close("Non-funding remote tried to update channel fee"));
+                       return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
                }
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish"));
+                       return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
                }
                Channel::<ChanSigner>::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;
                self.pending_update_fee = Some(msg.feerate_per_kw);
@@ -2732,23 +2740,23 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
                        // almost certainly indicates we are going to end up out-of-sync in some way, so we
                        // just close here instead of trying to recover.
-                       return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect"));
+                       return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
                }
 
                if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
                        msg.next_local_commitment_number == 0 {
-                       return Err(ChannelError::Close("Peer sent a garbage channel_reestablish"));
+                       return Err(ChannelError::Close("Peer sent a garbage channel_reestablish".to_owned()));
                }
 
                if msg.next_remote_commitment_number > 0 {
                        match msg.data_loss_protect {
                                OptionalField::Present(ref data_loss) => {
                                        if self.local_keys.commitment_secret(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1) != data_loss.your_last_per_commitment_secret {
-                                               return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided"));
+                                               return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
                                        }
                                        if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_local_commitment_transaction_number {
                                                return Err(ChannelError::CloseDelayBroadcast(
-                                                       "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting"
+                                                       "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting".to_owned()
                                                ));
                                        }
                                },
@@ -2772,7 +2780,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        if self.channel_state & (ChannelState::OurFundingLocked as u32) == 0 ||
                                        self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
                                if msg.next_remote_commitment_number != 0 {
-                                       return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent funding_locked yet"));
+                                       return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent funding_locked yet".to_owned()));
                                }
                                // Short circuit the whole handler as there is nothing we can resend them
                                return Ok((None, None, None, None, RAACommitmentOrder::CommitmentFirst, shutdown_msg));
@@ -2799,7 +2807,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                Some(self.get_last_revoke_and_ack())
                        }
                } else {
-                       return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction"));
+                       return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
                };
 
                // We increment cur_remote_commitment_transaction_number only upon receipt of
@@ -2853,7 +2861,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
                        return Ok((resend_funding_locked, required_revoke, Some(self.get_last_commitment_update(logger)), None, self.resend_order.clone(), shutdown_msg));
                } else {
-                       return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction"));
+                       return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()));
                }
        }
 
@@ -2891,17 +2899,17 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                where F::Target: FeeEstimator
        {
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish"));
+                       return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
                }
                if self.channel_state < ChannelState::FundingSent as u32 {
                        // Spec says we should fail the connection, not the channel, but that's nonsense, there
                        // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
                        // can do that via error message without getting a connection fail anyway...
-                       return Err(ChannelError::Close("Peer sent shutdown pre-funding generation"));
+                       return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
                }
                for htlc in self.pending_inbound_htlcs.iter() {
                        if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
-                               return Err(ChannelError::Close("Got shutdown with remote pending HTLCs"));
+                               return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
                        }
                }
                assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
@@ -2909,17 +2917,17 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                // BOLT 2 says we must only send a scriptpubkey of certain standard forms, which are up to
                // 34 bytes in length, so don't let the remote peer feed us some super fee-heavy script.
                if self.channel_outbound && msg.scriptpubkey.len() > 34 {
-                       return Err(ChannelError::Close("Got shutdown_scriptpubkey of absurd length from remote peer"));
+                       return Err(ChannelError::Close(format!("Got shutdown_scriptpubkey ({}) of absurd length from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
                }
 
                //Check shutdown_scriptpubkey form as BOLT says we must
                if !msg.scriptpubkey.is_p2pkh() && !msg.scriptpubkey.is_p2sh() && !msg.scriptpubkey.is_v0_p2wpkh() && !msg.scriptpubkey.is_v0_p2wsh() {
-                       return Err(ChannelError::Close("Got a nonstandard scriptpubkey from remote peer"));
+                       return Err(ChannelError::Close(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
                }
 
                if self.their_shutdown_scriptpubkey.is_some() {
                        if Some(&msg.scriptpubkey) != self.their_shutdown_scriptpubkey.as_ref() {
-                               return Err(ChannelError::Close("Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey"));
+                               return Err(ChannelError::Close(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
                        }
                } else {
                        self.their_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
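
For illustration, the standard-form requirement above reduces to a single predicate over the script. A minimal sketch, assuming the same rust-bitcoin Script methods already used in this check (the helper name is hypothetical, not part of this change):

    use bitcoin::blockdata::script::Script;

    // Accept only the shutdown scriptpubkey forms BOLT 2 allows: P2PKH, P2SH,
    // v0 P2WPKH and v0 P2WSH.
    fn is_standard_shutdown_script(spk: &Script) -> bool {
        spk.is_p2pkh() || spk.is_p2sh() || spk.is_v0_p2wpkh() || spk.is_v0_p2wsh()
    }
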
@@ -2989,22 +2997,22 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                where F::Target: FeeEstimator
        {
                if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
-                       return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown"));
+                       return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
                }
                if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-                       return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish"));
+                       return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
                }
                if !self.pending_inbound_htlcs.is_empty() || !self.pending_outbound_htlcs.is_empty() {
-                       return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs"));
+                       return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
                }
                if msg.fee_satoshis > 21000000 * 10000000 { //this is required to stop potential overflow in build_closing_transaction
-                       return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee"));
+                       return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
                }
 
                let funding_redeemscript = self.get_funding_redeemscript();
                let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
                if used_total_fee != msg.fee_satoshis {
-                       return Err(ChannelError::Close("Remote sent us a closing_signed with a fee greater than the value they can claim"));
+                       return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee greater than the value they can claim. Fee in message: {} satoshis", msg.fee_satoshis)));
                }
                let mut sighash = hash_to_message!(&bip143::SighashComponents::new(&closing_tx).sighash_all(&closing_tx.input[0], &funding_redeemscript, self.channel_value_satoshis)[..]);
 
@@ -3017,7 +3025,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                // limits, so check for that case by re-checking the signature here.
                                closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
                                sighash = hash_to_message!(&bip143::SighashComponents::new(&closing_tx).sighash_all(&closing_tx.input[0], &funding_redeemscript, self.channel_value_satoshis)[..]);
-                               secp_check!(self.secp_ctx.verify(&sighash, &msg.signature, self.their_funding_pubkey()), "Invalid closing tx signature from peer");
+                               secp_check!(self.secp_ctx.verify(&sighash, &msg.signature, self.their_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
                        },
                };
 
@@ -3036,7 +3044,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                                let (closing_tx, used_total_fee) = self.build_closing_transaction($new_feerate as u64 * closing_tx_max_weight / 1000, false);
                                let our_sig = self.local_keys
                                        .sign_closing_transaction(&closing_tx, &self.secp_ctx)
-                                       .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction"))?;
+                                       .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
                                self.last_sent_closing_fee = Some(($new_feerate, used_total_fee, our_sig.clone()));
                                return Ok((Some(msgs::ClosingSigned {
                                        channel_id: self.channel_id,
@@ -3052,7 +3060,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        if (proposed_sat_per_kw as u32) > our_max_feerate {
                                if let Some((last_feerate, _, _)) = self.last_sent_closing_fee {
                                        if our_max_feerate <= last_feerate {
-                                               return Err(ChannelError::Close("Unable to come to consensus about closing feerate, remote wanted something higher than our Normal feerate"));
+                                               return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wanted something higher ({}) than our Normal feerate ({})", proposed_sat_per_kw, our_max_feerate)));
                                        }
                                }
                                propose_new_feerate!(our_max_feerate);
@@ -3062,7 +3070,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        if (proposed_sat_per_kw as u32) < our_min_feerate {
                                if let Some((last_feerate, _, _)) = self.last_sent_closing_fee {
                                        if our_min_feerate >= last_feerate {
-                                               return Err(ChannelError::Close("Unable to come to consensus about closing feerate, remote wanted something lower than our Background feerate"));
+                                               return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wanted something lower ({}) than our Background feerate ({})", proposed_sat_per_kw, our_min_feerate)));
                                        }
                                }
                                propose_new_feerate!(our_min_feerate);
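
Read together, the two branches above clamp the peer's proposed feerate into our [Background, Normal] range, and only give up (closing the channel) once our previous offer already sat at the relevant bound. A rough sketch of that rule over plain integers, with hypothetical names rather than the real channel state:

    // Returns the feerate we would counter-offer, or None when no agreement is possible.
    fn counter_offer(proposed: u32, our_min: u32, our_max: u32, last_sent: Option<u32>) -> Option<u32> {
        if proposed > our_max {
            // We already offered our Normal feerate and they still want more: give up.
            if last_sent.map_or(false, |last| our_max <= last) { return None; }
            Some(our_max)
        } else if proposed < our_min {
            // We already offered our Background feerate and they still want less: give up.
            if last_sent.map_or(false, |last| our_min >= last) { return None; }
            Some(our_min)
        } else {
            Some(proposed)
        }
    }
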
@@ -3071,7 +3079,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
 
                let our_sig = self.local_keys
                        .sign_closing_transaction(&closing_tx, &self.secp_ctx)
-                       .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction"))?;
+                       .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
                self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &our_sig);
 
                self.channel_state = ChannelState::ShutdownComplete as u32;
@@ -3513,7 +3521,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                let remote_keys = self.build_remote_transaction_keys()?;
                let remote_initial_commitment_tx = self.build_commitment_transaction(self.cur_remote_commitment_transaction_number, &remote_keys, false, false, self.feerate_per_kw, logger).0;
                Ok(self.local_keys.sign_remote_commitment(self.feerate_per_kw, &remote_initial_commitment_tx, &remote_keys, &Vec::new(), self.our_to_self_delay, &self.secp_ctx)
-                               .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed"))?.0)
+                               .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
        }
 
        /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
@@ -3571,13 +3579,13 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        /// https://github.com/lightningnetwork/lightning-rfc/issues/468
        pub fn get_channel_announcement(&self, our_node_id: PublicKey, chain_hash: BlockHash) -> Result<(msgs::UnsignedChannelAnnouncement, Signature), ChannelError> {
                if !self.config.announced_channel {
-                       return Err(ChannelError::Ignore("Channel is not available for public announcements"));
+                       return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
                }
                if self.channel_state & (ChannelState::ChannelFunded as u32) == 0 {
-                       return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement until the channel funding has been locked"));
+                       return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement until the channel funding has been locked".to_owned()));
                }
                if (self.channel_state & (ChannelState::LocalShutdownSent as u32 | ChannelState::ShutdownComplete as u32)) != 0 {
-                       return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement once the channel is closing"));
+                       return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement once the channel is closing".to_owned()));
                }
 
                let were_node_one = our_node_id.serialize()[..] < self.their_node_id.serialize()[..];
@@ -3594,7 +3602,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                };
 
                let sig = self.local_keys.sign_channel_announcement(&msg, &self.secp_ctx)
-                       .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement"))?;
+                       .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
 
                Ok((msg, sig))
        }
@@ -3662,19 +3670,19 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        /// If an Err is returned, it's a ChannelError::Ignore!
        pub fn send_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> {
                if (self.channel_state & (ChannelState::ChannelFunded as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelFunded as u32) {
-                       return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down"));
+                       return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
                }
-
-               if amount_msat > self.channel_value_satoshis * 1000 {
-                       return Err(ChannelError::Ignore("Cannot send more than the total value of the channel"));
+               let channel_total_msat = self.channel_value_satoshis * 1000;
+               if amount_msat > channel_total_msat {
+                       return Err(ChannelError::Ignore(format!("Cannot send amount {} because it is more than the total value of the channel ({})", amount_msat, channel_total_msat)));
                }
 
                if amount_msat == 0 {
-                       return Err(ChannelError::Ignore("Cannot send 0-msat HTLC"));
+                       return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
                }
 
                if amount_msat < self.their_htlc_minimum_msat {
-                       return Err(ChannelError::Ignore("Cannot send less than their minimum HTLC value"));
+                       return Err(ChannelError::Ignore(format!("Cannot send less than their minimum HTLC value ({})", self.their_htlc_minimum_msat)));
                }
 
                if (self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
@@ -3684,16 +3692,16 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        // disconnected during the time the previous hop was doing the commitment dance we may
                        // end up getting here after the forwarding delay. In any case, returning an
                        // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
-                       return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected/frozen for channel monitor update"));
+                       return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected/frozen for channel monitor update".to_owned()));
                }
 
                let (outbound_htlc_count, htlc_outbound_value_msat) = self.get_outbound_pending_htlc_stats();
                if outbound_htlc_count + 1 > self.their_max_accepted_htlcs as u32 {
-                       return Err(ChannelError::Ignore("Cannot push more than their max accepted HTLCs"));
+                       return Err(ChannelError::Ignore(format!("Cannot push more than their max accepted HTLCs ({})", self.their_max_accepted_htlcs)));
                }
                // Check their_max_htlc_value_in_flight_msat
                if htlc_outbound_value_msat + amount_msat > self.their_max_htlc_value_in_flight_msat {
-                       return Err(ChannelError::Ignore("Cannot send value that would put us over the max HTLC value in flight our peer will accept"));
+                       return Err(ChannelError::Ignore(format!("Cannot send value that would put us over the max HTLC value in flight our peer will accept ({})", self.their_max_htlc_value_in_flight_msat)));
                }
 
                if !self.channel_outbound {
@@ -3704,13 +3712,13 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        // 1 additional HTLC corresponding to this HTLC.
                        let remote_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(1);
                        if remote_balance_msat < remote_chan_reserve_msat + remote_commit_tx_fee_msat {
-                               return Err(ChannelError::Ignore("Cannot send value that would put them under remote channel reserve value"));
+                               return Err(ChannelError::Ignore("Cannot send value that would put them under remote channel reserve value".to_owned()));
                        }
                }
 
                let pending_value_to_self_msat = self.value_to_self_msat - htlc_outbound_value_msat;
                if pending_value_to_self_msat < amount_msat {
-                       return Err(ChannelError::Ignore("Cannot send value that would overdraw remaining funds"));
+                       return Err(ChannelError::Ignore(format!("Cannot send value that would overdraw remaining funds. Amount: {}, pending value to self: {}", amount_msat, pending_value_to_self_msat)));
                }
 
                // The `+1` is for the HTLC currently being added to the commitment tx and
@@ -3719,14 +3727,14 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        2 * self.next_local_commit_tx_fee_msat(1 + 1)
                } else { 0 };
                if pending_value_to_self_msat - amount_msat < local_commit_tx_fee_msat {
-                       return Err(ChannelError::Ignore("Cannot send value that would not leave enough to pay for fees"));
+                       return Err(ChannelError::Ignore(format!("Cannot send value that would not leave enough to pay for fees. Pending value to self: {}, local commit tx fee: {}", pending_value_to_self_msat, local_commit_tx_fee_msat)));
                }
 
                // Check self.local_channel_reserve_satoshis (the amount we must keep as
                // reserve for the remote to have something to claim if we misbehave)
                let chan_reserve_msat = self.local_channel_reserve_satoshis * 1000;
                if pending_value_to_self_msat - amount_msat - local_commit_tx_fee_msat < chan_reserve_msat {
-                       return Err(ChannelError::Ignore("Cannot send value that would put us under local channel reserve value"));
+                       return Err(ChannelError::Ignore(format!("Cannot send value that would put us under local channel reserve value ({})", chan_reserve_msat)));
                }
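
Taken together, the checks above bound what send_htlc will accept: the amount must fit within the pending balance, leave room for the commitment transaction fee (when we are the funder), and not dip into the local channel reserve. A worked example with made-up numbers (the fee and reserve values are hypothetical, not derived from real channel state):

    fn send_htlc_headroom_example() {
        // All values in millisatoshi.
        let value_to_self_msat: u64 = 60_000_000;        // our current balance
        let htlc_outbound_value_msat: u64 = 5_000_000;   // already-pending outbound HTLCs
        let local_commit_tx_fee_msat: u64 = 2_440_000;   // 2 * next_local_commit_tx_fee_msat(1 + 1), hypothetical
        let chan_reserve_msat: u64 = 1_000_000;          // local_channel_reserve_satoshis * 1000, hypothetical
        let amount_msat: u64 = 50_000_000;

        let pending_value_to_self_msat = value_to_self_msat - htlc_outbound_value_msat; // 55_000_000

        // Mirrors the three checks above: overdraw, fee headroom, local reserve.
        assert!(amount_msat <= pending_value_to_self_msat);
        assert!(pending_value_to_self_msat - amount_msat >= local_commit_tx_fee_msat);
        assert!(pending_value_to_self_msat - amount_msat - local_commit_tx_fee_msat >= chan_reserve_msat);
    }
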
 
                // Now update local state:
@@ -3866,7 +3874,7 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
                        }
 
                        let res = self.local_keys.sign_remote_commitment(feerate_per_kw, &remote_commitment_tx.0, &remote_keys, &htlcs, self.our_to_self_delay, &self.secp_ctx)
-                               .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed"))?;
+                               .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
                        signature = res.0;
                        htlc_signatures = res.1;
 
@@ -3910,20 +3918,20 @@ impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
        pub fn get_shutdown(&mut self) -> Result<(msgs::Shutdown, Vec<(HTLCSource, PaymentHash)>), APIError> {
                for htlc in self.pending_outbound_htlcs.iter() {
                        if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
-                               return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first"});
+                               return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
                        }
                }
                if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
                        if (self.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
-                               return Err(APIError::APIMisuseError{err: "Shutdown already in progress"});
+                               return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
                        }
                        else if (self.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
-                               return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote"});
+                               return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
                        }
                }
                assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
                if self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32) != 0 {
-                       return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?"});
+                       return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
                }
 
                let our_closing_script = self.get_closing_scriptpubkey();
index 28c6430bbc2e35702720ac0fa9e78ac34fbfd1e9..d668cdd59e0651a32b9d8a246fca0258d3e810fb 100644 (file)
@@ -51,6 +51,7 @@ use std::sync::atomic::{AtomicUsize, Ordering};
 use std::time::Duration;
 use std::marker::{Sync, Send};
 use std::ops::Deref;
+use bitcoin::hashes::hex::ToHex;
 
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
 //
@@ -192,14 +193,14 @@ struct MsgHandleErrInternal {
 }
 impl MsgHandleErrInternal {
        #[inline]
-       fn send_err_msg_no_close(err: &'static str, channel_id: [u8; 32]) -> Self {
+       fn send_err_msg_no_close(err: String, channel_id: [u8; 32]) -> Self {
                Self {
                        err: LightningError {
-                               err,
+                               err: err.clone(),
                                action: msgs::ErrorAction::SendErrorMessage {
                                        msg: msgs::ErrorMessage {
                                                channel_id,
-                                               data: err.to_string()
+                                               data: err
                                        },
                                },
                        },
@@ -207,7 +208,7 @@ impl MsgHandleErrInternal {
                }
        }
        #[inline]
-       fn ignore_no_close(err: &'static str) -> Self {
+       fn ignore_no_close(err: String) -> Self {
                Self {
                        err: LightningError {
                                err,
@@ -221,14 +222,14 @@ impl MsgHandleErrInternal {
                Self { err, shutdown_finish: None }
        }
        #[inline]
-       fn from_finish_shutdown(err: &'static str, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
+       fn from_finish_shutdown(err: String, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
                Self {
                        err: LightningError {
-                               err,
+                               err: err.clone(),
                                action: msgs::ErrorAction::SendErrorMessage {
                                        msg: msgs::ErrorMessage {
                                                channel_id,
-                                               data: err.to_string()
+                                               data: err
                                        },
                                },
                        },
@@ -244,20 +245,20 @@ impl MsgHandleErrInternal {
                                        action: msgs::ErrorAction::IgnoreError,
                                },
                                ChannelError::Close(msg) => LightningError {
-                                       err: msg,
+                                       err: msg.clone(),
                                        action: msgs::ErrorAction::SendErrorMessage {
                                                msg: msgs::ErrorMessage {
                                                        channel_id,
-                                                       data: msg.to_string()
+                                                       data: msg
                                                },
                                        },
                                },
                                ChannelError::CloseDelayBroadcast(msg) => LightningError {
-                                       err: msg,
+                                       err: msg.clone(),
                                        action: msgs::ErrorAction::SendErrorMessage {
                                                msg: msgs::ErrorMessage {
                                                        channel_id,
-                                                       data: msg.to_string()
+                                                       data: msg
                                                },
                                        },
                                },
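
With these constructors now taking a String, the message is cloned once into LightningError::err and moved into the outgoing ErrorMessage::data, so call sites can embed the offending value directly. For instance (a hypothetical refinement, not something this change does), the genesis-hash check further below could name the unexpected hash, since BlockHash implements Display:

    return Err(MsgHandleErrInternal::send_err_msg_no_close(
        format!("Unknown genesis block hash {}", msg.chain_hash), msg.temporary_channel_id.clone()));
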
@@ -632,7 +633,7 @@ macro_rules! handle_monitor_err {
                                // splitting hairs we'd prefer to claim payments that were to us, but we haven't
                                // given up the preimage yet, so might as well just wait until the payment is
                                // retried, avoiding the on-chain fees.
-                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()));
+                               let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()));
                                res
                        },
                        ChannelMonitorUpdateErr::TemporaryFailure => {
@@ -655,7 +656,7 @@ macro_rules! handle_monitor_err {
                                        debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
                                }
                                $entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
-                               Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()))
+                               Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$entry.key()))
                        },
                }
        }
@@ -757,7 +758,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        /// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000.
        pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, override_config: Option<UserConfig>) -> Result<(), APIError> {
                if channel_value_satoshis < 1000 {
-                       return Err(APIError::APIMisuseError { err: "channel_value must be at least 1000 satoshis" });
+                       return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
                }
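
On the API side, the richer APIMisuseError text reaches callers as an owned String they can log or assert on. A hypothetical caller-side sketch (channel_manager and node_pubkey are assumed bindings, not taken from this change):

    // Requesting a 500-satoshi channel trips the minimum-value check above.
    match channel_manager.create_channel(node_pubkey, 500, 0, 42, None) {
        Err(APIError::APIMisuseError { err }) => assert!(err.contains("at least 1000 satoshis")),
        res => panic!("unexpected result: {:?}", res),
    }
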
 
                let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
@@ -769,7 +770,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                match channel_state.by_id.entry(channel.channel_id()) {
                        hash_map::Entry::Occupied(_) => {
                                if cfg!(feature = "fuzztarget") {
-                                       return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG" });
+                                       return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
                                } else {
                                        panic!("RNG is bad???");
                                }
@@ -855,7 +856,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                (failed_htlcs, Some(chan_entry.remove_entry().1))
                                        } else { (failed_htlcs, None) }
                                },
-                               hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel"})
+                               hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()})
                        }
                };
                for htlc_source in failed_htlcs.drain(..) {
@@ -1201,7 +1202,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        /// May be called with channel_state already locked!
        fn get_channel_update(&self, chan: &Channel<ChanSigner>) -> Result<msgs::ChannelUpdate, LightningError> {
                let short_channel_id = match chan.get_short_channel_id() {
-                       None => return Err(LightningError{err: "Channel not yet established", action: msgs::ErrorAction::IgnoreError}),
+                       None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
                        Some(id) => id,
                };
 
@@ -1246,7 +1247,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                let err: Result<(), _> = loop {
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) {
-                               None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
+                               None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
                                Some(id) => id.clone(),
                        };
 
@@ -1257,7 +1258,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
                                        }
                                        if !chan.get().is_live() {
-                                               return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"});
+                                               return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
                                        }
                                        break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
                                                path: path.clone(),
@@ -2213,7 +2214,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 
        fn internal_open_channel(&self, their_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
                if msg.chain_hash != self.genesis_hash {
-                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash", msg.temporary_channel_id.clone()));
+                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
                }
 
                let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, their_node_id.clone(), their_features, msg, 0, &self.default_configuration)
@@ -2221,7 +2222,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                match channel_state.by_id.entry(channel.channel_id()) {
-                       hash_map::Entry::Occupied(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!", msg.temporary_channel_id.clone())),
+                       hash_map::Entry::Occupied(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!".to_owned(), msg.temporary_channel_id.clone())),
                        hash_map::Entry::Vacant(entry) => {
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
                                        node_id: their_node_id.clone(),
@@ -2240,12 +2241,12 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        match channel_state.by_id.entry(msg.temporary_channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        if chan.get().get_their_node_id() != *their_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
                                        }
                                        try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_features), channel_state, chan);
                                        (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
                        }
                };
                let mut pending_events = self.pending_events.lock().unwrap();
@@ -2265,11 +2266,11 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        if chan.get().get_their_node_id() != *their_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
                                        }
                                        (try_chan_entry!(self, chan.get_mut().funding_created(msg, &self.logger), channel_state, chan), chan.remove())
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
                        }
                };
                // Because we have exclusive ownership of the channel here we can release the channel_state
@@ -2281,7 +2282,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                        // channel, not the temporary_channel_id. This is compatible with ourselves, but the
                                        // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
                                        // any messages referencing a previously-closed channel anyway.
-                                       return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(true), None));
+                                       return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id, chan.force_shutdown(true), None));
                                },
                                ChannelMonitorUpdateErr::TemporaryFailure => {
                                        // There's no problem signing a counterparty's funding transaction if our monitor
@@ -2296,7 +2297,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                let channel_state = &mut *channel_state_lock;
                match channel_state.by_id.entry(funding_msg.channel_id) {
                        hash_map::Entry::Occupied(_) => {
-                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id", funding_msg.channel_id))
+                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
                        },
                        hash_map::Entry::Vacant(e) => {
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
@@ -2316,7 +2317,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        match channel_state.by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        if chan.get().get_their_node_id() != *their_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
                                        let monitor = match chan.get_mut().funding_signed(&msg, &self.logger) {
                                                Ok(update) => update,
@@ -2327,7 +2328,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                        }
                                        (chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
                let mut pending_events = self.pending_events.lock().unwrap();
@@ -2344,7 +2345,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
                                try_chan_entry!(self, chan.get_mut().funding_locked(&msg), channel_state, chan);
                                if let Some(announcement_sigs) = self.get_announcement_sigs(chan.get()) {
@@ -2365,7 +2366,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                }
                                Ok(())
                        },
-                       hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                       hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
        }
 
@@ -2377,7 +2378,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        match channel_state.by_id.entry(msg.channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan_entry) => {
                                        if chan_entry.get().get_their_node_id() != *their_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
                                        let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &msg), channel_state, chan_entry);
                                        if let Some(msg) = shutdown {
@@ -2399,7 +2400,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                (dropped_htlcs, Some(chan_entry.remove_entry().1))
                                        } else { (dropped_htlcs, None) }
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
                for htlc_source in dropped_htlcs.drain(..) {
@@ -2423,7 +2424,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        match channel_state.by_id.entry(msg.channel_id.clone()) {
                                hash_map::Entry::Occupied(mut chan_entry) => {
                                        if chan_entry.get().get_their_node_id() != *their_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
                                        let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), channel_state, chan_entry);
                                        if let Some(msg) = closing_signed {
@@ -2444,7 +2445,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                (tx, Some(chan_entry.remove_entry().1))
                                        } else { (tx, None) }
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
                if let Some(broadcast_tx) = tx {
@@ -2478,7 +2479,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
 
                                let create_pending_htlc_status = |chan: &Channel<ChanSigner>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
@@ -2521,7 +2522,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                };
                                try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), channel_state, chan);
                        },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
                Ok(())
        }
@@ -2533,11 +2534,11 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        match channel_state.by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        if chan.get().get_their_node_id() != *their_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
                                        try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan)
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
                self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone());
@@ -2550,11 +2551,11 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
                                try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), channel_state, chan);
                        },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
                Ok(())
        }
@@ -2565,16 +2566,16 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
                                if (msg.failure_code & 0x8000) == 0 {
-                                       let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set");
+                                       let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
                                        try_chan_entry!(self, Err(chan_err), channel_state, chan);
                                }
                                try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan);
                                Ok(())
                        },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
        }
 
@@ -2584,7 +2585,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
                                let (revoke_and_ack, commitment_signed, closing_signed, monitor_update) =
                                        match chan.get_mut().commitment_signed(&msg, &self.fee_estimator, &self.logger) {
@@ -2626,7 +2627,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                }
                                Ok(())
                        },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
        }
 
@@ -2672,7 +2673,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        match channel_state.by_id.entry(msg.channel_id) {
                                hash_map::Entry::Occupied(mut chan) => {
                                        if chan.get().get_their_node_id() != *their_node_id {
-                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                               return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
                                        let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
                                        let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update) =
@@ -2680,7 +2681,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                        if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                if was_frozen_for_monitor {
                                                        assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
-                                                       return Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA"));
+                                                       return Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
                                                } else {
                                                        return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures);
                                                }
@@ -2699,7 +2700,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                        }
                                        (pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
                                },
-                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                               hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
                for failure in pending_failures.drain(..) {
@@ -2716,11 +2717,11 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
                                try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg), channel_state, chan);
                        },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
                Ok(())
        }
@@ -2732,10 +2733,10 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
                                if !chan.get().is_usable() {
-                                       return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it", action: msgs::ErrorAction::IgnoreError}));
+                                       return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
                                }
 
                                let our_node_id = self.get_our_node_id();
@@ -2746,7 +2747,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
                                if self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }).is_err() ||
                                                self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }).is_err() {
-                                       let chan_err: ChannelError = ChannelError::Close("Bad announcement_signatures node_signature");
+                                       let chan_err: ChannelError = ChannelError::Close("Bad announcement_signatures node_signature".to_owned());
                                        try_chan_entry!(self, Err(chan_err), channel_state, chan);
                                }
 
@@ -2763,7 +2764,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                        update_msg: self.get_channel_update(chan.get()).unwrap(), // can only fail if we're not in a ready state
                                });
                        },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
                Ok(())
        }
@@ -2775,7 +2776,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                match channel_state.by_id.entry(msg.channel_id) {
                        hash_map::Entry::Occupied(mut chan) => {
                                if chan.get().get_their_node_id() != *their_node_id {
-                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
                                let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, mut order, shutdown) =
                                        try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
@@ -2834,7 +2835,7 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                }
                                Ok(())
                        },
-                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+                       hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                }
        }
 
@@ -2851,16 +2852,16 @@ impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        let channel_state = &mut *channel_state_lock;
 
                        match channel_state.by_id.entry(channel_id) {
-                               hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}),
+                               hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: format!("Failed to find corresponding channel for id {}", channel_id.to_hex())}),
                                hash_map::Entry::Occupied(mut chan) => {
                                        if !chan.get().is_outbound() {
-                                               return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"});
+                                               return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel".to_owned()});
                                        }
                                        if chan.get().is_awaiting_monitor_update() {
                                                return Err(APIError::MonitorUpdateFailed);
                                        }
                                        if !chan.get().is_live() {
-                                               return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"});
+                                               return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected".to_owned()});
                                        }
                                        their_node_id = chan.get().get_their_node_id();
                                        if let Some((update_fee, commitment_signed, monitor_update)) =
index 24fe127d70e771462ffdf1062a51d0f3483b8340..2c994c12c8ae48b47d019ef9b5375fb6c19ae37c 100644 (file)
@@ -972,8 +972,8 @@ pub fn route_over_limit<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_rou
        }
 
        let (_, our_payment_hash) = get_payment_preimage_hash!(origin_node);
-       unwrap_send_err!(origin_node.node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { err },
-               assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept"));
+       unwrap_send_err!(origin_node.node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
+               assert!(err.contains("Cannot send value that would put us over the max HTLC value in flight our peer will accept")));
 }
 
 pub fn send_payment<'a, 'b, 'c>(origin: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64, expected_value: u64)  {
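
A note on the recurring `APIError::ChannelUnavailable { err }` to `{ ref err }` changes in this and the following test file: now that `err` is a `String` rather than a `&'static str`, it is no longer `Copy` and cannot be moved out of the matched value, so the patterns have to borrow it. A minimal standalone illustration (not part of the patch; the enum and message below are made up):

enum ApiErr { ChannelUnavailable { err: String } }

fn check(e: &ApiErr) {
    match e {
        // `ref` borrows the String instead of trying to move it out of the
        // reference, which would not compile for a non-Copy field.
        &ApiErr::ChannelUnavailable { ref err } =>
            assert!(err.contains("channel reserve value")),
    }
}

fn main() {
    let e = ApiErr::ChannelUnavailable {
        err: format!("Cannot send value that would put us under local channel reserve value ({})", 1000),
    };
    check(&e);
}
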
index 5e8edb3ca5e602adfac4ad94878dae7fa44e4e3a..4d2b155ce30b1ff4e1085e62df26e0debc1e6b72 100644 (file)
@@ -43,6 +43,8 @@ use bitcoin::hashes::Hash;
 use bitcoin::secp256k1::{Secp256k1, Message};
 use bitcoin::secp256k1::key::{PublicKey,SecretKey};
 
+use regex;
+
 use std::collections::{BTreeSet, HashMap, HashSet};
 use std::default::Default;
 use std::sync::{Arc, Mutex};
@@ -77,10 +79,11 @@ fn test_insane_channel_opens() {
                nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &message_mutator(open_channel_message.clone()));
                let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
                assert_eq!(msg_events.len(), 1);
+               let expected_regex = regex::Regex::new(expected_error_str).unwrap();
                if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
                        match action {
                                &ErrorAction::SendErrorMessage { .. } => {
-                                       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), expected_error_str.to_string(), 1);
+                                       nodes[1].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), expected_regex, 1);
                                },
                                _ => panic!("unexpected event!"),
                        }
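
The `assert_log_regex` used here, together with the `assert_log_contains` calls further down, are new TestLogger helpers; their actual implementation lands in lightning/src/util/test_utils.rs and is not shown in this excerpt. As a rough, self-contained sketch of what such helpers could look like, assuming the logger records each (module, message) pair with an occurrence count (the `lines` field and its layout are assumptions here, not taken from the patch):

use std::collections::HashMap;
use std::sync::Mutex;

// Minimal stand-in for the real TestLogger: it only keeps a count of how many
// times each (module, message) pair was logged.
pub struct TestLogger {
    lines: Mutex<HashMap<(String, String), usize>>,
}

impl TestLogger {
    // Assert that exactly `count` logged entries from `module` contain `line`
    // as a substring.
    pub fn assert_log_contains(&self, module: String, line: String, count: usize) {
        let log_entries = self.lines.lock().unwrap();
        let found: usize = log_entries.iter()
            .filter(|&(&(ref m, ref l), _)| *m == module && l.contains(line.as_str()))
            .map(|(_, c)| *c)
            .sum();
        assert_eq!(found, count);
    }

    // Same idea, but the entry has to match the supplied regex, which is what
    // lets tests accept parameterised messages such as
    // "Cannot push more than their max accepted HTLCs (\d+)".
    pub fn assert_log_regex(&self, module: String, pattern: regex::Regex, count: usize) {
        let log_entries = self.lines.lock().unwrap();
        let found: usize = log_entries.iter()
            .filter(|&(&(ref m, ref l), _)| *m == module && pattern.is_match(l.as_str()))
            .map(|(_, c)| *c)
            .sum();
        assert_eq!(found, count);
    }
}
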
@@ -91,23 +94,23 @@ fn test_insane_channel_opens() {
        use ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;
 
        // Test all mutations that would make the channel open message insane
-       insane_open_helper("funding value > 2^24", |mut msg| { msg.funding_satoshis = MAX_FUNDING_SATOSHIS; msg });
+       insane_open_helper(format!("Funding must be smaller than {}. It was {}", MAX_FUNDING_SATOSHIS, MAX_FUNDING_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = MAX_FUNDING_SATOSHIS; msg });
 
        insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });
 
-       insane_open_helper("push_msat larger than funding value", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
+       insane_open_helper(r"push_msat \d+ was larger than funding value \d+", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
 
        insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg });
 
-       insane_open_helper("Bogus; channel reserve is less than dust limit", |mut msg| { msg.dust_limit_satoshis = msg.channel_reserve_satoshis + 1; msg });
+       insane_open_helper(r"Bogus; channel reserve \(\d+\) is less than dust limit \(\d+\)", |mut msg| { msg.dust_limit_satoshis = msg.channel_reserve_satoshis + 1; msg });
 
-       insane_open_helper("Minimum htlc value is full channel value", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
+       insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
 
        insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
 
-       insane_open_helper("0 max_accpted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg });
+       insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg });
 
-       insane_open_helper("max_accpted_htlcs > 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
+       insane_open_helper("max_accepted_htlcs was 484. It must not be larger than 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
 }
 
 #[test]
@@ -1270,7 +1273,7 @@ fn fake_network_test() {
        route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
        let events = nodes[0].node.get_and_clear_pending_msg_events();
        assert_eq!(events.len(), 0);
-       nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1);
+       nodes[0].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap(), 1);
 
        //TODO: Test that routes work again here as we've been notified that the channel is full
 
@@ -1321,10 +1324,10 @@ fn holding_cell_htlc_counting() {
        {
                let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
                let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
-               unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash_1, &None), true, APIError::ChannelUnavailable { err },
-                       assert_eq!(err, "Cannot push more than their max accepted HTLCs"));
+               unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash_1, &None), true, APIError::ChannelUnavailable { ref err },
+                       assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-               nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1);
+               nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1);
        }
 
        // This should also be true if we try to forward a payment.
@@ -1540,16 +1543,16 @@ fn test_basic_channel_reserve() {
        let err = nodes[0].node.send_payment(&route, our_payment_hash, &None).err().unwrap();
        match err {
                PaymentSendFailure::AllFailedRetrySafe(ref fails) => {
-                       match fails[0] {
-                               APIError::ChannelUnavailable{err} =>
-                                       assert_eq!(err, "Cannot send value that would put us under local channel reserve value"),
+                       match &fails[0] {
+                               &APIError::ChannelUnavailable{ref err} =>
+                                       assert!(regex::Regex::new(r"Cannot send value that would put us under local channel reserve value \(\d+\)").unwrap().is_match(err)),
                                _ => panic!("Unexpected error variant"),
                        }
                },
                _ => panic!("Unexpected error variant"),
        }
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us under local channel reserve value".to_string(), 1);
+       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us under local channel reserve value".to_string(), 1);
 
        send_payment(&nodes[0], &vec![&nodes[1]], max_can_send, max_can_send);
 }
@@ -1751,7 +1754,7 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
        };
 
        let (route, our_payment_hash, _) = get_route_and_payment_hash!(1000);
-       unwrap_send_err!(nodes[1].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { err },
+       unwrap_send_err!(nodes[1].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
                assert_eq!(err, "Cannot send value that would put them under remote channel reserve value"));
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
        nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put them under remote channel reserve value".to_string(), 1);
@@ -1929,10 +1932,10 @@ fn test_channel_reserve_holding_cell_htlcs() {
        {
                let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_0 + 1);
                assert!(route.paths[0].iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
-               unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { err },
-                       assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept"));
+               unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
+                       assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-               nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1);
+               nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1);
        }
 
        // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
@@ -1993,8 +1996,8 @@ fn test_channel_reserve_holding_cell_htlcs() {
        let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs;
        {
                let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
-               unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { err },
-                       assert_eq!(err, "Cannot send value that would put us under local channel reserve value"));
+               unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
+                       assert!(regex::Regex::new(r"Cannot send value that would put us under local channel reserve value \(\d+\)").unwrap().is_match(err)));
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        }
 
@@ -2019,10 +2022,10 @@ fn test_channel_reserve_holding_cell_htlcs() {
        // test with outbound holding cell amount > 0
        {
                let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_22+1);
-               unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { err },
-                       assert_eq!(err, "Cannot send value that would put us under local channel reserve value"));
+               unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
+                       assert!(regex::Regex::new(r"Cannot send value that would put us under local channel reserve value \(\d+\)").unwrap().is_match(err)));
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-               nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us under local channel reserve value".to_string(), 2);
+               nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us under local channel reserve value".to_string(), 2);
        }
 
        let (route_22, our_payment_hash_22, our_payment_preimage_22) = get_route_and_payment_hash!(recv_value_22);
@@ -2105,16 +2108,16 @@ fn test_channel_reserve_holding_cell_htlcs() {
                let err = nodes[0].node.send_payment(&route, our_payment_hash, &None).err().unwrap();
                match err {
                        PaymentSendFailure::AllFailedRetrySafe(ref fails) => {
-                               match fails[0] {
-                                       APIError::ChannelUnavailable{err} =>
-                                               assert_eq!(err, "Cannot send value that would put us under local channel reserve value"),
+                               match &fails[0] {
+                                       &APIError::ChannelUnavailable{ref err} =>
+                                               assert!(regex::Regex::new(r"Cannot send value that would put us under local channel reserve value \(\d+\)").unwrap().is_match(err)),
                                        _ => panic!("Unexpected error variant"),
                                }
                        },
                        _ => panic!("Unexpected error variant"),
                }
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-               nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us under local channel reserve value".to_string(), 3);
+               nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us under local channel reserve value".to_string(), 3);
        }
 
        send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3, recv_value_3);
@@ -6403,10 +6406,10 @@ fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
        let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
        route.paths[0][0].fee_msat = 100;
 
-       unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { err },
-               assert_eq!(err, "Cannot send less than their minimum HTLC value"));
+       unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
+               assert!(regex::Regex::new(r"Cannot send less than their minimum HTLC value \(\d+\)").unwrap().is_match(err)));
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send less than their minimum HTLC value".to_string(), 1);
+       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send less than their minimum HTLC value".to_string(), 1);
 }
 
 #[test]
@@ -6423,11 +6426,11 @@ fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
        let logger = test_utils::TestLogger::new();
        let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
        route.paths[0][0].fee_msat = 0;
-       unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { err },
+       unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
                assert_eq!(err, "Cannot send 0-msat HTLC"));
 
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send 0-msat HTLC".to_string(), 1);
+       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send 0-msat HTLC".to_string(), 1);
 }
 
 #[test]
@@ -6469,8 +6472,8 @@ fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
 
        let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
        let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], 100000000, 500000001, &logger).unwrap();
-       unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::RouteError { err },
-               assert_eq!(err, "Channel CLTV overflowed?!"));
+       unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::RouteError { ref err },
+               assert_eq!(err, &"Channel CLTV overflowed?"));
 }
 
 #[test]
@@ -6513,11 +6516,11 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
        let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
        let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
        let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
-       unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { err },
-               assert_eq!(err, "Cannot push more than their max accepted HTLCs"));
+       unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
+               assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
 
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1);
+       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1);
 }
 
 #[test]
@@ -6537,11 +6540,11 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
        let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
        let logger = test_utils::TestLogger::new();
        let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], max_in_flight+1, TEST_FINAL_CLTV, &logger).unwrap();
-       unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { err },
-               assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept"));
+       unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
+               assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
 
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1);
+       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1);
 
        send_payment(&nodes[0], &[&nodes[1]], max_in_flight, max_in_flight);
 }
@@ -6573,7 +6576,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
        assert!(nodes[1].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
-       assert_eq!(err_msg.data, "Remote side tried to send less than our minimum HTLC value");
+       assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
 }
 
@@ -6653,7 +6656,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
 
        assert!(nodes[1].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
-       assert_eq!(err_msg.data, "Remote tried to push more than our max accepted HTLCs");
+       assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
 }
 
@@ -6678,7 +6681,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
 
        assert!(nodes[1].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
-       assert_eq!(err_msg.data,"Remote HTLC add would put them over our max HTLC value");
+       assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
 }
 
@@ -6752,7 +6755,7 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
 
        assert!(nodes[1].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[1], true).unwrap();
-       assert_eq!(err_msg.data, "Remote skipped HTLC ID");
+       assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[1], 1);
 }
 
@@ -6785,7 +6788,7 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
 
        assert!(nodes[0].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
-       assert_eq!(err_msg.data, "Remote tried to fulfill/fail HTLC before it had been committed");
+       assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
 }
 
@@ -6818,7 +6821,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
 
        assert!(nodes[0].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
-       assert_eq!(err_msg.data, "Remote tried to fulfill/fail HTLC before it had been committed");
+       assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
 }
 
@@ -6852,7 +6855,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment()
 
        assert!(nodes[0].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
-       assert_eq!(err_msg.data, "Remote tried to fulfill/fail HTLC before it had been committed");
+       assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
 }
 
@@ -6934,7 +6937,7 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
 
        assert!(nodes[0].node.list_channels().is_empty());
        let err_msg = check_closed_broadcast!(nodes[0], true).unwrap();
-       assert_eq!(err_msg.data, "Remote tried to fulfill HTLC with an incorrect preimage");
+       assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str()));
        check_added_monitors!(nodes[0], 1);
 }
 
@@ -7328,7 +7331,7 @@ fn test_upfront_shutdown_script() {
        node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
        // Test that we enforce upfront_scriptpubkey: if a different one is provided at closing, we disconnect the peer
        nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
-       assert_eq!(check_closed_broadcast!(nodes[2], true).unwrap().data, "Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey");
+       assert!(regex::Regex::new(r"Got shutdown request with a scriptpubkey \([A-Fa-f0-9]+\) which did not match their previous scriptpubkey.").unwrap().is_match(check_closed_broadcast!(nodes[2], true).unwrap().data.as_str()));
        check_added_monitors!(nodes[2], 1);
 
        // We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
@@ -7409,7 +7412,7 @@ fn test_user_configurable_csv_delay() {
        let keys_manager: Arc<KeysInterface<ChanKeySigner = EnforcingChannelKeys>> = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet));
        if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), 1000000, 1000000, 0, &low_our_to_self_config) {
                match error {
-                       APIError::APIMisuseError { err } => { assert_eq!(err, "Configured with an unreasonable our_to_self_delay putting user funds at risks"); },
+                       APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
                        _ => panic!("Unexpected event"),
                }
        } else { assert!(false) }
@@ -7420,7 +7423,7 @@ fn test_user_configurable_csv_delay() {
        open_channel.to_self_delay = 200;
        if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel, 0, &low_our_to_self_config) {
                match error {
-                       ChannelError::Close(err) => { assert_eq!(err, "Configured with an unreasonable our_to_self_delay putting user funds at risks"); },
+                       ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str()));  },
                        _ => panic!("Unexpected event"),
                }
        } else { assert!(false); }
@@ -7434,7 +7437,7 @@ fn test_user_configurable_csv_delay() {
        if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
                match action {
                        &ErrorAction::SendErrorMessage { ref msg } => {
-                               assert_eq!(msg.data,"They wanted our payments to be delayed by a needlessly long period");
+                               assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
                        },
                        _ => { assert!(false); }
                }
@@ -7446,7 +7449,7 @@ fn test_user_configurable_csv_delay() {
        open_channel.to_self_delay = 200;
        if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel, 0, &high_their_to_self_config) {
                match error {
-                       ChannelError::Close(err) => { assert_eq!(err, "They wanted our payments to be delayed by a needlessly long period"); },
+                       ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
                        _ => panic!("Unexpected event"),
                }
        } else { assert!(false); }
index 24e8120de6cc4e14c32fd07d95903621b1f9a2eb..bd5d23504b19961b63224c301853ce26971fd68e 100644 (file)
@@ -461,7 +461,7 @@ pub enum ErrorAction {
 /// An Err type for failure to process messages.
 pub struct LightningError {
        /// A human-readable message describing the error
-       pub err: &'static str,
+       pub err: String,
        /// The action which should be taken against the offending peer.
        pub action: ErrorAction,
 }
@@ -701,7 +701,7 @@ impl fmt::Display for DecodeError {
 
 impl fmt::Debug for LightningError {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-               f.write_str(self.err)
+               f.write_str(self.err.as_str())
        }
 }
 
index 36a6ccabc070bdca0e9b68a09ce236e60569cc62..08e9e6a88cecb6f30bd6824a41da493b243f72b7 100644 (file)
@@ -146,11 +146,11 @@ pub(super) fn build_onion_payloads(path: &Vec<RouteHop>, total_msat: u64, paymen
                });
                cur_value_msat += hop.fee_msat;
                if cur_value_msat >= 21000000 * 100000000 * 1000 {
-                       return Err(APIError::RouteError{err: "Channel fees overflowed?!"});
+                       return Err(APIError::RouteError{err: "Channel fees overflowed?"});
                }
                cur_cltv += hop.cltv_expiry_delta as u32;
                if cur_cltv >= 500000000 {
-                       return Err(APIError::RouteError{err: "Channel CLTV overflowed?!"});
+                       return Err(APIError::RouteError{err: "Channel CLTV overflowed?"});
                }
                last_short_channel_id = hop.short_channel_id;
        }
index 152426577adf33a7980c00890c3eae830a738b4f..68f3c505b9beabf342b2938784f92f63537650ee 100644 (file)
@@ -11,6 +11,7 @@ use bitcoin::secp256k1;
 
 use util::chacha20poly1305rfc::ChaCha20Poly1305RFC;
 use util::byte_utils;
+use bitcoin::hashes::hex::ToHex;
 
 // Sha256("Noise_XK_secp256k1_ChaChaPoly_SHA256")
 const NOISE_CK: [u8; 32] = [0x26, 0x40, 0xf5, 0x2e, 0xeb, 0xcd, 0x9e, 0x88, 0x29, 0x58, 0x95, 0x1c, 0x79, 0x42, 0x50, 0xee, 0xdb, 0x28, 0x00, 0x2c, 0x05, 0xd7, 0xdc, 0x2e, 0xa0, 0xf1, 0x95, 0x40, 0x60, 0x42, 0xca, 0xf1];
@@ -139,7 +140,7 @@ impl PeerChannelEncryptor {
 
                let mut chacha = ChaCha20Poly1305RFC::new(key, &nonce, h);
                if !chacha.decrypt(&cyphertext[0..cyphertext.len() - 16], res, &cyphertext[cyphertext.len() - 16..]) {
-                       return Err(LightningError{err: "Bad MAC", action: msgs::ErrorAction::DisconnectPeer{ msg: None }});
+                       return Err(LightningError{err: "Bad MAC".to_owned(), action: msgs::ErrorAction::DisconnectPeer{ msg: None }});
                }
                Ok(())
        }
@@ -193,11 +194,11 @@ impl PeerChannelEncryptor {
                assert_eq!(act.len(), 50);
 
                if act[0] != 0 {
-                       return Err(LightningError{err: "Unknown handshake version number", action: msgs::ErrorAction::DisconnectPeer{ msg: None }});
+                       return Err(LightningError{err: format!("Unknown handshake version number {}", act[0]), action: msgs::ErrorAction::DisconnectPeer{ msg: None }});
                }
 
                let their_pub = match PublicKey::from_slice(&act[1..34]) {
-                       Err(_) => return Err(LightningError{err: "Invalid public key", action: msgs::ErrorAction::DisconnectPeer{ msg: None }}),
+                       Err(_) => return Err(LightningError{err: format!("Invalid public key {}", &act[1..34].to_hex()), action: msgs::ErrorAction::DisconnectPeer{ msg: None }}),
                        Ok(key) => key,
                };
 
@@ -330,14 +331,14 @@ impl PeerChannelEncryptor {
                                                        panic!("Requested act at wrong step");
                                                }
                                                if act_three[0] != 0 {
-                                                       return Err(LightningError{err: "Unknown handshake version number", action: msgs::ErrorAction::DisconnectPeer{ msg: None }});
+                                                       return Err(LightningError{err: format!("Unknown handshake version number {}", act_three[0]), action: msgs::ErrorAction::DisconnectPeer{ msg: None }});
                                                }
 
                                                let mut their_node_id = [0; 33];
                                                PeerChannelEncryptor::decrypt_with_ad(&mut their_node_id, 1, &temp_k2.unwrap(), &bidirectional_state.h, &act_three[1..50])?;
                                                self.their_node_id = Some(match PublicKey::from_slice(&their_node_id) {
                                                        Ok(key) => key,
-                                                       Err(_) => return Err(LightningError{err: "Bad node_id from peer", action: msgs::ErrorAction::DisconnectPeer{ msg: None }}),
+                                                       Err(_) => return Err(LightningError{err: format!("Bad node_id from peer, {}", &their_node_id.to_hex()), action: msgs::ErrorAction::DisconnectPeer{ msg: None }}),
                                                });
 
                                                let mut sha = Sha256::engine();
index b6fb7a948847d7c72e5fdc8dea1d920b6820ebe3..9c93b246f0762b72e2601151d5eff2f64ca7cdee 100644 (file)
@@ -22,6 +22,7 @@ use std::sync::atomic::{AtomicUsize, Ordering};
 use std::collections::BTreeMap;
 use std::collections::btree_map::Entry as BtreeEntry;
 use std::ops::Deref;
+use bitcoin::hashes::hex::ToHex;
 
 /// Receives and validates network updates from peers,
 /// stores authentic and relevant data as a network graph.
@@ -74,7 +75,7 @@ macro_rules! secp_verify_sig {
        ( $secp_ctx: expr, $msg: expr, $sig: expr, $pubkey: expr ) => {
                match $secp_ctx.verify($msg, $sig, $pubkey) {
                        Ok(_) => {},
-                       Err(_) => return Err(LightningError{err: "Invalid signature from remote node", action: ErrorAction::IgnoreError}),
+                       Err(_) => return Err(LightningError{err: "Invalid signature from remote node".to_owned(), action: ErrorAction::IgnoreError}),
                }
        };
 }
@@ -86,7 +87,7 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
 
        fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> {
                if msg.contents.node_id_1 == msg.contents.node_id_2 || msg.contents.bitcoin_key_1 == msg.contents.bitcoin_key_2 {
-                       return Err(LightningError{err: "Channel announcement node had a channel with itself", action: ErrorAction::IgnoreError});
+                       return Err(LightningError{err: "Channel announcement node had a channel with itself".to_owned(), action: ErrorAction::IgnoreError});
                }
 
                let checked_utxo = match self.chain_monitor.get_chain_utxo(msg.contents.chain_hash, msg.contents.short_channel_id) {
@@ -97,7 +98,7 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
                                                                    .push_opcode(opcodes::all::OP_PUSHNUM_2)
                                                                    .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh();
                                if script_pubkey != expected_script {
-                                       return Err(LightningError{err: "Channel announcement keys didn't match on-chain script", action: ErrorAction::IgnoreError});
+                                       return Err(LightningError{err: format!("Channel announcement key ({}) didn't match on-chain script ({})", script_pubkey.to_hex(), expected_script.to_hex()), action: ErrorAction::IgnoreError});
                                }
                                //TODO: Check if value is worth storing, use it to inform routing, and compare it
                                //to the new HTLC max field in channel_update
@@ -108,10 +109,10 @@ impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for N
                                false
                        },
                        Err(ChainError::NotWatched) => {
-                               return Err(LightningError{err: "Channel announced on an unknown chain", action: ErrorAction::IgnoreError});
+                               return Err(LightningError{err: format!("Channel announced on an unknown chain ({})", msg.contents.chain_hash.encode().to_hex()), action: ErrorAction::IgnoreError});
                        },
                        Err(ChainError::UnknownTx) => {
-                               return Err(LightningError{err: "Channel announced without corresponding UTXO entry", action: ErrorAction::IgnoreError});
+                               return Err(LightningError{err: "Channel announced without corresponding UTXO entry".to_owned(), action: ErrorAction::IgnoreError});
                        },
                };
                let result = self.network_graph.write().unwrap().update_channel_from_announcement(msg, checked_utxo, Some(&self.secp_ctx));
@@ -522,11 +523,11 @@ impl NetworkGraph {
                }
 
                match self.nodes.get_mut(&msg.contents.node_id) {
-                       None => Err(LightningError{err: "No existing channels for node_announcement", action: ErrorAction::IgnoreError}),
+                       None => Err(LightningError{err: "No existing channels for node_announcement".to_owned(), action: ErrorAction::IgnoreError}),
                        Some(node) => {
                                if let Some(node_info) = node.announcement_info.as_ref() {
                                        if node_info.last_update  >= msg.contents.timestamp {
-                                               return Err(LightningError{err: "Update older than last processed update", action: ErrorAction::IgnoreError});
+                                               return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError});
                                        }
                                }
 
@@ -588,7 +589,7 @@ impl NetworkGraph {
                                        Self::remove_channel_in_nodes(&mut self.nodes, &entry.get(), msg.contents.short_channel_id);
                                        *entry.get_mut() = chan_info;
                                } else {
-                                       return Err(LightningError{err: "Already have knowledge of channel", action: ErrorAction::IgnoreError})
+                                       return Err(LightningError{err: "Already have knowledge of channel".to_owned(), action: ErrorAction::IgnoreError})
                                }
                        },
                        BtreeEntry::Vacant(entry) => {
@@ -656,13 +657,13 @@ impl NetworkGraph {
                let chan_was_enabled;
 
                match self.channels.get_mut(&msg.contents.short_channel_id) {
-                       None => return Err(LightningError{err: "Couldn't find channel for update", action: ErrorAction::IgnoreError}),
+                       None => return Err(LightningError{err: "Couldn't find channel for update".to_owned(), action: ErrorAction::IgnoreError}),
                        Some(channel) => {
                                macro_rules! maybe_update_channel_info {
                                        ( $target: expr, $src_node: expr) => {
                                                if let Some(existing_chan_info) = $target.as_ref() {
                                                        if existing_chan_info.last_update >= msg.contents.timestamp {
-                                                               return Err(LightningError{err: "Update older than last processed update", action: ErrorAction::IgnoreError});
+                                                               return Err(LightningError{err: "Update older than last processed update".to_owned(), action: ErrorAction::IgnoreError});
                                                        }
                                                        chan_was_enabled = existing_chan_info.enabled;
                                                } else {
index dfdfb5e8f877bc515e92cb2de6559bb64bd80ede..883d0015fbb464a3d3ec994db838ead6930104aa 100644 (file)
@@ -165,11 +165,11 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, targ
        // TODO: Obviously *only* using total fee cost sucks. We should consider weighting by
        // uptime/success in using a node in the past.
        if *target == *our_node_id {
-               return Err(LightningError{err: "Cannot generate a route to ourselves", action: ErrorAction::IgnoreError});
+               return Err(LightningError{err: "Cannot generate a route to ourselves".to_owned(), action: ErrorAction::IgnoreError});
        }
 
        if final_value_msat > 21_000_000 * 1_0000_0000 * 1000 {
-               return Err(LightningError{err: "Cannot generate a route of more value than all existing satoshis", action: ErrorAction::IgnoreError});
+               return Err(LightningError{err: "Cannot generate a route of more value than all existing satoshis".to_owned(), action: ErrorAction::IgnoreError});
        }
 
        // We do a dest-to-source Dijkstra's sorting by each node's distance from the destination
@@ -209,7 +209,7 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, targ
                        first_hop_targets.insert(chan.remote_network_id, (short_channel_id, chan.counterparty_features.clone()));
                }
                if first_hop_targets.is_empty() {
-                       return Err(LightningError{err: "Cannot route when there are no outbound routes away from us", action: ErrorAction::IgnoreError});
+                       return Err(LightningError{err: "Cannot route when there are no outbound routes away from us".to_owned(), action: ErrorAction::IgnoreError});
                }
        }
 
@@ -374,7 +374,7 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, targ
 
                                let new_entry = match dist.remove(&res.last().unwrap().pubkey) {
                                        Some(hop) => hop.3,
-                                       None => return Err(LightningError{err: "Failed to find a non-fee-overflowing path to the given destination", action: ErrorAction::IgnoreError}),
+                                       None => return Err(LightningError{err: "Failed to find a non-fee-overflowing path to the given destination".to_owned(), action: ErrorAction::IgnoreError}),
                                };
                                res.last_mut().unwrap().fee_msat = new_entry.fee_msat;
                                res.last_mut().unwrap().cltv_expiry_delta = new_entry.cltv_expiry_delta;
@@ -395,7 +395,7 @@ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, targ
                }
        }
 
-       Err(LightningError{err: "Failed to find a path to the given destination", action: ErrorAction::IgnoreError})
+       Err(LightningError{err: "Failed to find a path to the given destination".to_owned(), action: ErrorAction::IgnoreError})
 }
 
 #[cfg(test)]
@@ -881,7 +881,7 @@ mod tests {
                assert_eq!(route.paths[0][0].fee_msat, 200);
                assert_eq!(route.paths[0][0].cltv_expiry_delta, (13 << 8) | 1);
                assert_eq!(route.paths[0][0].node_features.le_flags(), &vec![0b11]); // it should also override our view of their features
-               assert_eq!(route.paths[0][0].channel_features.le_flags(), &Vec::new()); // No feature flags will meet the relevant-to-channel conversion
+               assert_eq!(route.paths[0][0].channel_features.le_flags(), &Vec::<u8>::new()); // No feature flags will meet the relevant-to-channel conversion
 
                assert_eq!(route.paths[0][1].pubkey, node3);
                assert_eq!(route.paths[0][1].short_channel_id, 13);
@@ -945,7 +945,7 @@ mod tests {
                assert_eq!(route.paths[0][0].fee_msat, 200);
                assert_eq!(route.paths[0][0].cltv_expiry_delta, (13 << 8) | 1);
                assert_eq!(route.paths[0][0].node_features.le_flags(), &vec![0b11]); // it should also override our view of their features
-               assert_eq!(route.paths[0][0].channel_features.le_flags(), &Vec::new()); // No feature flags will meet the relevant-to-channel conversion
+               assert_eq!(route.paths[0][0].channel_features.le_flags(), &Vec::<u8>::new()); // No feature flags will meet the relevant-to-channel conversion
 
                assert_eq!(route.paths[0][1].pubkey, node3);
                assert_eq!(route.paths[0][1].short_channel_id, 13);
@@ -1008,7 +1008,7 @@ mod tests {
                assert_eq!(route.paths[0][0].fee_msat, 200);
                assert_eq!(route.paths[0][0].cltv_expiry_delta, (13 << 8) | 1);
                assert_eq!(route.paths[0][0].node_features.le_flags(), &vec![0b11]);
-               assert_eq!(route.paths[0][0].channel_features.le_flags(), &Vec::new()); // No feature flags will meet the relevant-to-channel conversion
+               assert_eq!(route.paths[0][0].channel_features.le_flags(), &Vec::<u8>::new()); // No feature flags will meet the relevant-to-channel conversion
 
                assert_eq!(route.paths[0][1].pubkey, node3);
                assert_eq!(route.paths[0][1].short_channel_id, 13);
@@ -1082,8 +1082,8 @@ mod tests {
                assert_eq!(route.paths[0][4].short_channel_id, 8);
                assert_eq!(route.paths[0][4].fee_msat, 100);
                assert_eq!(route.paths[0][4].cltv_expiry_delta, 42);
-               assert_eq!(route.paths[0][4].node_features.le_flags(), &Vec::new()); // We dont pass flags in from invoices yet
-               assert_eq!(route.paths[0][4].channel_features.le_flags(), &Vec::new()); // We can't learn any flags from invoices, sadly
+               assert_eq!(route.paths[0][4].node_features.le_flags(), &Vec::<u8>::new()); // We dont pass flags in from invoices yet
+               assert_eq!(route.paths[0][4].channel_features.le_flags(), &Vec::<u8>::new()); // We can't learn any flags from invoices, sadly
 
                // Simple test with outbound channel to 4 to test that last_hops and first_hops connect
                let our_chans = vec![channelmanager::ChannelDetails {
@@ -1105,14 +1105,14 @@ mod tests {
                assert_eq!(route.paths[0][0].fee_msat, 0);
                assert_eq!(route.paths[0][0].cltv_expiry_delta, (8 << 8) | 1);
                assert_eq!(route.paths[0][0].node_features.le_flags(), &vec![0b11]);
-               assert_eq!(route.paths[0][0].channel_features.le_flags(), &Vec::new()); // No feature flags will meet the relevant-to-channel conversion
+               assert_eq!(route.paths[0][0].channel_features.le_flags(), &Vec::<u8>::new()); // No feature flags will meet the relevant-to-channel conversion
 
                assert_eq!(route.paths[0][1].pubkey, node7);
                assert_eq!(route.paths[0][1].short_channel_id, 8);
                assert_eq!(route.paths[0][1].fee_msat, 100);
                assert_eq!(route.paths[0][1].cltv_expiry_delta, 42);
-               assert_eq!(route.paths[0][1].node_features.le_flags(), &Vec::new()); // We dont pass flags in from invoices yet
-               assert_eq!(route.paths[0][1].channel_features.le_flags(), &Vec::new()); // We can't learn any flags from invoices, sadly
+               assert_eq!(route.paths[0][1].node_features.le_flags(), &Vec::<u8>::new()); // We dont pass flags in from invoices yet
+               assert_eq!(route.paths[0][1].channel_features.le_flags(), &Vec::<u8>::new()); // We can't learn any flags from invoices, sadly
 
                last_hops[0].fees.base_msat = 1000;
 
@@ -1147,8 +1147,8 @@ mod tests {
                assert_eq!(route.paths[0][3].short_channel_id, 10);
                assert_eq!(route.paths[0][3].fee_msat, 100);
                assert_eq!(route.paths[0][3].cltv_expiry_delta, 42);
-               assert_eq!(route.paths[0][3].node_features.le_flags(), &Vec::new()); // We dont pass flags in from invoices yet
-               assert_eq!(route.paths[0][3].channel_features.le_flags(), &Vec::new()); // We can't learn any flags from invoices, sadly
+               assert_eq!(route.paths[0][3].node_features.le_flags(), &Vec::<u8>::new()); // We dont pass flags in from invoices yet
+               assert_eq!(route.paths[0][3].channel_features.le_flags(), &Vec::<u8>::new()); // We can't learn any flags from invoices, sadly
 
                // ...but still use 8 for larger payments as 6 has a variable feerate
                let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &node7, None, &last_hops, 2000, 42, Arc::clone(&logger)).unwrap();
@@ -1188,7 +1188,7 @@ mod tests {
                assert_eq!(route.paths[0][4].short_channel_id, 8);
                assert_eq!(route.paths[0][4].fee_msat, 2000);
                assert_eq!(route.paths[0][4].cltv_expiry_delta, 42);
-               assert_eq!(route.paths[0][4].node_features.le_flags(), &Vec::new()); // We dont pass flags in from invoices yet
-               assert_eq!(route.paths[0][4].channel_features.le_flags(), &Vec::new()); // We can't learn any flags from invoices, sadly
+               assert_eq!(route.paths[0][4].node_features.le_flags(), &Vec::<u8>::new()); // We dont pass flags in from invoices yet
+               assert_eq!(route.paths[0][4].channel_features.le_flags(), &Vec::<u8>::new()); // We can't learn any flags from invoices, sadly
        }
 }
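
As an aside on the `Vec::<u8>::new()` changes in the router test assertions above: the turbofish only spells out the element type of the expected empty vector, so the comparison against the `&Vec<u8>` returned by `le_flags()` does not rely on type inference. A standalone illustration, not taken from the tests themselves:

	// Stating the element type explicitly; behaviour is identical to an inferred Vec::new().
	let flags: &Vec<u8> = &vec![];
	assert_eq!(flags, &Vec::<u8>::new());
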
index 1b29916e0f56b5e700032ebdff470253eb48f30b..7119c3a023a17d39dd1f657bad2e35f80477bc75 100644 (file)
@@ -9,7 +9,7 @@ pub enum APIError {
        /// are documented, but generally indicates some precondition of a function was violated.
        APIMisuseError {
                /// A human-readable error message
-               err: &'static str
+               err: String
        },
        /// Due to a high feerate, we were unable to complete the request.
        /// For example, this may be returned if the feerate implies we cannot open a channel at the
@@ -31,7 +31,7 @@ pub enum APIError {
        /// peer, channel at capacity, channel shutting down, etc.
        ChannelUnavailable {
                /// A human-readable error message
-               err: &'static str
+               err: String
        },
        /// An attempt to call add/update_monitor returned an Err (ie you did this!), causing the
        /// attempted action to fail.
index 2134c26f9f605e81126bab0632e25eac4227c523..deaa4680cd18e13ea95d085b049226c920c29790 100644 (file)
@@ -22,6 +22,8 @@ use bitcoin::hash_types::{Txid, BlockHash};
 
 use bitcoin::secp256k1::{SecretKey, PublicKey, Secp256k1, Signature};
 
+use regex;
+
 use std::time::Duration;
 use std::sync::Mutex;
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
@@ -240,15 +242,15 @@ impl TestRoutingMessageHandler {
 }
 impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
        fn handle_node_announcement(&self, _msg: &msgs::NodeAnnouncement) -> Result<bool, msgs::LightningError> {
-               Err(msgs::LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+               Err(msgs::LightningError { err: "".to_owned(), action: msgs::ErrorAction::IgnoreError })
        }
        fn handle_channel_announcement(&self, _msg: &msgs::ChannelAnnouncement) -> Result<bool, msgs::LightningError> {
                self.chan_anns_recvd.fetch_add(1, Ordering::AcqRel);
-               Err(msgs::LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+               Err(msgs::LightningError { err: "".to_owned(), action: msgs::ErrorAction::IgnoreError })
        }
        fn handle_channel_update(&self, _msg: &msgs::ChannelUpdate) -> Result<bool, msgs::LightningError> {
                self.chan_upds_recvd.fetch_add(1, Ordering::AcqRel);
-               Err(msgs::LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+               Err(msgs::LightningError { err: "".to_owned(), action: msgs::ErrorAction::IgnoreError })
        }
        fn handle_htlc_fail_channel_update(&self, _update: &msgs::HTLCFailChannelUpdate) {}
        fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, Option<msgs::ChannelUpdate>, Option<msgs::ChannelUpdate>)> {
@@ -300,6 +302,30 @@ impl TestLogger {
                let log_entries = self.lines.lock().unwrap();
                assert_eq!(log_entries.get(&(module, line)), Some(&count));
        }
+
+       /// Search for the number of occurrences of logged lines which
+       /// 1. belong to the specified module and
+       /// 2. contain `line` as a substring,
+       /// and assert that the number of occurrences equals the given `count`.
+       pub fn assert_log_contains(&self, module: String, line: String, count: usize) {
+               let log_entries = self.lines.lock().unwrap();
+               let l: usize = log_entries.iter().filter(|&(&(ref m, ref l), _c)| {
+                       m == &module && l.contains(line.as_str())
+               }).map(|(_, c) | { c }).sum();
+               assert_eq!(l, count)
+       }
+
+       /// Search for the number of occurrences of logged lines which
+       /// 1. belong to the specified module and
+       /// 2. match the given regex pattern,
+       /// and assert that the number of occurrences equals the given `count`.
+       pub fn assert_log_regex(&self, module: String, pattern: regex::Regex, count: usize) {
+               let log_entries = self.lines.lock().unwrap();
+               let l: usize = log_entries.iter().filter(|&(&(ref m, ref l), _c)| {
+                       m == &module && pattern.is_match(&l)
+               }).map(|(_, c) | { c }).sum();
+               assert_eq!(l, count)
+       }
 }
 
 impl Logger for TestLogger {
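
For reference, a minimal sketch of how a test could drive the two new TestLogger helpers; the module path and logged text below are illustrative placeholders rather than output produced by this patch:

	let logger = TestLogger::new();
	// ... exercise the code under test so that it logs through `logger` ...

	// Exactly one logged line from the given module should contain the substring.
	logger.assert_log_contains("lightning::routing::network_graph".to_string(),
		"Channel announced on an unknown chain".to_string(), 1);

	// The same check, but matching the interpolated hex value with a regex.
	logger.assert_log_regex("lightning::routing::network_graph".to_string(),
		regex::Regex::new(r"unknown chain \([0-9a-f]+\)").unwrap(), 1);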