}
}
-static mut IN_RESTORE: bool = false;
pub struct TestChannelMonitor {
+ pub logger: Arc<dyn Logger>,
pub simple_monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, Arc<chaininterface::BroadcasterInterface>>>,
pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
- pub latest_good_update: Mutex<HashMap<OutPoint, Vec<u8>>>,
- pub latest_update_good: Mutex<HashMap<OutPoint, bool>>,
- pub latest_updates_good_at_last_ser: Mutex<HashMap<OutPoint, bool>>,
+ // If we reload a node with an old copy of ChannelMonitors, the ChannelManager deserialization
+ // logic will automatically force-close our channels for us (as we don't have an up-to-date
+ // monitor, implying we are unable to punish misbehaving counterparties). Because this test
+ // "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
+ // fully-serialized monitor state here, as well as the corresponding update_id.
+ pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
pub should_update_manager: atomic::AtomicBool,
}
impl TestChannelMonitor {
pub fn new(chain_monitor: Arc<dyn chaininterface::ChainWatchInterface>, broadcaster: Arc<dyn chaininterface::BroadcasterInterface>, logger: Arc<dyn Logger>, feeest: Arc<dyn chaininterface::FeeEstimator>) -> Self {
Self {
- simple_monitor: Arc::new(channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger, feeest)),
+ simple_monitor: Arc::new(channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger.clone(), feeest)),
+ logger,
update_ret: Mutex::new(Ok(())),
- latest_good_update: Mutex::new(HashMap::new()),
- latest_update_good: Mutex::new(HashMap::new()),
- latest_updates_good_at_last_ser: Mutex::new(HashMap::new()),
+ latest_monitors: Mutex::new(HashMap::new()),
should_update_manager: atomic::AtomicBool::new(false),
}
}
}
impl channelmonitor::ManyChannelMonitor<EnforcingChannelKeys> for TestChannelMonitor {
- fn add_update_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
- let ret = self.update_ret.lock().unwrap().clone();
- if let Ok(()) = ret {
- let mut ser = VecWriter(Vec::new());
- monitor.write_for_disk(&mut ser).unwrap();
- self.latest_good_update.lock().unwrap().insert(funding_txo, ser.0);
- match self.latest_update_good.lock().unwrap().entry(funding_txo) {
- hash_map::Entry::Vacant(e) => { e.insert(true); },
- hash_map::Entry::Occupied(mut e) => {
- if !e.get() && unsafe { IN_RESTORE } {
- // Technically we can't consider an update to be "good" unless we're doing
- // it in response to a test_restore_channel_monitor as the channel may
- // still be waiting on such a call, so only set us to good if we're in the
- // middle of a restore call.
- e.insert(true);
- }
- },
- }
- self.should_update_manager.store(true, atomic::Ordering::Relaxed);
- } else {
- self.latest_update_good.lock().unwrap().insert(funding_txo, false);
+ fn add_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
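+		// Persist the complete initial monitor; all later changes for this channel arrive as
+		// deltas via update_monitor below.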
+ let mut ser = VecWriter(Vec::new());
+ monitor.write_for_disk(&mut ser).unwrap();
+		if self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)).is_some() {
+ panic!("Already had monitor pre-add_monitor");
}
- assert!(self.simple_monitor.add_update_monitor(funding_txo, monitor).is_ok());
- ret
+ self.should_update_manager.store(true, atomic::Ordering::Relaxed);
+ assert!(self.simple_monitor.add_monitor(funding_txo, monitor).is_ok());
+ self.update_ret.lock().unwrap().clone()
+ }
+
+ fn update_monitor(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
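+		// Deserialize our last persisted copy, apply the delta, and re-persist the result,
+		// exercising the same path an on-disk monitor store would.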
+ let mut map_lock = self.latest_monitors.lock().unwrap();
+ let mut map_entry = match map_lock.entry(funding_txo) {
+ hash_map::Entry::Occupied(entry) => entry,
+ hash_map::Entry::Vacant(_) => panic!("Didn't have monitor on update call"),
+ };
+ let mut deserialized_monitor = <(Sha256d, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::
+ read(&mut Cursor::new(&map_entry.get().1), Arc::clone(&self.logger)).unwrap().1;
+ deserialized_monitor.update_monitor(update.clone()).unwrap();
+ let mut ser = VecWriter(Vec::new());
+ deserialized_monitor.write_for_disk(&mut ser).unwrap();
+ map_entry.insert((update.update_id, ser.0));
+ self.should_update_manager.store(true, atomic::Ordering::Relaxed);
+ self.update_ret.lock().unwrap().clone()
}
fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate> {
config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
let mut monitors = HashMap::new();
- let mut old_monitors = $old_monitors.latest_good_update.lock().unwrap();
- for (outpoint, monitor_ser) in old_monitors.drain() {
+ let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
+ for (outpoint, (update_id, monitor_ser)) in old_monitors.drain() {
monitors.insert(outpoint, <(Sha256d, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut Cursor::new(&monitor_ser), Arc::clone(&logger)).expect("Failed to read monitor").1);
- monitor.latest_good_update.lock().unwrap().insert(outpoint, monitor_ser);
+ monitor.latest_monitors.lock().unwrap().insert(outpoint, (update_id, monitor_ser));
}
let mut monitor_refs = HashMap::new();
for (outpoint, monitor) in monitors.iter_mut() {
channel_monitors: &mut monitor_refs,
};
- let res = (<(Sha256d, ChannelManager<EnforcingChannelKeys, Arc<TestChannelMonitor>, Arc<TestBroadcaster>>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, monitor);
- for (_, was_good) in $old_monitors.latest_updates_good_at_last_ser.lock().unwrap().iter() {
- if !was_good {
- // If the last time we updated a monitor we didn't successfully update (and we
- // have sense updated our serialized copy of the ChannelManager) we may
- // force-close the channel on our counterparty cause we know we're missing
- // something. Thus, we just return here since we can't continue to test.
- return;
- }
- }
- res
+ (<(Sha256d, ChannelManager<EnforcingChannelKeys, Arc<TestChannelMonitor>, Arc<TestBroadcaster>>)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, monitor)
} }
}
};
$source.handle_accept_channel(&$dest.get_our_node_id(), InitFeatures::supported(), &accept_channel);
+ let funding_output;
{
let events = $source.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
let tx = Transaction { version: $chan_id, lock_time: 0, input: Vec::new(), output: vec![TxOut {
value: *channel_value_satoshis, script_pubkey: output_script.clone(),
}]};
- let funding_output = OutPoint::new(tx.txid(), 0);
+ funding_output = OutPoint::new(tx.txid(), 0);
$source.funding_transaction_generated(&temporary_channel_id, funding_output);
channel_txn.push(tx);
} else { panic!("Wrong event type"); }
if let events::Event::FundingBroadcastSafe { .. } = events[0] {
} else { panic!("Wrong event type"); }
}
+ funding_output
} }
}
let mut nodes = [node_a, node_b, node_c];
- make_channel!(nodes[0], nodes[1], 0);
- make_channel!(nodes[1], nodes[2], 1);
+ let chan_1_funding = make_channel!(nodes[0], nodes[1], 0);
+ let chan_2_funding = make_channel!(nodes[1], nodes[2], 1);
for node in nodes.iter() {
confirm_txn!(node);
0x03 => *monitor_a.update_ret.lock().unwrap() = Ok(()),
0x04 => *monitor_b.update_ret.lock().unwrap() = Ok(()),
0x05 => *monitor_c.update_ret.lock().unwrap() = Ok(()),
- 0x06 => { unsafe { IN_RESTORE = true }; nodes[0].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
- 0x07 => { unsafe { IN_RESTORE = true }; nodes[1].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
- 0x08 => { unsafe { IN_RESTORE = true }; nodes[2].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
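+			// 0x06-0x08 (and 0x24) simulate a pending monitor update completing: fetch the
+			// latest persisted update_id and hand it back via channel_monitor_updated.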
+ 0x06 => {
+ if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
+ nodes[0].channel_monitor_updated(&chan_1_funding, *id);
+ }
+ },
+ 0x07 => {
+ if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
+ nodes[1].channel_monitor_updated(&chan_1_funding, *id);
+ }
+ },
+ 0x24 => {
+ if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
+ nodes[1].channel_monitor_updated(&chan_2_funding, *id);
+ }
+ },
+ 0x08 => {
+ if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
+ nodes[2].channel_monitor_updated(&chan_2_funding, *id);
+ }
+ },
0x09 => send_payment!(nodes[0], (&nodes[1], chan_a)),
0x0a => send_payment!(nodes[1], (&nodes[0], chan_a)),
0x0b => send_payment!(nodes[1], (&nodes[2], chan_b)),
nodes[2] = node_c.clone();
monitor_c = new_monitor_c;
},
+ // 0x24 defined above
_ => test_return!(),
}
- if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
- node_a_ser.0.clear();
- nodes[0].write(&mut node_a_ser).unwrap();
- monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
- *monitor_a.latest_updates_good_at_last_ser.lock().unwrap() = monitor_a.latest_update_good.lock().unwrap().clone();
- }
- if monitor_b.should_update_manager.load(atomic::Ordering::Relaxed) {
- node_b_ser.0.clear();
- nodes[1].write(&mut node_b_ser).unwrap();
- monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
- *monitor_b.latest_updates_good_at_last_ser.lock().unwrap() = monitor_b.latest_update_good.lock().unwrap().clone();
- }
- if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
- node_c_ser.0.clear();
- nodes[2].write(&mut node_c_ser).unwrap();
- monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
- *monitor_c.latest_updates_good_at_last_ser.lock().unwrap() = monitor_c.latest_update_good.lock().unwrap().clone();
- }
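+		// The persisted monitors are always at least as fresh as the manager, so (unlike the
+		// old "good update" tracking) it is safe to re-serialize the managers every iteration.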
+ node_a_ser.0.clear();
+ nodes[0].write(&mut node_a_ser).unwrap();
+ monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
+ node_b_ser.0.clear();
+ nodes[1].write(&mut node_b_ser).unwrap();
+ monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
+ node_c_ser.0.clear();
+ nodes[2].write(&mut node_c_ser).unwrap();
+ monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
}
}
/// (TODO: We shouldn't require that, and should have an API to get them at deser time, due mostly
/// to the possibility of reentrancy issues by calling the user's code during our deserialization
/// routine).
-/// TODO: remove Clone once we start returning ChannelUpdate objects instead of copying ChannelMonitor
+/// TODO: We should remove Clone by instead requesting a new ChannelKeys copy when we create
+/// ChannelMonitors instead of expecting to clone the one out of the Channel into the monitors.
pub trait ChannelKeys : Send+Clone {
/// Gets the private key for the anchor tx
fn funding_key<'a>(&'a self) -> &'a SecretKey;
}
}
+impl_writeable!(OutPoint, 0, { txid, index });
+
#[cfg(test)]
mod tests {
use chain::transaction::OutPoint;
use ln::channelmanager::{PaymentHash, PaymentPreimage};
use ln::msgs::DecodeError;
use util::ser::{Readable, Writeable, Writer, WriterWriteAdaptor};
+use util::byte_utils;
use secp256k1::key::{SecretKey, PublicKey};
use secp256k1::{Secp256k1, Signature};
res
}
+/// Implements the per-commitment secret storage scheme from
+/// [BOLT 3](https://github.com/lightningnetwork/lightning-rfc/blob/dcbf8583976df087c79c3ce0b535311212e6812d/03-transactions.md#efficient-per-commitment-secret-storage).
+///
+/// Allows us to keep track of all of the revocation secrets of counterparties in just 50*32 bytes
+/// or so.
+#[derive(Clone)]
+pub(super) struct CounterpartyCommitmentSecrets {
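+	// Bucket i holds the most recently provided secret whose index has exactly i trailing
+	// zeros; such a secret re-derives every earlier-revealed secret sharing its high bits.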
+ old_secrets: [([u8; 32], u64); 49],
+}
+
+impl PartialEq for CounterpartyCommitmentSecrets {
+ fn eq(&self, other: &Self) -> bool {
+ for (&(ref secret, ref idx), &(ref o_secret, ref o_idx)) in self.old_secrets.iter().zip(other.old_secrets.iter()) {
+ if secret != o_secret || idx != o_idx {
+ return false
+ }
+ }
+ true
+ }
+}
+
+impl CounterpartyCommitmentSecrets {
+ pub(super) fn new() -> Self {
+ Self { old_secrets: [([0; 32], 1 << 48); 49], }
+ }
+
+ #[inline]
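+	// Returns the number of trailing zero bits in idx, i.e., the bucket the secret belongs in.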
+ fn place_secret(idx: u64) -> u8 {
+ for i in 0..48 {
+ if idx & (1 << i) == (1 << i) {
+ return i
+ }
+ }
+ 48
+ }
+
+ pub(super) fn get_min_seen_secret(&self) -> u64 {
+ //TODO This can be optimized?
+ let mut min = 1 << 48;
+ for &(_, idx) in self.old_secrets.iter() {
+ if idx < min {
+ min = idx;
+ }
+ }
+ min
+ }
+
+ #[inline]
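+	// Given a secret whose index agrees with idx above the low `bits` bits, derive the secret
+	// for idx: scan the low bits from high to low, flipping each set bit and hashing (BOLT 3).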
+ pub(super) fn derive_secret(secret: [u8; 32], bits: u8, idx: u64) -> [u8; 32] {
+ let mut res: [u8; 32] = secret;
+ for i in 0..bits {
+ let bitpos = bits - 1 - i;
+ if idx & (1 << bitpos) == (1 << bitpos) {
+ res[(bitpos / 8) as usize] ^= 1 << (bitpos & 7);
+ res = Sha256::hash(&res).into_inner();
+ }
+ }
+ res
+ }
+
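+	// A genuine new secret must re-derive every secret already stored in lower buckets;
+	// otherwise the counterparty has handed us inconsistent data.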
+ pub(super) fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), ()> {
+ let pos = Self::place_secret(idx);
+ for i in 0..pos {
+ let (old_secret, old_idx) = self.old_secrets[i as usize];
+ if Self::derive_secret(secret, pos, old_idx) != old_secret {
+ return Err(());
+ }
+ }
+ if self.get_min_seen_secret() <= idx {
+ return Ok(());
+ }
+ self.old_secrets[pos as usize] = (secret, idx);
+ Ok(())
+ }
+
+	/// Can only fail (returning None) if idx is less than get_min_seen_secret().
+ pub(super) fn get_secret(&self, idx: u64) -> Option<[u8; 32]> {
+ for i in 0..self.old_secrets.len() {
+ if (idx & (!((1 << i) - 1))) == self.old_secrets[i].1 {
+ return Some(Self::derive_secret(self.old_secrets[i].0, i as u8, idx))
+ }
+ }
+ assert!(idx < self.get_min_seen_secret());
+ None
+ }
+}
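+
+// Illustrative round-trip sketch (an assumption-laden example, not an upstream test; the
+// official BOLT 3 vectors are exercised in the tests module at the bottom of this file).
+// Secrets are generated from an arbitrary seed with derive_secret over all 48 bits, which
+// mirrors the BOLT 3 generation scheme, then provided in decreasing-index order as a
+// counterparty would reveal them.
+#[test]
+fn counterparty_commitment_secrets_roundtrip_sketch() {
+	let seed = [42; 32];
+	let mut store = CounterpartyCommitmentSecrets::new();
+	for i in 0..10u64 {
+		let idx = (1u64 << 48) - 1 - i;
+		let secret = CounterpartyCommitmentSecrets::derive_secret(seed, 48, idx);
+		store.provide_secret(idx, secret).unwrap();
+		// Every secret revealed so far must be re-derivable from the compact storage.
+		for j in 0..(i + 1) {
+			let old_idx = (1u64 << 48) - 1 - j;
+			assert_eq!(store.get_secret(old_idx).unwrap(),
+				CounterpartyCommitmentSecrets::derive_secret(seed, 48, old_idx));
+		}
+	}
+}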
+
+impl Writeable for CounterpartyCommitmentSecrets {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ for &(ref secret, ref idx) in self.old_secrets.iter() {
+ writer.write_all(secret)?;
+ writer.write_all(&byte_utils::be64_to_array(*idx))?;
+ }
+ Ok(())
+ }
+}
+impl<R: ::std::io::Read> Readable<R> for CounterpartyCommitmentSecrets {
+ fn read(reader: &mut R) -> Result<Self, DecodeError> {
+ let mut old_secrets = [([0; 32], 1 << 48); 49];
+ for &mut (ref mut secret, ref mut idx) in old_secrets.iter_mut() {
+ *secret = Readable::read(reader)?;
+ *idx = Readable::read(reader)?;
+ }
+
+ Ok(Self { old_secrets })
+ }
+}
+
/// Derives a per-commitment-transaction private key (eg an htlc key or payment key) from the base
/// private key for that type of key and the per_commitment_point (available in TxCreationKeys)
pub fn derive_private_key<T: secp256k1::Signing>(secp_ctx: &Secp256k1<T>, per_commitment_point: &PublicKey, base_secret: &SecretKey) -> Result<SecretKey, secp256k1::Error> {
/// The set of public keys which are used in the creation of one commitment transaction.
/// These are derived from the channel base keys and per-commitment data.
-#[derive(PartialEq)]
+#[derive(PartialEq, Clone)]
pub struct TxCreationKeys {
/// The per-commitment public key which was used to derive the other keys.
pub per_commitment_point: PublicKey,
/// B's Payment Key
pub(crate) b_payment_key: PublicKey,
}
+impl_writeable!(TxCreationKeys, 33*6,
+ { per_commitment_point, revocation_key, a_htlc_key, b_htlc_key, a_delayed_payment_key, b_payment_key });
/// One counterparty's public keys which do not change over the life of a channel.
#[derive(Clone, PartialEq)]
pub transaction_output_index: Option<u32>,
}
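+// Expected serialized size: bool(1) + u64(8) + u32(4) + payment_hash(32) + Option<u32>(1 + 4).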
+impl_writeable!(HTLCOutputInCommitment, 1 + 8 + 4 + 32 + 5, {
+ offered,
+ amount_msat,
+ cltv_expiry,
+ payment_hash,
+ transaction_output_index
+});
+
#[inline]
pub(super) fn get_htlc_redeemscript_with_explicit_keys(htlc: &HTLCOutputInCommitment, a_htlc_key: &PublicKey, b_htlc_key: &PublicKey, revocation_key: &PublicKey) -> Script {
let payment_hash160 = Ripemd160::hash(&htlc.payment_hash.0[..]).into_inner();
Ok(Self { tx })
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::CounterpartyCommitmentSecrets;
+ use hex;
+
+ #[test]
+ fn test_per_commitment_storage() {
+ // Test vectors from BOLT 3:
+ let mut secrets: Vec<[u8; 32]> = Vec::new();
+ let mut monitor;
+
+ macro_rules! test_secrets {
+ () => {
+ let mut idx = 281474976710655;
+ for secret in secrets.iter() {
+ assert_eq!(monitor.get_secret(idx).unwrap(), *secret);
+ idx -= 1;
+ }
+ assert_eq!(monitor.get_min_seen_secret(), idx + 1);
+ assert!(monitor.get_secret(idx).is_none());
+ };
+ }
+
+ {
+ // insert_secret correct sequence
+ monitor = CounterpartyCommitmentSecrets::new();
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+ monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+ monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+ monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
+ monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
+ monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
+ monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
+ monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+ }
+
+ {
+ // insert_secret #1 incorrect
+ monitor = CounterpartyCommitmentSecrets::new();
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+ assert!(monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).is_err());
+ }
+
+ {
+ // insert_secret #2 incorrect (#1 derived from incorrect)
+ monitor = CounterpartyCommitmentSecrets::new();
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("dddc3a8d14fddf2b68fa8c7fbad2748274937479dd0f8930d5ebb4ab6bd866a3").unwrap());
+ monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+ monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+ assert!(monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).is_err());
+ }
+
+ {
+ // insert_secret #3 incorrect
+ monitor = CounterpartyCommitmentSecrets::new();
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+ monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c51a18b13e8527e579ec56365482c62f180b7d5760b46e9477dae59e87ed423a").unwrap());
+ monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+ assert!(monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).is_err());
+ }
+
+ {
+ // insert_secret #4 incorrect (1,2,3 derived from incorrect)
+ monitor = CounterpartyCommitmentSecrets::new();
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("dddc3a8d14fddf2b68fa8c7fbad2748274937479dd0f8930d5ebb4ab6bd866a3").unwrap());
+ monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c51a18b13e8527e579ec56365482c62f180b7d5760b46e9477dae59e87ed423a").unwrap());
+ monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("ba65d7b0ef55a3ba300d4e87af29868f394f8f138d78a7011669c79b37b936f4").unwrap());
+ monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
+ monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
+ monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
+ monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
+ assert!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).is_err());
+ }
+
+ {
+ // insert_secret #5 incorrect
+ monitor = CounterpartyCommitmentSecrets::new();
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+ monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+ monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+ monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("631373ad5f9ef654bb3dade742d09504c567edd24320d2fcd68e3cc47e2ff6a6").unwrap());
+ monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
+ assert!(monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).is_err());
+ }
+
+ {
+ // insert_secret #6 incorrect (5 derived from incorrect)
+ monitor = CounterpartyCommitmentSecrets::new();
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+ monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+ monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+ monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("631373ad5f9ef654bb3dade742d09504c567edd24320d2fcd68e3cc47e2ff6a6").unwrap());
+ monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("b7e76a83668bde38b373970155c868a653304308f9896692f904a23731224bb1").unwrap());
+ monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
+ monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
+ assert!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).is_err());
+ }
+
+ {
+ // insert_secret #7 incorrect
+ monitor = CounterpartyCommitmentSecrets::new();
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+ monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+ monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+ monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
+ monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
+ monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("e7971de736e01da8ed58b94c2fc216cb1dca9e326f3a96e7194fe8ea8af6c0a3").unwrap());
+ monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
+ assert!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).is_err());
+ }
+
+ {
+ // insert_secret #8 incorrect
+ monitor = CounterpartyCommitmentSecrets::new();
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+ monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+ monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+ monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
+ monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
+ monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
+ monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a7efbc61aac46d34f77778bac22c8a20c6a46ca460addc49009bda875ec88fa4").unwrap());
+ assert!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).is_err());
+ }
+ }
+}
//! There are a bunch of these as their handling is relatively error-prone so they are split out
//! here. See also the chanmon_fail_consistency fuzz test.
+use chain::transaction::OutPoint;
use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash};
use ln::channelmonitor::ChannelMonitorUpdateErr;
use ln::features::InitFeatures;
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(&nodes[0]);
}
*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
- nodes[0].node.test_restore_channel_monitor();
- check_added_monitors!(nodes[0], 1);
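+	// The new flow: look up the latest persisted update_id for the channel and report the
+	// pending monitor update as completed; nothing re-adds a monitor here, hence the 0 below.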
+ let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
+ check_added_monitors!(nodes[0], 0);
let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events_2.len(), 1);
reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
}
- // ...and make sure we can force-close a TemporaryFailure channel with a PermanentFailure
- *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
- nodes[0].node.test_restore_channel_monitor();
- check_added_monitors!(nodes[0], 1);
+ // ...and make sure we can force-close a frozen channel
+ nodes[0].node.force_close_channel(&channel_id);
+ check_added_monitors!(nodes[0], 0);
check_closed_broadcast!(nodes[0], false);
// TODO: Once we hit the chain with the failure transaction we should check that we get a
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
}
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
+ check_added_monitors!(nodes[0], 1);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
}
// Now fix monitor updating...
*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
- nodes[0].node.test_restore_channel_monitor();
- check_added_monitors!(nodes[0], 1);
+ let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
+ check_added_monitors!(nodes[0], 0);
macro_rules! disconnect_reconnect_peers { () => { {
nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- nodes[1].node.test_restore_channel_monitor();
- check_added_monitors!(nodes[1], 1);
+ let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+ check_added_monitors!(nodes[1], 0);
let responses = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(responses.len(), 2);
}
*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
- nodes[0].node.test_restore_channel_monitor();
- check_added_monitors!(nodes[0], 1);
+ let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
+ check_added_monitors!(nodes[0], 0);
let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
#[test]
fn test_monitor_update_fail_no_rebroadcast() {
// Tests handling of a monitor update failure when no message rebroadcasting on
- // test_restore_channel_monitor() is required. Backported from
- // chanmon_fail_consistency fuzz tests.
+ // channel_monitor_updated() is required. Backported from chanmon_fail_consistency
+ // fuzz tests.
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
let (payment_preimage_1, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
check_added_monitors!(nodes[1], 1);
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- nodes[1].node.test_restore_channel_monitor();
+ let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- check_added_monitors!(nodes[1], 1);
+ check_added_monitors!(nodes[1], 0);
expect_pending_htlcs_forwardable!(nodes[1]);
let events = nodes[1].node.get_and_clear_pending_events();
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
send_payment(&nodes[0], &[&nodes[1]], 5000000, 5_000_000);
check_added_monitors!(nodes[0], 1);
*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
- nodes[0].node.test_restore_channel_monitor();
- check_added_monitors!(nodes[0], 1);
+ let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
+ check_added_monitors!(nodes[0], 0);
let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0);
send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]);
nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
+ check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Restore monitor updating, ensuring we immediately get a fail-back update and a
// update_add update.
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- nodes[1].node.test_restore_channel_monitor();
- check_added_monitors!(nodes[1], 1);
+ let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
+ nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+ check_added_monitors!(nodes[1], 0);
expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported());
let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- nodes[1].node.test_restore_channel_monitor();
- check_added_monitors!(nodes[1], 1);
+ let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
+ nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+ check_added_monitors!(nodes[1], 0);
updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
check_added_monitors!(nodes[1], 1);
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- nodes[1].node.test_restore_channel_monitor();
+ let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
// nodes[1] should be AwaitingRAA here!
- check_added_monitors!(nodes[1], 1);
+ check_added_monitors!(nodes[1], 0);
let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
expect_pending_htlcs_forwardable!(nodes[1]);
expect_payment_received!(nodes[1], payment_hash_1, 1000000);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
// Forward a payment for B to claim
let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]);
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
+ check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
// Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC
- // until we've test_restore_channel_monitor'd and updated for the new commitment transaction.
+	// until we've called channel_monitor_updated and updated for the new commitment transaction.
// Now un-fail the monitor, which will result in B sending its original commitment update,
// receiving the commitment update from A, and the resulting commitment dances.
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- nodes[1].node.test_restore_channel_monitor();
- check_added_monitors!(nodes[1], 1);
+ let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+ check_added_monitors!(nodes[1], 0);
let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(bs_msgs.len(), 2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
// Route the payment and deliver the initial commitment_signed (with a monitor update failure
// on receipt).
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect);
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- nodes[1].node.test_restore_channel_monitor();
- check_added_monitors!(nodes[1], 1);
+ let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+ check_added_monitors!(nodes[1], 0);
let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
// Route the first payment outbound, holding the last RAA for B until we are set up so that we
// can deliver it and fail the monitor update.
check_added_monitors!(nodes[1], 1);
// Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
- // RAA/CS response, which should be generated when we call test_restore_channel_monitor (with
- // the appropriate HTLC acceptance).
+	// RAA/CS response, which should be generated when we call channel_monitor_updated (with the
+ // appropriate HTLC acceptance).
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
+ check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- nodes[1].node.test_restore_channel_monitor();
- check_added_monitors!(nodes[1], 1);
+ let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+ check_added_monitors!(nodes[1], 0);
expect_pending_htlcs_forwardable!(nodes[1]);
expect_payment_received!(nodes[1], payment_hash_1, 1000000);
check_added_monitors!(nodes[2], 1);
// Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
- // paused, so forward shouldn't succeed until we call test_restore_channel_monitor().
+ // paused, so forward shouldn't succeed until we call channel_monitor_updated().
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
let mut events = nodes[2].node.get_and_clear_pending_msg_events();
} else { panic!("Unexpected event!"); }
// Now restore monitor updating on the 0<->1 channel and claim the funds on B.
- nodes[1].node.test_restore_channel_monitor();
- check_added_monitors!(nodes[1], 1);
+ let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
+ nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+ check_added_monitors!(nodes[1], 0);
let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::supported(), InitFeatures::supported());
// Rebalance a bit so that we can send backwards from 3 to 1.
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- nodes[1].node.test_restore_channel_monitor();
- check_added_monitors!(nodes[1], 1);
+ let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
+ nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+ check_added_monitors!(nodes[1], 0);
let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported());
+ let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::supported(), InitFeatures::supported()).2;
// Forward a payment for B to claim
let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- nodes[1].node.test_restore_channel_monitor();
- check_added_monitors!(nodes[1], 1);
+ let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+ check_added_monitors!(nodes[1], 0);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
check_added_monitors!(nodes[0], 1);
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
- nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()));
+ let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+ let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors!(nodes[1], 1);
if restore_between_fails {
assert!(fail_on_generate);
*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
- nodes[0].node.test_restore_channel_monitor();
- check_added_monitors!(nodes[0], 1);
+ let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
+ check_added_monitors!(nodes[0], 0);
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
}
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
if fail_on_generate && !restore_between_fails {
nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented funding_signed from allowing funding broadcast".to_string(), 1);
- check_added_monitors!(nodes[0], 0);
+ check_added_monitors!(nodes[0], 1);
} else {
nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
check_added_monitors!(nodes[0], 1);
}
assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
*nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
- nodes[0].node.test_restore_channel_monitor();
+ let (outpoint, latest_update) = nodes[0].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
+ check_added_monitors!(nodes[0], 0);
+ } else {
+ check_added_monitors!(nodes[0], 1);
}
- check_added_monitors!(nodes[0], 1);
-
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
}
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
- nodes[1].node.test_restore_channel_monitor();
- check_added_monitors!(nodes[1], 1);
+ let (outpoint, latest_update) = nodes[1].chan_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+ nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
+ check_added_monitors!(nodes[1], 0);
let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id()));
use ln::features::{ChannelFeatures, InitFeatures};
use ln::msgs;
use ln::msgs::{DecodeError, OptionalField, DataLossProtect};
-use ln::channelmonitor::ChannelMonitor;
-use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingForwardHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT};
-use ln::chan_utils::{LocalCommitmentTransaction, TxCreationKeys, HTLCOutputInCommitment, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT, make_funding_redeemscript, ChannelPublicKeys};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep};
+use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT};
+use ln::chan_utils::{CounterpartyCommitmentSecrets, LocalCommitmentTransaction, TxCreationKeys, HTLCOutputInCommitment, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT, make_funding_redeemscript, ChannelPublicKeys};
use ln::chan_utils;
use chain::chaininterface::{FeeEstimator,ConfirmationTarget};
use chain::transaction::OutPoint;
secp_ctx: Secp256k1<secp256k1::All>,
channel_value_satoshis: u64,
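+ /// The id of the latest ChannelMonitorUpdate we have generated for this channel; update
+ /// ids are kept strictly increasing by one.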
+ latest_monitor_update_id: u64,
+
#[cfg(not(test))]
local_keys: ChanSigner,
#[cfg(test)]
pub(super) local_keys: ChanSigner,
shutdown_pubkey: PublicKey,
+ destination_script: Script,
// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
// generation start at 0 and count up...this simplifies some parts of implementation at the
monitor_pending_funding_locked: bool,
monitor_pending_revoke_and_ack: bool,
monitor_pending_commitment_signed: bool,
- monitor_pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>,
+ monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
// pending_update_fee is filled when sending and receiving update_fee
last_sent_closing_fee: Option<(u64, u64, Signature)>, // (feerate, fee, our_sig)
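+ /// The funding transaction outpoint, once it is known (i.e. after funding_created or
+ /// get_outbound_funding_created).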
+ funding_txo: Option<OutPoint>,
+
/// The hash of the block in which the funding transaction reached our CONF_TARGET. We use this
/// to detect unconfirmation after a serialize-unserialize roundtrip where we may not see a full
/// series of block_connected/block_disconnected calls. Obviously this is not a guarantee as we
their_shutdown_scriptpubkey: Option<Script>,
- channel_monitor: ChannelMonitor<ChanSigner>,
+ /// Used exclusively to broadcast the latest local state; that it lives here at all is
+ /// mostly a historical quirk:
+ channel_monitor: Option<ChannelMonitor<ChanSigner>>,
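+ /// Compact storage for the per-commitment secrets our counterparty has revealed to us.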
+ commitment_secrets: CounterpartyCommitmentSecrets,
network_sync: UpdateStatus,
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
-pub(super) enum ChannelError<ChanSigner: ChannelKeys> {
+pub(super) enum ChannelError {
Ignore(&'static str),
Close(&'static str),
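+ /// A close where we must delay broadcasting our latest local commitment transaction,
+ /// applying the contained monitor update first (e.g. when a peer's channel_reestablish
+ /// proves we have fallen behind).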
CloseDelayBroadcast {
msg: &'static str,
- update: Option<ChannelMonitor<ChanSigner>>,
+ update: ChannelMonitorUpdate,
},
}
-impl<ChanSigner: ChannelKeys> fmt::Debug for ChannelError<ChanSigner> {
+impl fmt::Debug for ChannelError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&ChannelError::Ignore(e) => write!(f, "Ignore : {}", e),
let feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
- let secp_ctx = Secp256k1::new();
- let channel_monitor = ChannelMonitor::new(chan_keys.clone(),
- chan_keys.funding_key(), chan_keys.revocation_base_key(), chan_keys.delayed_payment_base_key(),
- chan_keys.htlc_base_key(), chan_keys.payment_base_key(), &keys_provider.get_shutdown_pubkey(), config.own_channel_config.our_to_self_delay,
- keys_provider.get_destination_script(), logger.clone());
-
Ok(Channel {
user_id: user_id,
config: config.channel_options.clone(),
channel_id: keys_provider.get_channel_id(),
channel_state: ChannelState::OurInitSent as u32,
channel_outbound: true,
- secp_ctx: secp_ctx,
+ secp_ctx: Secp256k1::new(),
channel_value_satoshis: channel_value_satoshis,
+ latest_monitor_update_id: 0,
+
local_keys: chan_keys,
shutdown_pubkey: keys_provider.get_shutdown_pubkey(),
+ destination_script: keys_provider.get_destination_script(),
+
cur_local_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
cur_remote_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
value_to_self_msat: channel_value_satoshis * 1000 - push_msat,
last_sent_closing_fee: None,
+ funding_txo: None,
funding_tx_confirmed_in: None,
short_channel_id: None,
last_block_connected: Default::default(),
their_shutdown_scriptpubkey: None,
- channel_monitor: channel_monitor,
+ channel_monitor: None,
+ commitment_secrets: CounterpartyCommitmentSecrets::new(),
network_sync: UpdateStatus::Fresh,
})
}
- fn check_remote_fee(fee_estimator: &FeeEstimator, feerate_per_kw: u32) -> Result<(), ChannelError<ChanSigner>> {
+ fn check_remote_fee(fee_estimator: &FeeEstimator, feerate_per_kw: u32) -> Result<(), ChannelError> {
if (feerate_per_kw as u64) < fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background) {
return Err(ChannelError::Close("Peer's feerate much too low"));
}
/// Creates a new channel from a remote sides' request for one.
/// Assumes chain_hash has already been checked and corresponds with what we expect!
- pub fn new_from_req(fee_estimator: &FeeEstimator, keys_provider: &Arc<KeysInterface<ChanKeySigner = ChanSigner>>, their_node_id: PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel, user_id: u64, logger: Arc<Logger>, config: &UserConfig) -> Result<Channel<ChanSigner>, ChannelError<ChanSigner>> {
+ pub fn new_from_req(fee_estimator: &FeeEstimator, keys_provider: &Arc<KeysInterface<ChanKeySigner = ChanSigner>>, their_node_id: PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel, user_id: u64, logger: Arc<Logger>, config: &UserConfig) -> Result<Channel<ChanSigner>, ChannelError> {
let mut chan_keys = keys_provider.get_channel_keys(true, msg.funding_satoshis);
let their_pubkeys = ChannelPublicKeys {
funding_pubkey: msg.funding_pubkey,
return Err(ChannelError::Close("Insufficient funding amount for initial commitment"));
}
- let secp_ctx = Secp256k1::new();
- let channel_monitor = ChannelMonitor::new(chan_keys.clone(),
- chan_keys.funding_key(), chan_keys.revocation_base_key(), chan_keys.delayed_payment_base_key(),
- chan_keys.htlc_base_key(), chan_keys.payment_base_key(), &keys_provider.get_shutdown_pubkey(), config.own_channel_config.our_to_self_delay,
- keys_provider.get_destination_script(), logger.clone());
-
let their_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
match &msg.shutdown_scriptpubkey {
&OptionalField::Present(ref script) => {
}
} else { None };
- let mut chan = Channel {
+ let chan = Channel {
user_id: user_id,
config: local_config,
channel_id: msg.temporary_channel_id,
channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
channel_outbound: false,
- secp_ctx: secp_ctx,
+ secp_ctx: Secp256k1::new(),
+
+ latest_monitor_update_id: 0,
local_keys: chan_keys,
shutdown_pubkey: keys_provider.get_shutdown_pubkey(),
+ destination_script: keys_provider.get_destination_script(),
+
cur_local_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
cur_remote_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
value_to_self_msat: msg.push_msat,
last_sent_closing_fee: None,
+ funding_txo: None,
funding_tx_confirmed_in: None,
short_channel_id: None,
last_block_connected: Default::default(),
their_shutdown_scriptpubkey,
- channel_monitor: channel_monitor,
+ channel_monitor: None,
+ commitment_secrets: CounterpartyCommitmentSecrets::new(),
network_sync: UpdateStatus::Fresh,
logger,
};
- let obscure_factor = chan.get_commitment_transaction_number_obscure_factor();
- let funding_redeemscript = chan.get_funding_redeemscript();
- chan.channel_monitor.set_basic_channel_info(&msg.htlc_basepoint, &msg.delayed_payment_basepoint, msg.to_self_delay, funding_redeemscript, msg.funding_satoshis, obscure_factor);
-
Ok(chan)
}
let txins = {
let mut ins: Vec<TxIn> = Vec::new();
ins.push(TxIn {
- previous_output: self.channel_monitor.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+ previous_output: self.funding_txo.unwrap().into_bitcoin_outpoint(),
script_sig: Script::new(),
sequence: ((0x80 as u32) << 8*3) | ((obscured_commitment_transaction_number >> 3*8) as u32),
witness: Vec::new(),
let txins = {
let mut ins: Vec<TxIn> = Vec::new();
ins.push(TxIn {
- previous_output: self.channel_monitor.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+ previous_output: self.funding_txo.unwrap().into_bitcoin_outpoint(),
script_sig: Script::new(),
sequence: 0xffffffff,
witness: Vec::new(),
/// our counterparty!)
/// The result is a transaction which we can revoke ownership of (ie a "local" transaction)
/// TODO Some magic rust shit to compile-time check this?
- fn build_local_transaction_keys(&self, commitment_number: u64) -> Result<TxCreationKeys, ChannelError<ChanSigner>> {
+ fn build_local_transaction_keys(&self, commitment_number: u64) -> Result<TxCreationKeys, ChannelError> {
let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &self.build_local_commitment_secret(commitment_number));
let delayed_payment_base = PublicKey::from_secret_key(&self.secp_ctx, self.local_keys.delayed_payment_base_key());
let htlc_basepoint = PublicKey::from_secret_key(&self.secp_ctx, self.local_keys.htlc_base_key());
/// Creates a set of keys for build_commitment_transaction to generate a transaction which we
/// will sign and send to our counterparty.
/// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
- fn build_remote_transaction_keys(&self) -> Result<TxCreationKeys, ChannelError<ChanSigner>> {
+ fn build_remote_transaction_keys(&self) -> Result<TxCreationKeys, ChannelError> {
//TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
//may see payments to it!
let payment_basepoint = PublicKey::from_secret_key(&self.secp_ctx, self.local_keys.payment_base_key());
/// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
/// In such cases we debug_assert!(false) and return an IgnoreError. Thus, will always return
/// Ok(_) if debug assertions are turned on and preconditions are met.
- fn get_update_fulfill_htlc(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage) -> Result<(Option<msgs::UpdateFulfillHTLC>, Option<ChannelMonitor<ChanSigner>>), ChannelError<ChanSigner>> {
+ fn get_update_fulfill_htlc(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage) -> Result<(Option<msgs::UpdateFulfillHTLC>, Option<ChannelMonitorUpdate>), ChannelError> {
// Either ChannelFunded got set (which means it won't be unset) or there is no way any
// caller thought we could have something claimed (cause we wouldn't have accepted in an
// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
//
// We have to put the payment_preimage in the channel_monitor right away here to ensure we
// can claim it even if the channel hits the chain before we see their next commitment.
- self.channel_monitor.provide_payment_preimage(&payment_hash_calc, &payment_preimage_arg);
+ self.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+ payment_preimage: payment_preimage_arg.clone(),
+ }],
+ };
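+ // Apply the update to our in-Channel copy of the monitor right away; the update_id
+ // renumbering done by callers means updates can reach it out of order, hence
+ // update_monitor_ooo, which (as the name suggests) tolerates out-of-order update_ids.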
+ self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone()).unwrap();
if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
for pending_update in self.holding_cell_htlc_updates.iter() {
log_warn!(self, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.channel_id()));
// TODO: We may actually be able to switch to a fulfill here, though its
// rare enough it may not be worth the complexity burden.
- return Ok((None, Some(self.channel_monitor.clone())));
+ return Ok((None, Some(monitor_update)));
}
},
_ => {}
self.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
});
- return Ok((None, Some(self.channel_monitor.clone())));
+ return Ok((None, Some(monitor_update)));
}
{
if let InboundHTLCState::Committed = htlc.state {
} else {
debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
- return Ok((None, Some(self.channel_monitor.clone())));
+ return Ok((None, Some(monitor_update)));
}
log_trace!(self, "Upgrading HTLC {} to LocalRemoved with a Fulfill!", log_bytes!(htlc.payment_hash.0));
htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
channel_id: self.channel_id(),
htlc_id: htlc_id_arg,
payment_preimage: payment_preimage_arg,
- }), Some(self.channel_monitor.clone())))
+ }), Some(monitor_update)))
}
- pub fn get_update_fulfill_htlc_and_commit(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage) -> Result<(Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>, Option<ChannelMonitor<ChanSigner>>), ChannelError<ChanSigner>> {
+ pub fn get_update_fulfill_htlc_and_commit(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage) -> Result<(Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>, Option<ChannelMonitorUpdate>), ChannelError> {
match self.get_update_fulfill_htlc(htlc_id, payment_preimage)? {
- (Some(update_fulfill_htlc), _) => {
+ (Some(update_fulfill_htlc), Some(mut monitor_update)) => {
+ let (commitment, mut additional_update) = self.send_commitment_no_status_check()?;
+ // send_commitment_no_status_check may bump latest_monitor_update_id, but we want
+ // update_ids to be strictly increasing by one, so reset it here.
+ self.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ Ok((Some((update_fulfill_htlc, commitment)), Some(monitor_update)))
+ },
+ (Some(update_fulfill_htlc), None) => {
let (commitment, monitor_update) = self.send_commitment_no_status_check()?;
Ok((Some((update_fulfill_htlc, commitment)), Some(monitor_update)))
},
- (None, Some(channel_monitor)) => Ok((None, Some(channel_monitor))),
+ (None, Some(monitor_update)) => Ok((None, Some(monitor_update))),
(None, None) => Ok((None, None))
}
}
/// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
/// In such cases we debug_assert!(false) and return an IgnoreError. Thus, will always return
/// Ok(_) if debug assertions are turned on and preconditions are met.
- pub fn get_update_fail_htlc(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket) -> Result<Option<msgs::UpdateFailHTLC>, ChannelError<ChanSigner>> {
+ pub fn get_update_fail_htlc(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket) -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
panic!("Was asked to fail an HTLC when channel was not in an operational state");
}
// Message handlers:
- pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, config: &UserConfig, their_features: InitFeatures) -> Result<(), ChannelError<ChanSigner>> {
+ pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, config: &UserConfig, their_features: InitFeatures) -> Result<(), ChannelError> {
// Check sanity of message fields:
if !self.channel_outbound {
return Err(ChannelError::Close("Got an accept_channel message from an inbound peer"));
self.their_cur_commitment_point = Some(msg.first_per_commitment_point);
self.their_shutdown_scriptpubkey = their_shutdown_scriptpubkey;
- let obscure_factor = self.get_commitment_transaction_number_obscure_factor();
- let funding_redeemscript = self.get_funding_redeemscript();
- self.channel_monitor.set_basic_channel_info(&msg.htlc_basepoint, &msg.delayed_payment_basepoint, msg.to_self_delay, funding_redeemscript, self.channel_value_satoshis, obscure_factor);
-
self.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
Ok(())
}
- fn funding_created_signature(&mut self, sig: &Signature) -> Result<(Transaction, LocalCommitmentTransaction, Signature, TxCreationKeys), ChannelError<ChanSigner>> {
+ fn funding_created_signature(&mut self, sig: &Signature) -> Result<(Transaction, LocalCommitmentTransaction, Signature, TxCreationKeys), ChannelError> {
let funding_script = self.get_funding_redeemscript();
let local_keys = self.build_local_transaction_keys(self.cur_local_commitment_transaction_number)?;
&self.their_pubkeys.as_ref().expect("their_funding_pubkey() only allowed after accept_channel").funding_pubkey
}
- pub fn funding_created(&mut self, msg: &msgs::FundingCreated) -> Result<(msgs::FundingSigned, ChannelMonitor<ChanSigner>), ChannelError<ChanSigner>> {
+ pub fn funding_created(&mut self, msg: &msgs::FundingCreated) -> Result<(msgs::FundingSigned, ChannelMonitor<ChanSigner>), ChannelError> {
if self.channel_outbound {
return Err(ChannelError::Close("Received funding_created for an outbound channel?"));
}
// channel.
return Err(ChannelError::Close("Received funding_created after we got the channel!"));
}
- if self.channel_monitor.get_min_seen_secret() != (1 << 48) ||
+ if self.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
self.cur_remote_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
self.cur_local_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
}
let funding_txo = OutPoint::new(msg.funding_txid, msg.funding_output_index);
- let funding_txo_script = self.get_funding_redeemscript().to_v0_p2wsh();
- self.channel_monitor.set_funding_info((funding_txo, funding_txo_script));
+ self.funding_txo = Some(funding_txo.clone());
let (remote_initial_commitment_tx, local_initial_commitment_tx, our_signature, local_keys) = match self.funding_created_signature(&msg.signature) {
Ok(res) => res,
Err(e) => {
- self.channel_monitor.unset_funding_info();
+ self.funding_txo = None;
return Err(e);
}
};
// Now that we're past error-generating stuff, update our local state:
- self.channel_monitor.provide_latest_remote_commitment_tx_info(&remote_initial_commitment_tx, Vec::new(), self.cur_remote_commitment_transaction_number, self.their_cur_commitment_point.unwrap());
- self.channel_monitor.provide_latest_local_commitment_tx_info(local_initial_commitment_tx, local_keys, self.feerate_per_kw, Vec::new());
+ let their_pubkeys = self.their_pubkeys.as_ref().unwrap();
+ let funding_redeemscript = self.get_funding_redeemscript();
+ let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
+ macro_rules! create_monitor {
+ () => { {
+ let mut channel_monitor = ChannelMonitor::new(self.local_keys.clone(),
+ &self.shutdown_pubkey, self.our_to_self_delay,
+ &self.destination_script, (funding_txo, funding_txo_script.clone()),
+ &their_pubkeys.htlc_basepoint, &their_pubkeys.delayed_payment_basepoint,
+ self.their_to_self_delay, funding_redeemscript.clone(), self.channel_value_satoshis,
+ self.get_commitment_transaction_number_obscure_factor(),
+ self.logger.clone());
+
+ channel_monitor.provide_latest_remote_commitment_tx_info(&remote_initial_commitment_tx, Vec::new(), self.cur_remote_commitment_transaction_number, self.their_cur_commitment_point.unwrap());
+ channel_monitor.provide_latest_local_commitment_tx_info(local_initial_commitment_tx.clone(), local_keys.clone(), self.feerate_per_kw, Vec::new()).unwrap();
+ channel_monitor
+ } }
+ }
+
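+ // Build two identical monitors: one is kept in the Channel (used exclusively to
+ // broadcast our latest local state) and the other is returned to the caller.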
+ self.channel_monitor = Some(create_monitor!());
+ let channel_monitor = create_monitor!();
+
self.channel_state = ChannelState::FundingSent as u32;
self.channel_id = funding_txo.to_channel_id();
self.cur_remote_commitment_transaction_number -= 1;
Ok((msgs::FundingSigned {
channel_id: self.channel_id,
signature: our_signature
- }, self.channel_monitor.clone()))
+ }, channel_monitor))
}
/// Handles a funding_signed message from the remote end.
/// If this call is successful, broadcast the funding transaction (and not before!)
- pub fn funding_signed(&mut self, msg: &msgs::FundingSigned) -> Result<ChannelMonitor<ChanSigner>, ChannelError<ChanSigner>> {
+ pub fn funding_signed(&mut self, msg: &msgs::FundingSigned) -> Result<ChannelMonitorUpdate, (Option<ChannelMonitorUpdate>, ChannelError)> {
if !self.channel_outbound {
- return Err(ChannelError::Close("Received funding_signed for an inbound channel?"));
+ return Err((None, ChannelError::Close("Received funding_signed for an inbound channel?")));
}
if self.channel_state & !(ChannelState::MonitorUpdateFailed as u32) != ChannelState::FundingCreated as u32 {
- return Err(ChannelError::Close("Received funding_signed in strange state!"));
+ return Err((None, ChannelError::Close("Received funding_signed in strange state!")));
}
- if self.channel_monitor.get_min_seen_secret() != (1 << 48) ||
+ if self.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
self.cur_remote_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER - 1 ||
self.cur_local_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
let funding_script = self.get_funding_redeemscript();
- let local_keys = self.build_local_transaction_keys(self.cur_local_commitment_transaction_number)?;
+ let local_keys = self.build_local_transaction_keys(self.cur_local_commitment_transaction_number).map_err(|e| (None, e))?;
let local_initial_commitment_tx = self.build_commitment_transaction(self.cur_local_commitment_transaction_number, &local_keys, true, false, self.feerate_per_kw).0;
let local_sighash = hash_to_message!(&bip143::SighashComponents::new(&local_initial_commitment_tx).sighash_all(&local_initial_commitment_tx.input[0], &funding_script, self.channel_value_satoshis)[..]);
let their_funding_pubkey = &self.their_pubkeys.as_ref().unwrap().funding_pubkey;
// They sign the "local" commitment transaction, allowing us to broadcast the tx if we wish.
- secp_check!(self.secp_ctx.verify(&local_sighash, &msg.signature, their_funding_pubkey), "Invalid funding_signed signature from peer");
+ if let Err(_) = self.secp_ctx.verify(&local_sighash, &msg.signature, their_funding_pubkey) {
+ return Err((None, ChannelError::Close("Invalid funding_signed signature from peer")));
+ }
- self.channel_monitor.provide_latest_local_commitment_tx_info(
- LocalCommitmentTransaction::new_missing_local_sig(local_initial_commitment_tx, &msg.signature, &PublicKey::from_secret_key(&self.secp_ctx, self.local_keys.funding_key()), their_funding_pubkey),
- local_keys, self.feerate_per_kw, Vec::new());
+ self.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo {
+ commitment_tx: LocalCommitmentTransaction::new_missing_local_sig(local_initial_commitment_tx, &msg.signature, &PublicKey::from_secret_key(&self.secp_ctx, self.local_keys.funding_key()), their_funding_pubkey),
+ local_keys, feerate_per_kw: self.feerate_per_kw, htlc_outputs: Vec::new(),
+ }]
+ };
+ self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone()).unwrap();
self.channel_state = ChannelState::FundingSent as u32 | (self.channel_state & (ChannelState::MonitorUpdateFailed as u32));
self.cur_local_commitment_transaction_number -= 1;
if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
- Ok(self.channel_monitor.clone())
+ Ok(monitor_update)
} else {
- Err(ChannelError::Ignore("Previous monitor update failure prevented funding_signed from allowing funding broadcast"))
+ Err((Some(monitor_update),
+ ChannelError::Ignore("Previous monitor update failure prevented funding_signed from allowing funding broadcast")))
}
}
- pub fn funding_locked(&mut self, msg: &msgs::FundingLocked) -> Result<(), ChannelError<ChanSigner>> {
+ pub fn funding_locked(&mut self, msg: &msgs::FundingLocked) -> Result<(), ChannelError> {
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
return Err(ChannelError::Close("Peer sent funding_locked when we needed a channel_reestablish"));
}
cmp::min(self.value_to_self_msat as i64 - self.get_outbound_pending_htlc_stats().1 as i64, 0) as u64)
}
- pub fn update_add_htlc(&mut self, msg: &msgs::UpdateAddHTLC, pending_forward_state: PendingHTLCStatus) -> Result<(), ChannelError<ChanSigner>> {
+ pub fn update_add_htlc(&mut self, msg: &msgs::UpdateAddHTLC, pending_forward_state: PendingHTLCStatus) -> Result<(), ChannelError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelFunded as u32) {
return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state"));
}
/// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
#[inline]
- fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentHash>, fail_reason: Option<HTLCFailReason>) -> Result<&HTLCSource, ChannelError<ChanSigner>> {
+ fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentHash>, fail_reason: Option<HTLCFailReason>) -> Result<&HTLCSource, ChannelError> {
for htlc in self.pending_outbound_htlcs.iter_mut() {
if htlc.htlc_id == htlc_id {
match check_preimage {
Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find"))
}
- pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<HTLCSource, ChannelError<ChanSigner>> {
+ pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<HTLCSource, ChannelError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state"));
}
self.mark_outbound_htlc_removed(msg.htlc_id, Some(payment_hash), None).map(|source| source.clone())
}
- pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError<ChanSigner>> {
+ pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state"));
}
Ok(())
}
- pub fn update_fail_malformed_htlc<'a>(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError<ChanSigner>> {
+ pub fn update_fail_malformed_htlc<'a>(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state"));
}
Ok(())
}
- pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned, fee_estimator: &FeeEstimator) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>, Option<msgs::ClosingSigned>, ChannelMonitor<ChanSigner>), ChannelError<ChanSigner>> {
+ pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned, fee_estimator: &FeeEstimator) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>, Option<msgs::ClosingSigned>, ChannelMonitorUpdate), (Option<ChannelMonitorUpdate>, ChannelError)> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
- return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state"));
+ return Err((None, ChannelError::Close("Got commitment signed message when channel was not in an operational state")));
}
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish"));
+ return Err((None, ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish")));
}
if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.last_sent_closing_fee.is_some() {
- return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds"));
+ return Err((None, ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds")));
}
let funding_script = self.get_funding_redeemscript();
- let local_keys = self.build_local_transaction_keys(self.cur_local_commitment_transaction_number)?;
+ let local_keys = self.build_local_transaction_keys(self.cur_local_commitment_transaction_number).map_err(|e| (None, e))?;
let mut update_fee = false;
let feerate_per_kw = if !self.channel_outbound && self.pending_update_fee.is_some() {
let local_commitment_txid = local_commitment_tx.0.txid();
let local_sighash = hash_to_message!(&bip143::SighashComponents::new(&local_commitment_tx.0).sighash_all(&local_commitment_tx.0.input[0], &funding_script, self.channel_value_satoshis)[..]);
log_trace!(self, "Checking commitment tx signature {} by key {} against tx {} with redeemscript {}", log_bytes!(msg.signature.serialize_compact()[..]), log_bytes!(self.their_funding_pubkey().serialize()), encode::serialize_hex(&local_commitment_tx.0), encode::serialize_hex(&funding_script));
- secp_check!(self.secp_ctx.verify(&local_sighash, &msg.signature, &self.their_funding_pubkey()), "Invalid commitment tx signature from peer");
+ if let Err(_) = self.secp_ctx.verify(&local_sighash, &msg.signature, &self.their_funding_pubkey()) {
+ return Err((None, ChannelError::Close("Invalid commitment tx signature from peer")));
+ }
//If channel fee was updated by funder confirm funder can afford the new fee rate when applied to the current local commitment transaction
if update_fee {
let total_fee: u64 = feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
if self.channel_value_satoshis - self.value_to_self_msat / 1000 < total_fee + self.their_channel_reserve_satoshis {
- return Err(ChannelError::Close("Funding remote cannot afford proposed new fee"));
+ return Err((None, ChannelError::Close("Funding remote cannot afford proposed new fee")));
}
}
if msg.htlc_signatures.len() != local_commitment_tx.1 {
- return Err(ChannelError::Close("Got wrong number of HTLC signatures from remote"));
+ return Err((None, ChannelError::Close("Got wrong number of HTLC signatures from remote")));
}
let mut htlcs_and_sigs = Vec::with_capacity(local_commitment_tx.2.len());
let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &local_keys);
log_trace!(self, "Checking HTLC tx signature {} by key {} against tx {} with redeemscript {}", log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(local_keys.b_htlc_key.serialize()), encode::serialize_hex(&htlc_tx), encode::serialize_hex(&htlc_redeemscript));
let htlc_sighash = hash_to_message!(&bip143::SighashComponents::new(&htlc_tx).sighash_all(&htlc_tx.input[0], &htlc_redeemscript, htlc.amount_msat / 1000)[..]);
- secp_check!(self.secp_ctx.verify(&htlc_sighash, &msg.htlc_signatures[idx], &local_keys.b_htlc_key), "Invalid HTLC tx signature from peer");
+ if let Err(_) = self.secp_ctx.verify(&htlc_sighash, &msg.htlc_signatures[idx], &local_keys.b_htlc_key) {
+ return Err((None, ChannelError::Close("Invalid HTLC tx signature from peer")));
+ }
htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source));
} else {
htlcs_and_sigs.push((htlc, None, source));
let their_funding_pubkey = self.their_pubkeys.as_ref().unwrap().funding_pubkey;
- self.channel_monitor.provide_latest_local_commitment_tx_info(
- LocalCommitmentTransaction::new_missing_local_sig(local_commitment_tx.0, &msg.signature, &PublicKey::from_secret_key(&self.secp_ctx, self.local_keys.funding_key()), &their_funding_pubkey),
- local_keys, self.feerate_per_kw, htlcs_and_sigs);
+ self.latest_monitor_update_id += 1;
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo {
+ commitment_tx: LocalCommitmentTransaction::new_missing_local_sig(local_commitment_tx.0, &msg.signature, &PublicKey::from_secret_key(&self.secp_ctx, self.local_keys.funding_key()), &their_funding_pubkey),
+ local_keys, feerate_per_kw: self.feerate_per_kw, htlc_outputs: htlcs_and_sigs
+ }]
+ };
+ self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone()).unwrap();
for htlc in self.pending_inbound_htlcs.iter_mut() {
let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
// If we were going to send a commitment_signed after the RAA, go ahead and do all
// the corresponding HTLC status updates so that get_last_commitment_update
// includes the right HTLCs.
- // Note that this generates a monitor update that we ignore! This is OK since we
- // won't actually send the commitment_signed that generated the update to the other
- // side until the latest monitor has been pulled from us and stored.
self.monitor_pending_commitment_signed = true;
- self.send_commitment_no_status_check()?;
+ let (_, mut additional_update) = self.send_commitment_no_status_check().map_err(|e| (None, e))?;
+ // send_commitment_no_status_check may bump latest_monitor_update_id, but we want
+ // update_ids to be strictly increasing by one, so reset it here.
+ self.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
}
// TODO: Call maybe_propose_first_closing_signed on restoration (or call it here and
// re-send the message on restoration)
- return Err(ChannelError::Ignore("Previous monitor update failure prevented generation of RAA"));
+ return Err((Some(monitor_update), ChannelError::Ignore("Previous monitor update failure prevented generation of RAA")));
}
- let (our_commitment_signed, monitor_update, closing_signed) = if need_our_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+ let (our_commitment_signed, closing_signed) = if need_our_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
// we'll send one right away when we get the revoke_and_ack when we
// free_holding_cell_htlcs().
- let (msg, monitor) = self.send_commitment_no_status_check()?;
- (Some(msg), monitor, None)
+ let (msg, mut additional_update) = self.send_commitment_no_status_check().map_err(|e| (None, e))?;
+ // send_commitment_no_status_check may bump latest_monitor_update_id, but we want
+ // update_ids to be strictly increasing by one, so reset it here.
+ self.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ (Some(msg), None)
} else if !need_our_commitment {
- (None, self.channel_monitor.clone(), self.maybe_propose_first_closing_signed(fee_estimator))
- } else { (None, self.channel_monitor.clone(), None) };
+ (None, self.maybe_propose_first_closing_signed(fee_estimator))
+ } else { (None, None) };
Ok((msgs::RevokeAndACK {
channel_id: self.channel_id,
/// Used to fulfill holding_cell_htlcs when we get a remote ack (or implicitly get it by them
/// fulfilling or failing the last pending HTLC)
- fn free_holding_cell_htlcs(&mut self) -> Result<Option<(msgs::CommitmentUpdate, ChannelMonitor<ChanSigner>)>, ChannelError<ChanSigner>> {
+ fn free_holding_cell_htlcs(&mut self) -> Result<Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, ChannelError> {
assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, 0);
if self.holding_cell_htlc_updates.len() != 0 || self.holding_cell_update_fee.is_some() {
log_trace!(self, "Freeing holding cell with {} HTLC updates{}", self.holding_cell_htlc_updates.len(), if self.holding_cell_update_fee.is_some() { " and a fee update" } else { "" });
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.latest_monitor_update_id + 1, // We don't increment this yet!
+ updates: Vec::new(),
+ };
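+ // latest_monitor_update_id is only advanced once we know the holding cell actually
+ // produced updates (see the reset after send_commitment_no_status_check below).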
+
let mut htlc_updates = Vec::new();
mem::swap(&mut htlc_updates, &mut self.holding_cell_htlc_updates);
let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len());
},
&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
match self.get_update_fulfill_htlc(htlc_id, *payment_preimage) {
- Ok(update_fulfill_msg_option) => update_fulfill_htlcs.push(update_fulfill_msg_option.0.unwrap()),
+ Ok((update_fulfill_msg_option, additional_monitor_update_opt)) => {
+ update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
+ if let Some(mut additional_monitor_update) = additional_monitor_update_opt {
+ monitor_update.updates.append(&mut additional_monitor_update.updates);
+ }
+ },
Err(e) => {
if let ChannelError::Ignore(_) = e {}
else {
} else {
None
};
- let (commitment_signed, monitor_update) = self.send_commitment_no_status_check()?;
+
+ let (commitment_signed, mut additional_update) = self.send_commitment_no_status_check()?;
+ // send_commitment_no_status_check and get_update_fulfill_htlc may each bump
+ // latest_monitor_update_id, but we want update_ids to be strictly increasing by one, so
+ // reset it here.
+ self.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+
Ok(Some((msgs::CommitmentUpdate {
update_add_htlcs,
update_fulfill_htlcs,
/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
/// generating an appropriate error *after* the channel state has been updated based on the
/// revoke_and_ack message.
- pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK, fee_estimator: &FeeEstimator) -> Result<(Option<msgs::CommitmentUpdate>, Vec<(PendingForwardHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, Option<msgs::ClosingSigned>, ChannelMonitor<ChanSigner>), ChannelError<ChanSigner>> {
+ pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK, fee_estimator: &FeeEstimator) -> Result<(Option<msgs::CommitmentUpdate>, Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, Option<msgs::ClosingSigned>, ChannelMonitorUpdate), ChannelError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state"));
}
return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey"));
}
}
- self.channel_monitor.provide_secret(self.cur_remote_commitment_transaction_number + 1, msg.per_commitment_secret)
- .map_err(|e| ChannelError::Close(e.0))?;
if self.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
// Our counterparty seems to have burned their coins to us (by revoking a state when we
return Err(ChannelError::Close("Received an unexpected revoke_and_ack"));
}
+ self.commitment_secrets.provide_secret(self.cur_remote_commitment_transaction_number + 1, msg.per_commitment_secret)
+ .map_err(|_| ChannelError::Close("Previous secrets did not match new one"))?;
+ self.latest_monitor_update_id += 1;
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
+ idx: self.cur_remote_commitment_transaction_number + 1,
+ secret: msg.per_commitment_secret,
+ }],
+ };
+ self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone()).unwrap();
+
// Update state now that we've passed all the can-fail calls...
// (note that we may still fail to generate the new commitment_signed message, but that's
// OK, we step the channel here and *then* if the new generation fails we can fail the
// When the monitor updating is restored we'll call get_last_commitment_update(),
// which does not update state, but we're definitely now awaiting a remote revoke
// before we can step forward any more, so set it here.
- self.send_commitment_no_status_check()?;
+ let (_, mut additional_update) = self.send_commitment_no_status_check()?;
+ // send_commitment_no_status_check may bump latest_monitor_update_id, but we want
+ // update_ids to be strictly increasing by one, so reset it here.
+ self.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
}
self.monitor_pending_forwards.append(&mut to_forward_infos);
self.monitor_pending_failures.append(&mut revoked_htlcs);
- return Ok((None, Vec::new(), Vec::new(), None, self.channel_monitor.clone()));
+ return Ok((None, Vec::new(), Vec::new(), None, monitor_update));
}
match self.free_holding_cell_htlcs()? {
- Some(mut commitment_update) => {
- commitment_update.0.update_fail_htlcs.reserve(update_fail_htlcs.len());
+ Some((mut commitment_update, mut additional_update)) => {
+ commitment_update.update_fail_htlcs.reserve(update_fail_htlcs.len());
for fail_msg in update_fail_htlcs.drain(..) {
- commitment_update.0.update_fail_htlcs.push(fail_msg);
+ commitment_update.update_fail_htlcs.push(fail_msg);
}
- commitment_update.0.update_fail_malformed_htlcs.reserve(update_fail_malformed_htlcs.len());
+ commitment_update.update_fail_malformed_htlcs.reserve(update_fail_malformed_htlcs.len());
for fail_msg in update_fail_malformed_htlcs.drain(..) {
- commitment_update.0.update_fail_malformed_htlcs.push(fail_msg);
+ commitment_update.update_fail_malformed_htlcs.push(fail_msg);
}
- Ok((Some(commitment_update.0), to_forward_infos, revoked_htlcs, None, commitment_update.1))
+
+ // free_holding_cell_htlcs may bump latest_monitor_update_id multiple times, but we want
+ // update_ids to be strictly increasing by one, so reset it here.
+ self.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+
+ Ok((Some(commitment_update), to_forward_infos, revoked_htlcs, None, monitor_update))
},
None => {
if require_commitment {
- let (commitment_signed, monitor_update) = self.send_commitment_no_status_check()?;
+ let (commitment_signed, mut additional_update) = self.send_commitment_no_status_check()?;
+
+ // send_commitment_no_status_check may bump latest_monitor_update_id, but we want
+ // update_ids to be strictly increasing by one, so reset it here.
+ self.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+
Ok((Some(msgs::CommitmentUpdate {
update_add_htlcs: Vec::new(),
update_fulfill_htlcs: Vec::new(),
commitment_signed
}), to_forward_infos, revoked_htlcs, None, monitor_update))
} else {
- Ok((None, to_forward_infos, revoked_htlcs, self.maybe_propose_first_closing_signed(fee_estimator), self.channel_monitor.clone()))
+ Ok((None, to_forward_infos, revoked_htlcs, self.maybe_propose_first_closing_signed(fee_estimator), monitor_update))
}
}
}
})
}
- pub fn send_update_fee_and_commit(&mut self, feerate_per_kw: u64) -> Result<Option<(msgs::UpdateFee, msgs::CommitmentSigned, ChannelMonitor<ChanSigner>)>, ChannelError<ChanSigner>> {
+ pub fn send_update_fee_and_commit(&mut self, feerate_per_kw: u64) -> Result<Option<(msgs::UpdateFee, msgs::CommitmentSigned, ChannelMonitorUpdate)>, ChannelError> {
match self.send_update_fee(feerate_per_kw) {
Some(update_fee) => {
let (commitment_signed, monitor_update) = self.send_commitment_no_status_check()?;
/// which failed. The messages which were generated from that call which generated the
/// monitor update failure must *not* have been sent to the remote end, and must instead
/// have been dropped. They will be regenerated when monitor_updating_restored is called.
- pub fn monitor_update_failed(&mut self, resend_raa: bool, resend_commitment: bool, mut pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>, mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>) {
+ pub fn monitor_update_failed(&mut self, resend_raa: bool, resend_commitment: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>, mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>) {
assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, 0);
self.monitor_pending_revoke_and_ack = resend_raa;
self.monitor_pending_commitment_signed = resend_commitment;
/// Indicates that the latest ChannelMonitor update has been committed by the client
/// successfully and we should restore normal operation. Returns messages which should be sent
/// to the remote side.
- pub fn monitor_updating_restored(&mut self) -> (Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder, Vec<(PendingForwardHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, bool, Option<msgs::FundingLocked>) {
+ pub fn monitor_updating_restored(&mut self) -> (Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder, Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, bool, Option<msgs::FundingLocked>) {
assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, ChannelState::MonitorUpdateFailed as u32);
self.channel_state &= !(ChannelState::MonitorUpdateFailed as u32);
(raa, commitment_update, order, forwards, failures, needs_broadcast_safe, funding_locked)
}
- pub fn update_fee(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::UpdateFee) -> Result<(), ChannelError<ChanSigner>> {
+ pub fn update_fee(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::UpdateFee) -> Result<(), ChannelError> {
if self.channel_outbound {
return Err(ChannelError::Close("Non-funding remote tried to update channel fee"));
}
/// May panic if some calls other than message-handling calls (which will all Err immediately)
/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
- pub fn channel_reestablish(&mut self, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, Option<ChannelMonitor<ChanSigner>>, RAACommitmentOrder, Option<msgs::Shutdown>), ChannelError<ChanSigner>> {
+ pub fn channel_reestablish(&mut self, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, Option<ChannelMonitorUpdate>, RAACommitmentOrder, Option<msgs::Shutdown>), ChannelError> {
if self.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
// almost certainly indicates we are going to end up out-of-sync in some way, so we
return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided"));
}
if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_local_commitment_transaction_number {
- self.channel_monitor.provide_rescue_remote_commitment_tx_info(data_loss.my_current_per_commitment_point);
- return Err(ChannelError::CloseDelayBroadcast { msg: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting", update: Some(self.channel_monitor.clone())});
+ self.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::RescueRemoteCommitmentTXInfo {
+ their_current_per_commitment_point: data_loss.my_current_per_commitment_point
+ }]
+ };
+ self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone()).unwrap();
+ return Err(ChannelError::CloseDelayBroadcast {
+ msg: "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting",
+ update: monitor_update
+ });
}
},
OptionalField::Absent => {}
match self.free_holding_cell_htlcs() {
Err(ChannelError::Close(msg)) => return Err(ChannelError::Close(msg)),
Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast { .. }) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
- Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor), self.resend_order.clone(), shutdown_msg)),
+ Ok(Some((commitment_update, monitor_update))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(monitor_update), self.resend_order.clone(), shutdown_msg)),
Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg)),
}
} else {
})
}
- pub fn shutdown(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>, Vec<(HTLCSource, PaymentHash)>), ChannelError<ChanSigner>> {
+ pub fn shutdown(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>, Vec<(HTLCSource, PaymentHash)>), ChannelError> {
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish"));
}
tx.input[0].witness.push(self.get_funding_redeemscript().into_bytes());
}
- pub fn closing_signed(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::ClosingSigned) -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError<ChanSigner>> {
+ pub fn closing_signed(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::ClosingSigned) -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError> {
if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown"));
}
if self.channel_state < ChannelState::FundingCreated as u32 {
panic!("Can't get a channel monitor until funding has been created");
}
- &mut self.channel_monitor
+ self.channel_monitor.as_mut().unwrap()
}
/// Guaranteed to be Some after both FundingLocked messages have been exchanged (and, thus,
/// Returns the funding_txo we either got from our peer, or were given by
/// get_outbound_funding_created.
pub fn get_funding_txo(&self) -> Option<OutPoint> {
- self.channel_monitor.get_funding_txo()
+ self.funding_txo
}
/// Allowed in any state (including after shutdown)
self.channel_update_count
}
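+ /// Gets the update_id of the latest ChannelMonitorUpdate this channel has generated.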
+ pub fn get_latest_monitor_update_id(&self) -> u64 {
+ self.latest_monitor_update_id
+ }
+
pub fn should_announce(&self) -> bool {
self.config.announced_channel
}
}
if non_shutdown_state & !(ChannelState::TheirFundingLocked as u32) == ChannelState::FundingSent as u32 {
for (ref tx, index_in_block) in txn_matched.iter().zip(indexes_of_txn_matched) {
- if tx.txid() == self.channel_monitor.get_funding_txo().unwrap().txid {
- let txo_idx = self.channel_monitor.get_funding_txo().unwrap().index as usize;
+ if tx.txid() == self.funding_txo.unwrap().txid {
+ let txo_idx = self.funding_txo.unwrap().index as usize;
if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.get_funding_redeemscript().to_v0_p2wsh() ||
tx.output[txo_idx].value != self.channel_value_satoshis {
if self.channel_outbound {
}
if header.bitcoin_hash() != self.last_block_connected {
self.last_block_connected = header.bitcoin_hash();
- self.channel_monitor.last_block_hash = self.last_block_connected;
+ if let Some(channel_monitor) = self.channel_monitor.as_mut() {
+ channel_monitor.last_block_hash = self.last_block_connected;
+ }
if self.funding_tx_confirmations > 0 {
if self.funding_tx_confirmations == self.minimum_depth as u64 {
let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
self.funding_tx_confirmations = self.minimum_depth as u64 - 1;
}
self.last_block_connected = header.bitcoin_hash();
- self.channel_monitor.last_block_hash = self.last_block_connected;
+ if let Some(channel_monitor) = self.channel_monitor.as_mut() {
+ channel_monitor.last_block_hash = self.last_block_connected;
+ }
false
}
}
/// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
- fn get_outbound_funding_created_signature(&mut self) -> Result<(Signature, Transaction), ChannelError<ChanSigner>> {
+ fn get_outbound_funding_created_signature(&mut self) -> Result<(Signature, Transaction), ChannelError> {
let remote_keys = self.build_remote_transaction_keys()?;
let remote_initial_commitment_tx = self.build_commitment_transaction(self.cur_remote_commitment_transaction_number, &remote_keys, false, false, self.feerate_per_kw).0;
Ok((self.local_keys.sign_remote_commitment(self.feerate_per_kw, &remote_initial_commitment_tx, &remote_keys, &Vec::new(), self.our_to_self_delay, &self.secp_ctx)
/// Note that channel_id changes during this call!
/// Do NOT broadcast the funding transaction until after a successful funding_signed call!
/// If an Err is returned, it is a ChannelError::Close.
- pub fn get_outbound_funding_created(&mut self, funding_txo: OutPoint) -> Result<(msgs::FundingCreated, ChannelMonitor<ChanSigner>), ChannelError<ChanSigner>> {
+ pub fn get_outbound_funding_created(&mut self, funding_txo: OutPoint) -> Result<(msgs::FundingCreated, ChannelMonitor<ChanSigner>), ChannelError> {
if !self.channel_outbound {
panic!("Tried to create outbound funding_created message on an inbound channel!");
}
if self.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
}
- if self.channel_monitor.get_min_seen_secret() != (1 << 48) ||
+ if self.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
self.cur_remote_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
self.cur_local_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
}
- let funding_txo_script = self.get_funding_redeemscript().to_v0_p2wsh();
- self.channel_monitor.set_funding_info((funding_txo, funding_txo_script));
-
+ self.funding_txo = Some(funding_txo.clone());
let (our_signature, commitment_tx) = match self.get_outbound_funding_created_signature() {
Ok(res) => res,
Err(e) => {
log_error!(self, "Got bad signatures: {:?}!", e);
- self.channel_monitor.unset_funding_info();
+ self.funding_txo = None;
return Err(e);
}
};
let temporary_channel_id = self.channel_id;
// Now that we're past error-generating stuff, update our local state:
- self.channel_monitor.provide_latest_remote_commitment_tx_info(&commitment_tx, Vec::new(), self.cur_remote_commitment_transaction_number, self.their_cur_commitment_point.unwrap());
+
+ let their_pubkeys = self.their_pubkeys.as_ref().unwrap();
+ let funding_redeemscript = self.get_funding_redeemscript();
+ let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
+ macro_rules! create_monitor {
+ () => { {
+ let mut channel_monitor = ChannelMonitor::new(self.local_keys.clone(),
+ &self.shutdown_pubkey, self.our_to_self_delay,
+ &self.destination_script, (funding_txo, funding_txo_script.clone()),
+ &their_pubkeys.htlc_basepoint, &their_pubkeys.delayed_payment_basepoint,
+ self.their_to_self_delay, funding_redeemscript.clone(), self.channel_value_satoshis,
+ self.get_commitment_transaction_number_obscure_factor(),
+ self.logger.clone());
+
+ channel_monitor.provide_latest_remote_commitment_tx_info(&commitment_tx, Vec::new(), self.cur_remote_commitment_transaction_number, self.their_cur_commitment_point.unwrap());
+ channel_monitor
+ } }
+ }
+
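+ // As in funding_created, create two identical monitors: one to keep locally and one to
+ // hand back to the caller.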
+ self.channel_monitor = Some(create_monitor!());
+ let channel_monitor = create_monitor!();
+
self.channel_state = ChannelState::FundingCreated as u32;
self.channel_id = funding_txo.to_channel_id();
self.cur_remote_commitment_transaction_number -= 1;
funding_txid: funding_txo.txid,
funding_output_index: funding_txo.index,
signature: our_signature
- }, self.channel_monitor.clone()))
+ }, channel_monitor))
}
/// Gets an UnsignedChannelAnnouncement, as well as a signature covering it using our
/// closing).
/// Note that the "channel must be funded" requirement is stricter than BOLT 7 requires - see
/// https://github.com/lightningnetwork/lightning-rfc/issues/468
- pub fn get_channel_announcement(&self, our_node_id: PublicKey, chain_hash: Sha256dHash) -> Result<(msgs::UnsignedChannelAnnouncement, Signature), ChannelError<ChanSigner>> {
+ pub fn get_channel_announcement(&self, our_node_id: PublicKey, chain_hash: Sha256dHash) -> Result<(msgs::UnsignedChannelAnnouncement, Signature), ChannelError> {
if !self.config.announced_channel {
return Err(ChannelError::Ignore("Channel is not available for public announcements"));
}
assert_eq!(self.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
assert_ne!(self.cur_remote_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
let data_loss_protect = if self.cur_remote_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
- let remote_last_secret = self.channel_monitor.get_secret(self.cur_remote_commitment_transaction_number + 2).unwrap();
+ let remote_last_secret = self.commitment_secrets.get_secret(self.cur_remote_commitment_transaction_number + 2).unwrap();
log_trace!(self, "Enough info to generate a Data Loss Protect with per_commitment_secret {}", log_bytes!(remote_last_secret));
OptionalField::Present(DataLossProtect {
your_last_per_commitment_secret: remote_last_secret,
/// HTLCs on the wire or we wouldn't be able to determine what they actually ACK'ed.
/// You MUST call send_commitment prior to any other calls on this Channel
/// If an Err is returned, it's a ChannelError::Ignore!
- pub fn send_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError<ChanSigner>> {
+ pub fn send_htlc(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelFunded as u32) {
return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down"));
}
/// Always returns a ChannelError::Close if an immediately-preceding (read: the
/// last call to this Channel) send_htlc returned Ok(Some(_)) and there is an Err.
/// May panic if called except immediately after a successful, Ok(Some(_))-returning send_htlc.
- pub fn send_commitment(&mut self) -> Result<(msgs::CommitmentSigned, ChannelMonitor<ChanSigner>), ChannelError<ChanSigner>> {
+ pub fn send_commitment(&mut self) -> Result<(msgs::CommitmentSigned, ChannelMonitorUpdate), ChannelError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
panic!("Cannot create commitment tx until channel is fully established");
}
self.send_commitment_no_status_check()
}
/// Only fails in case of bad keys
- fn send_commitment_no_status_check(&mut self) -> Result<(msgs::CommitmentSigned, ChannelMonitor<ChanSigner>), ChannelError<ChanSigner>> {
+ fn send_commitment_no_status_check(&mut self) -> Result<(msgs::CommitmentSigned, ChannelMonitorUpdate), ChannelError> {
// We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
// fail to generate this, we still are at least at a position where upgrading their status
// is acceptable.
let (res, remote_commitment_tx, htlcs) = match self.send_commitment_no_state_update() {
Ok((res, (remote_commitment_tx, mut htlcs))) => {
// Update state now that we've passed all the can-fail calls...
- let htlcs_no_ref = htlcs.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
+ let htlcs_no_ref: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
+ htlcs.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
(res, remote_commitment_tx, htlcs_no_ref)
},
Err(e) => return Err(e),
};
- self.channel_monitor.provide_latest_remote_commitment_tx_info(&remote_commitment_tx, htlcs, self.cur_remote_commitment_transaction_number, self.their_cur_commitment_point.unwrap());
+ self.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::LatestRemoteCommitmentTXInfo {
+ unsigned_commitment_tx: remote_commitment_tx.clone(),
+ htlc_outputs: htlcs.clone(),
+ commitment_number: self.cur_remote_commitment_transaction_number,
+ their_revocation_point: self.their_cur_commitment_point.unwrap()
+ }]
+ };
+ self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone()).unwrap();
self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
- Ok((res, self.channel_monitor.clone()))
+ Ok((res, monitor_update))
}
/// Only fails in case of bad keys. Used for channel_reestablish commitment_signed generation
/// when we shouldn't change HTLC/channel state.
- fn send_commitment_no_state_update(&self) -> Result<(msgs::CommitmentSigned, (Transaction, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError<ChanSigner>> {
+ fn send_commitment_no_state_update(&self) -> Result<(msgs::CommitmentSigned, (Transaction, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> {
let mut feerate_per_kw = self.feerate_per_kw;
if let Some(feerate) = self.pending_update_fee {
if self.channel_outbound {
/// to send to the remote peer in one go.
/// Shorthand for calling send_htlc() followed by send_commitment(), see docs on those for
/// more info.
- pub fn send_htlc_and_commit(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket) -> Result<Option<(msgs::UpdateAddHTLC, msgs::CommitmentSigned, ChannelMonitor<ChanSigner>)>, ChannelError<ChanSigner>> {
+ pub fn send_htlc_and_commit(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket) -> Result<Option<(msgs::UpdateAddHTLC, msgs::CommitmentSigned, ChannelMonitorUpdate)>, ChannelError> {
match self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet)? {
Some(update_add_htlc) => {
let (commitment_signed, monitor_update) = self.send_commitment_no_status_check()?;
self.channel_state = ChannelState::ShutdownComplete as u32;
self.channel_update_count += 1;
- (self.channel_monitor.get_latest_local_commitment_txn(), dropped_outbound_htlcs)
+ if self.channel_monitor.is_some() {
+ (self.channel_monitor.as_mut().unwrap().get_latest_local_commitment_txn(), dropped_outbound_htlcs)
+ } else {
+ // We haven't even signed the funding tx yet, so there's nothing to broadcast
+ (Vec::new(), dropped_outbound_htlcs)
+ }
}
}
self.channel_outbound.write(writer)?;
self.channel_value_satoshis.write(writer)?;
+ self.latest_monitor_update_id.write(writer)?;
+
self.local_keys.write(writer)?;
self.shutdown_pubkey.write(writer)?;
+ self.destination_script.write(writer)?;
self.cur_local_commitment_transaction_number.write(writer)?;
self.cur_remote_commitment_transaction_number.write(writer)?;
None => 0u8.write(writer)?,
}
+ write_option!(self.funding_txo);
write_option!(self.funding_tx_confirmed_in);
write_option!(self.short_channel_id);
write_option!(self.their_shutdown_scriptpubkey);
- self.channel_monitor.write_for_disk(writer)?;
+ self.commitment_secrets.write(writer)?;
+
+ self.channel_monitor.as_ref().unwrap().write_for_disk(writer)?;
Ok(())
}
}
let channel_outbound = Readable::read(reader)?;
let channel_value_satoshis = Readable::read(reader)?;
+ let latest_monitor_update_id = Readable::read(reader)?;
+
let local_keys = Readable::read(reader)?;
let shutdown_pubkey = Readable::read(reader)?;
+ let destination_script = Readable::read(reader)?;
let cur_local_commitment_transaction_number = Readable::read(reader)?;
let cur_remote_commitment_transaction_number = Readable::read(reader)?;
_ => return Err(DecodeError::InvalidValue),
};
+ let funding_txo = Readable::read(reader)?;
let funding_tx_confirmed_in = Readable::read(reader)?;
let short_channel_id = Readable::read(reader)?;
let their_node_id = Readable::read(reader)?;
let their_shutdown_scriptpubkey = Readable::read(reader)?;
+ let commitment_secrets = Readable::read(reader)?;
+
let (monitor_last_block, channel_monitor) = ReadableArgs::read(reader, logger.clone())?;
// We drop the ChannelMonitor's last block connected hash cause we don't actually bother
// doing full block connection operations on the internal ChannelMonitor copies
secp_ctx: Secp256k1::new(),
channel_value_satoshis,
+ latest_monitor_update_id,
+
local_keys,
shutdown_pubkey,
+ destination_script,
cur_local_commitment_transaction_number,
cur_remote_commitment_transaction_number,
last_sent_closing_fee,
+ funding_txo,
funding_tx_confirmed_in,
short_channel_id,
last_block_connected,
their_shutdown_scriptpubkey,
- channel_monitor,
+ channel_monitor: Some(channel_monitor),
+ commitment_secrets,
network_sync: UpdateStatus::Fresh,
chan.our_dust_limit_satoshis = 546;
let funding_info = OutPoint::new(Sha256dHash::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), 0);
- chan.channel_monitor.set_funding_info((funding_info, Script::new()));
+ chan.funding_txo = Some(funding_info);
let their_pubkeys = ChannelPublicKeys {
funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
// forward the HTLC with information it will give back to us when it does so, or if it should Fail
// the HTLC with the relevant message for the Channel to handle giving to the remote peer.
//
-// When a Channel forwards an HTLC to its peer, it will give us back the PendingForwardHTLCInfo
-// which we will use to construct an outbound HTLC, with a relevant HTLCSource::PreviousHopData
-// filled in to indicate where it came from (which we can use to either fail-backwards or fulfill
-// the HTLC backwards along the relevant path).
+// Once said HTLC is committed in the Channel, if the PendingHTLCStatus indicated Forward, the
+// Channel will return the PendingHTLCInfo back to us, and we will create an HTLCForwardInfo
+// with it to track where it came from (in case of onwards-forward error), waiting a random delay
+// before we forward it.
+//
+// We will then use HTLCForwardInfo's PendingHTLCInfo to construct an outbound HTLC, with a
+// relevant HTLCSource::PreviousHopData filled in to indicate where it came from (which we can use
+// to either fail-backwards or fulfill the HTLC backwards along the relevant path).
// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
// our payment, which we can use to decode errors or inform the user that the payment was sent.
-/// Stores the info we will need to send when we want to forward an HTLC onwards
+
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
-pub(super) struct PendingForwardHTLCInfo {
+pub(super) struct PendingHTLCInfo {
onion_packet: Option<msgs::OnionPacket>,
incoming_shared_secret: [u8; 32],
payment_hash: PaymentHash,
/// Stores whether we can't forward an HTLC or relevant forwarding info
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
pub(super) enum PendingHTLCStatus {
- Forward(PendingForwardHTLCInfo),
+ Forward(PendingHTLCInfo),
Fail(HTLCFailureMsg),
}
+pub(super) enum HTLCForwardInfo {
+ AddHTLC {
+ prev_short_channel_id: u64,
+ prev_htlc_id: u64,
+ forward_info: PendingHTLCInfo,
+ },
+ FailHTLC {
+ htlc_id: u64,
+ err_packet: msgs::OnionErrorPacket,
+ },
+}
+
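As a rough illustration of how these two variants get consumed downstream (a hypothetical sketch of the shape of the handler, not ChannelManager's actual forwarding code):

fn handle_forward(info: HTLCForwardInfo) {
	match info {
		HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
			// Build the outbound HTLC from forward_info, recording
			// (prev_short_channel_id, prev_htlc_id) as HTLCSource::PreviousHopData
			// so we can later fail-backwards or fulfill along this path.
			let _ = (prev_short_channel_id, prev_htlc_id, forward_info);
		},
		HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
			// Relay an update_fail_htlc for htlc_id carrying err_packet back to
			// the previous hop after the batching delay.
			let _ = (htlc_id, err_packet);
		},
	}
}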
/// Tracks the inbound corresponding to an outbound HTLC
#[derive(Clone, PartialEq)]
pub(super) struct HTLCPreviousHopData {
}
}
#[inline]
- fn from_chan_no_close<ChanSigner: ChannelKeys>(err: ChannelError<ChanSigner>, channel_id: [u8; 32]) -> Self {
+ fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
Self {
err: match err {
ChannelError::Ignore(msg) => LightningError {
/// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
-pub(super) enum HTLCForwardInfo {
- AddHTLC {
- prev_short_channel_id: u64,
- prev_htlc_id: u64,
- forward_info: PendingForwardHTLCInfo,
- },
- FailHTLC {
- htlc_id: u64,
- err_packet: msgs::OnionErrorPacket,
- },
-}
-
/// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
/// be sent in the order they appear in the return value, however sometimes the order needs to be
/// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order
/// short channel id -> forward infos. Key of 0 means payments received
/// Note that while this is held in the same mutex as the channels themselves, no consistency
/// guarantees are made about the existence of a channel with the short id here, nor the short
- /// ids in the PendingForwardHTLCInfo!
+ /// ids in the PendingHTLCInfo!
pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
/// payment_hash -> Vec<(amount_received, htlc_source)> for tracking things that were to us and
/// can be failed/claimed by the user
///
/// Note that you can be a bit lazier about writing out ChannelManager than you can be with
/// ChannelMonitors. With ChannelMonitors you MUST write each monitor update out to disk before
-/// returning from ManyChannelMonitor::add_update_monitor, with ChannelManagers, writing updates
+/// returning from ManyChannelMonitor::add_/update_monitor; with ChannelManagers, writing updates
/// happens out-of-band (and will prevent any other ChannelManager operations from occurring during
/// the serialization process). If the deserialized version is out-of-date compared to the
/// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the
match $res {
Ok(res) => res,
Err(ChannelError::Ignore(msg)) => {
- break Err(MsgHandleErrInternal::from_chan_no_close::<ChanSigner>(ChannelError::Ignore(msg), $entry.key().clone()))
+ break Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
},
Err(ChannelError::Close(msg)) => {
log_trace!($self, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
match $res {
Ok(res) => res,
Err(ChannelError::Ignore(msg)) => {
- return Err(MsgHandleErrInternal::from_chan_no_close::<ChanSigner>(ChannelError::Ignore(msg), $entry.key().clone()))
+ return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
},
Err(ChannelError::Close(msg)) => {
log_trace!($self, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
if let Some(short_id) = chan.get_short_channel_id() {
$channel_state.short_to_id.remove(&short_id);
}
- if let Some(update) = update {
- if let Err(e) = $self.monitor.add_update_monitor(update.get_funding_txo().unwrap(), update.clone()) {
- match e {
- // Upstream channel is dead, but we want at least to fail backward HTLCs to save
- // downstream channels. In case of PermanentFailure, we are not going to be able
- // to claim back to_remote output on remote commitment transaction. Doesn't
- // make a difference here, we are concern about HTLCs circuit, not onchain funds.
- ChannelMonitorUpdateErr::PermanentFailure => {},
- ChannelMonitorUpdateErr::TemporaryFailure => {},
- }
+ if let Err(e) = $self.monitor.update_monitor(chan.get_funding_txo().unwrap(), update) {
+ match e {
+ // The upstream channel is dead, but we still want to fail HTLCs backward to save
+ // downstream channels. In case of PermanentFailure we are not going to be able to
+ // claim back the to_remote output on the remote commitment transaction, but that
+ // makes no difference here: we care about the HTLC circuit, not onchain funds.
+ ChannelMonitorUpdateErr::PermanentFailure => {},
+ ChannelMonitorUpdateErr::TemporaryFailure => {},
}
}
let mut shutdown_res = chan.force_shutdown();
} else if $resend_commitment { "commitment" }
else if $resend_raa { "RAA" }
else { "nothing" },
- (&$failed_forwards as &Vec<(PendingForwardHTLCInfo, u64)>).len(),
+ (&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(),
(&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len());
if !$resend_commitment {
debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
}
$entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
- Err(MsgHandleErrInternal::from_chan_no_close::<ChanSigner>(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()))
+ Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()))
},
}
}
// instead we stay symmetric with the forwarding case, only responding (after a
// delay) once they've sent us a commitment_signed!
- PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
+ PendingHTLCStatus::Forward(PendingHTLCInfo {
onion_packet: None,
payment_hash: msg.payment_hash.clone(),
short_channel_id: 0,
},
};
- PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
+ PendingHTLCStatus::Forward(PendingHTLCInfo {
onion_packet: Some(outgoing_packet),
payment_hash: msg.payment_hash.clone(),
short_channel_id: short_channel_id,
};
channel_state = Some(self.channel_state.lock().unwrap());
- if let &PendingHTLCStatus::Forward(PendingForwardHTLCInfo { ref onion_packet, ref short_channel_id, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
+ if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref onion_packet, ref short_channel_id, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject them in the body here
let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
let forwarding_id = match id_option {
first_hop_htlc_msat: htlc_msat,
}, onion_packet), channel_state, chan)
} {
- Some((update_add, commitment_signed, chan_monitor)) => {
- if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ Some((update_add, commitment_signed, monitor_update)) => {
+ if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
// Note that MonitorUpdateFailed here indicates (per function docs)
// that we will re-send the commitment update once we unfreeze the monitor
}
};
// Because we have exclusive ownership of the channel here we can release the channel_state
- // lock before add_update_monitor
- if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ // lock before add_monitor
+ if let Err(e) = self.monitor.add_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
match e {
ChannelMonitorUpdateErr::PermanentFailure => {
{
}
if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
- let (commitment_msg, monitor) = match chan.get_mut().send_commitment() {
+ let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment() {
Ok(res) => res,
Err(e) => {
// We surely failed send_commitment due to bad keys, in that case
}
}
};
- if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+ if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
handle_errors.push((chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
continue;
}
let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
match chan.get_mut().get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
Ok((msgs, monitor_option)) => {
- if let Some(chan_monitor) = monitor_option {
- if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ if let Some(monitor_update) = monitor_option {
+ if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
if was_frozen_for_monitor {
assert!(msgs.is_none());
} else {
PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key)
}
- /// Used to restore channels to normal operation after a
+ /// Restores a single, given channel to normal operation after a
/// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update
/// operation.
- pub fn test_restore_channel_monitor(&self) {
+ ///
+ /// All ChannelMonitor updates up to and including highest_applied_update_id must have been
+ /// fully committed in every copy of the given channels' ChannelMonitors.
+ ///
+ /// Note that calling with a highest_applied_update_id other than the id of the current
+ /// latest ChannelMonitorUpdate has no effect, and a single call to this function after
+ /// multiple ChannelMonitorUpdateErr::TemporaryFailures is fine. The
+ /// highest_applied_update_id field exists largely to prevent races between this and
+ /// concurrent update_monitor calls.
+ ///
+ /// Thus, the anticipated use is, at a high level:
+ /// 1) You register a ManyChannelMonitor with this ChannelManager,
+ /// 2) it stores each update to disk, and begins updating any remote (eg watchtower) copies of
+ /// said ChannelMonitors as it can, returning ChannelMonitorUpdateErr::TemporaryFailures
+ /// any time it cannot do so instantly,
+ /// 3) update(s) are applied to each remote copy of a ChannelMonitor,
+ /// 4) once all remote copies are updated, you call this function with the update_id that
+ /// completed, and once it is the latest the Channel will be re-enabled.
+ pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
+ let _ = self.total_consistency_lock.read().unwrap();
+
let mut close_results = Vec::new();
let mut htlc_forwards = Vec::new();
let mut htlc_failures = Vec::new();
let mut pending_events = Vec::new();
- let _ = self.total_consistency_lock.read().unwrap();
{
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
let short_to_id = &mut channel_state.short_to_id;
let pending_msg_events = &mut channel_state.pending_msg_events;
- channel_state.by_id.retain(|_, channel| {
- if channel.is_awaiting_monitor_update() {
- let chan_monitor = channel.channel_monitor().clone();
- if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
- match e {
- ChannelMonitorUpdateErr::PermanentFailure => {
- // TODO: There may be some pending HTLCs that we intended to fail
- // backwards when a monitor update failed. We should make sure
- // knowledge of those gets moved into the appropriate in-memory
- // ChannelMonitor and they get failed backwards once we get
- // on-chain confirmations.
- // Note I think #198 addresses this, so once it's merged a test
- // should be written.
- if let Some(short_id) = channel.get_short_channel_id() {
- short_to_id.remove(&short_id);
- }
- close_results.push(channel.force_shutdown());
- if let Ok(update) = self.get_channel_update(&channel) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- false
- },
- ChannelMonitorUpdateErr::TemporaryFailure => true,
- }
- } else {
- let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored();
- if !pending_forwards.is_empty() {
- htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards));
- }
- htlc_failures.append(&mut pending_failures);
+ let channel = match channel_state.by_id.get_mut(&funding_txo.to_channel_id()) {
+ Some(chan) => chan,
+ None => return,
+ };
+ if !channel.is_awaiting_monitor_update() || channel.get_latest_monitor_update_id() != highest_applied_update_id {
+ return;
+ }
- macro_rules! handle_cs { () => {
- if let Some(update) = commitment_update {
- pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: channel.get_their_node_id(),
- updates: update,
- });
- }
- } }
- macro_rules! handle_raa { () => {
- if let Some(revoke_and_ack) = raa {
- pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
- node_id: channel.get_their_node_id(),
- msg: revoke_and_ack,
- });
- }
- } }
- match order {
- RAACommitmentOrder::CommitmentFirst => {
- handle_cs!();
- handle_raa!();
- },
- RAACommitmentOrder::RevokeAndACKFirst => {
- handle_raa!();
- handle_cs!();
- },
- }
- if needs_broadcast_safe {
- pending_events.push(events::Event::FundingBroadcastSafe {
- funding_txo: channel.get_funding_txo().unwrap(),
- user_channel_id: channel.get_user_id(),
- });
- }
- if let Some(msg) = funding_locked {
- pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
- node_id: channel.get_their_node_id(),
- msg,
- });
- if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
- pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
- node_id: channel.get_their_node_id(),
- msg: announcement_sigs,
- });
- }
- short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
- }
- true
- }
- } else { true }
- });
+ let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored();
+ if !pending_forwards.is_empty() {
+ htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards));
+ }
+ htlc_failures.append(&mut pending_failures);
+
+ macro_rules! handle_cs { () => {
+ if let Some(update) = commitment_update {
+ pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: channel.get_their_node_id(),
+ updates: update,
+ });
+ }
+ } }
+ macro_rules! handle_raa { () => {
+ if let Some(revoke_and_ack) = raa {
+ pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+ node_id: channel.get_their_node_id(),
+ msg: revoke_and_ack,
+ });
+ }
+ } }
+ match order {
+ RAACommitmentOrder::CommitmentFirst => {
+ handle_cs!();
+ handle_raa!();
+ },
+ RAACommitmentOrder::RevokeAndACKFirst => {
+ handle_raa!();
+ handle_cs!();
+ },
+ }
+ if needs_broadcast_safe {
+ pending_events.push(events::Event::FundingBroadcastSafe {
+ funding_txo: channel.get_funding_txo().unwrap(),
+ user_channel_id: channel.get_user_id(),
+ });
+ }
+ if let Some(msg) = funding_locked {
+ pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+ node_id: channel.get_their_node_id(),
+ msg,
+ });
+ if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
+ pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+ node_id: channel.get_their_node_id(),
+ msg: announcement_sigs,
+ });
+ }
+ short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
+ }
}
self.pending_events.lock().unwrap().append(&mut pending_events);
}
};
// Because we have exclusive ownership of the channel here we can release the channel_state
- // lock before add_update_monitor
- if let Err(e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
+ // lock before add_monitor
+ if let Err(e) = self.monitor.add_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
match e {
ChannelMonitorUpdateErr::PermanentFailure => {
// Note that we reply with the new channel_id in error messages if we gave up on the
if chan.get().get_their_node_id() != *their_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
- let chan_monitor = try_chan_entry!(self, chan.get_mut().funding_signed(&msg), channel_state, chan);
- if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ let monitor_update = match chan.get_mut().funding_signed(&msg) {
+ Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan),
+ Err((Some(monitor_update), e)) => {
+ assert!(chan.get().is_awaiting_monitor_update());
+ let _ = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update);
+ try_chan_entry!(self, Err(e), channel_state, chan);
+ unreachable!();
+ },
+ Ok(update) => update,
+ };
+ if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
}
(chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
// If the update_add is completely bogus, the call will Err and we will close,
// but if we've sent a shutdown and they haven't acknowledged it yet, we just
// want to reject the new HTLC and fail it backwards instead of forwarding.
- if let PendingHTLCStatus::Forward(PendingForwardHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info {
+ if let PendingHTLCStatus::Forward(PendingHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info {
let chan_update = self.get_channel_update(chan.get());
pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
if (msg.failure_code & 0x8000) == 0 {
- let chan_err: ChannelError<ChanSigner> = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set");
+ let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set");
try_chan_entry!(self, Err(chan_err), channel_state, chan);
}
try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan);
if chan.get().get_their_node_id() != *their_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
- let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) =
- try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &*self.fee_estimator), channel_state, chan);
- if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ let (revoke_and_ack, commitment_signed, closing_signed, monitor_update) =
+ match chan.get_mut().commitment_signed(&msg, &*self.fee_estimator) {
+ Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan),
+ Err((Some(update), e)) => {
+ assert!(chan.get().is_awaiting_monitor_update());
+ let _ = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), update);
+ try_chan_entry!(self, Err(e), channel_state, chan);
+ unreachable!();
+ },
+ Ok(res) => res
+ };
+ if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
//TODO: Rebroadcast closing_signed if present on monitor update restoration
}
}
#[inline]
- fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingForwardHTLCInfo, u64)>)]) {
+ fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingHTLCInfo, u64)>)]) {
for &mut (prev_short_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
let mut forward_event = None;
if !pending_forwards.is_empty() {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
- let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) =
+ let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update) =
try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &*self.fee_estimator), channel_state, chan);
- if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
if was_frozen_for_monitor {
assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
return Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA"));
let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
if self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }).is_err() ||
self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }).is_err() {
- let chan_err: ChannelError<ChanSigner> = ChannelError::Close("Bad announcement_signatures node_signature");
+ let chan_err: ChannelError = ChannelError::Close("Bad announcement_signatures node_signature");
try_chan_entry!(self, Err(chan_err), channel_state, chan);
}
if chan.get().get_their_node_id() != *their_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
- let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, mut order, shutdown) =
+ let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, mut order, shutdown) =
try_chan_entry!(self, chan.get_mut().channel_reestablish(msg), channel_state, chan);
- if let Some(monitor) = channel_monitor {
- if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+ if let Some(monitor_update) = monitor_update_opt {
+ if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
// channel_reestablish doesn't guarantee the order it returns is sensical
// for the messages it returns, but if we're setting what messages to
// re-transmit on monitor update success, we need to make sure it is sane.
return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"});
}
their_node_id = chan.get().get_their_node_id();
- if let Some((update_fee, commitment_signed, chan_monitor)) =
+ if let Some((update_fee, commitment_signed, monitor_update)) =
break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw), channel_state, chan)
{
- if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ if let Err(_e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
unimplemented!();
}
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
-impl Writeable for PendingForwardHTLCInfo {
+impl Writeable for PendingHTLCInfo {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
self.onion_packet.write(writer)?;
self.incoming_shared_secret.write(writer)?;
}
}
-impl<R: ::std::io::Read> Readable<R> for PendingForwardHTLCInfo {
- fn read(reader: &mut R) -> Result<PendingForwardHTLCInfo, DecodeError> {
- Ok(PendingForwardHTLCInfo {
+impl<R: ::std::io::Read> Readable<R> for PendingHTLCInfo {
+ fn read(reader: &mut R) -> Result<PendingHTLCInfo, DecodeError> {
+ Ok(PendingHTLCInfo {
onion_packet: Readable::read(reader)?,
incoming_shared_secret: Readable::read(reader)?,
payment_hash: Readable::read(reader)?,
return Err(DecodeError::InvalidValue);
}
- let funding_txo = channel.channel_monitor().get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+ let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
if channel.get_cur_local_commitment_transaction_number() != monitor.get_cur_local_commitment_number() ||
channel.get_revoked_remote_commitment_transaction_number() != monitor.get_min_seen_secret() ||
- channel.get_cur_remote_commitment_transaction_number() != monitor.get_cur_remote_commitment_number() {
+ channel.get_cur_remote_commitment_transaction_number() != monitor.get_cur_remote_commitment_number() ||
+ channel.get_latest_monitor_update_id() != monitor.get_latest_update_id() {
let mut force_close_res = channel.force_shutdown();
force_close_res.0 = monitor.get_latest_local_commitment_txn();
closed_channels.push(force_close_res);
use ln::msgs::DecodeError;
use ln::chan_utils;
-use ln::chan_utils::{HTLCOutputInCommitment, LocalCommitmentTransaction, HTLCType};
+use ln::chan_utils::{CounterpartyCommitmentSecrets, HTLCOutputInCommitment, LocalCommitmentTransaction, HTLCType};
use ln::channelmanager::{HTLCSource, PaymentPreimage, PaymentHash};
use chain::chaininterface::{ChainListener, ChainWatchInterface, BroadcasterInterface, FeeEstimator, ConfirmationTarget, MIN_RELAY_FEE_SAT_PER_1000_WEIGHT};
use chain::transaction::OutPoint;
use std::{hash, cmp, mem};
use std::ops::Deref;
+/// An update generated by the underlying Channel itself which contains some new information the
+/// ChannelMonitor should be made aware of.
+#[cfg_attr(test, derive(PartialEq))]
+#[derive(Clone)]
+#[must_use]
+pub struct ChannelMonitorUpdate {
+ pub(super) updates: Vec<ChannelMonitorUpdateStep>,
+ /// The sequence number of this update. Updates *must* be replayed in-order according to this
+ /// sequence number (and updates may panic if they are not). The update_id values are strictly
+ /// increasing and increase by one for each new update.
+ ///
+ /// This sequence number is also used to track up to which points updates which returned
+ /// ChannelMonitorUpdateErr::TemporaryFailure have been applied to all copies of a given
+ /// ChannelMonitor when ChannelManager::channel_monitor_updated is called.
+ pub update_id: u64,
+}
+
+impl Writeable for ChannelMonitorUpdate {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ self.update_id.write(w)?;
+ (self.updates.len() as u64).write(w)?;
+ for update_step in self.updates.iter() {
+ update_step.write(w)?;
+ }
+ Ok(())
+ }
+}
+impl<R: ::std::io::Read> Readable<R> for ChannelMonitorUpdate {
+ fn read(r: &mut R) -> Result<Self, DecodeError> {
+ let update_id: u64 = Readable::read(r)?;
+ let len: u64 = Readable::read(r)?;
+ let mut updates = Vec::with_capacity(cmp::min(len as usize, MAX_ALLOC_SIZE / ::std::mem::size_of::<ChannelMonitorUpdateStep>()));
+ for _ in 0..len {
+ updates.push(Readable::read(r)?);
+ }
+ Ok(Self { update_id, updates })
+ }
+}
+
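A minimal round-trip sketch of this serialization (module-internal, since `updates` is pub(super); VecWriter here is a hypothetical Vec-backed implementation of the Writer trait used throughout this file, not something defined in it):

struct VecWriter(Vec<u8>);
impl Writer for VecWriter {
	fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
		self.0.extend_from_slice(buf);
		Ok(())
	}
	fn size_hint(&mut self, _size: usize) {}
}

fn round_trip_sketch() {
	let update = ChannelMonitorUpdate { update_id: 1, updates: Vec::new() };
	let mut w = VecWriter(Vec::new());
	update.write(&mut w).unwrap();
	let mut r = ::std::io::Cursor::new(&w.0[..]);
	let read_back: ChannelMonitorUpdate = Readable::read(&mut r).unwrap();
	assert_eq!(read_back.update_id, 1);
}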
/// An error enum representing a failure to persist a channel monitor update.
#[derive(Clone)]
pub enum ChannelMonitorUpdateErr {
/// our state failed, but is expected to succeed at some point in the future).
///
/// Such a failure will "freeze" a channel, preventing us from revoking old states or
- /// submitting new commitment transactions to the remote party.
- /// ChannelManager::test_restore_channel_monitor can be used to retry the update(s) and restore
- /// the channel to an operational state.
+ /// submitting new commitment transactions to the remote party. Once the update(s) which failed
+ /// have been successfully applied, ChannelManager::channel_monitor_updated can be used to
+ /// restore the channel to an operational state.
///
- /// Note that continuing to operate when no copy of the updated ChannelMonitor could be
- /// persisted is unsafe - if you failed to store the update on your own local disk you should
- /// instead return PermanentFailure to force closure of the channel ASAP.
+ /// Note that a given ChannelManager will *never* re-generate a given ChannelMonitorUpdate. If
+ /// you return a TemporaryFailure you must ensure that it is written to disk safely before
+ /// writing out the latest ChannelManager state.
///
/// Even when a channel has been "frozen" updates to the ChannelMonitor can continue to occur
/// (eg if an inbound HTLC which we forwarded was claimed upstream resulting in us attempting
/// been "frozen".
///
/// Note that even if updates made after TemporaryFailure succeed you must still call
- /// test_restore_channel_monitor to ensure you have the latest monitor and re-enable normal
- /// channel operation.
+ /// channel_monitor_updated to ensure you have the latest monitor and re-enable normal channel
+ /// operation.
+ ///
+ /// Note that the update being processed here will not be replayed for you when you call
+ /// ChannelManager::channel_monitor_updated, so you must store the update itself along
+ /// with the persisted ChannelMonitor on your own local disk prior to returning a
+ /// TemporaryFailure. You may, of course, employ a journaling approach, storing only the
+ /// ChannelMonitorUpdate on disk without updating the monitor itself, replaying the journal at
+ /// reload-time.
///
/// For deployments where a copy of ChannelMonitors and other local state are backed up in a
/// remote location (with local copies persisted immediately), it is anticipated that all
}
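To make the TemporaryFailure contract concrete, here is a self-contained sketch (plain std types only; Journal and its methods are hypothetical, not LDK APIs) of the journaling approach described above: persist each ChannelMonitorUpdate before returning TemporaryFailure, and once every copy has applied updates through some id, pass that id to ChannelManager::channel_monitor_updated.

use std::collections::BTreeMap;

// Hypothetical journal of not-yet-fully-replicated updates for one channel.
struct Journal {
	// update_id -> serialized ChannelMonitorUpdate, kept until durable everywhere
	pending: BTreeMap<u64, Vec<u8>>,
	highest_durable: u64,
}

impl Journal {
	// Called from your update_monitor impl *before* returning
	// TemporaryFailure, so the update can be replayed at reload-time.
	fn persist(&mut self, update_id: u64, update_bytes: Vec<u8>) {
		self.pending.insert(update_id, update_bytes);
	}

	// Called once all copies (local disk, watchtower, ...) have applied
	// updates up to and including `id`. The returned value is what you pass
	// to ChannelManager::channel_monitor_updated(&funding_txo, id).
	fn mark_durable(&mut self, id: u64) -> u64 {
		assert!(id >= self.highest_durable);
		self.highest_durable = id;
		// Updates at or below `id` no longer need replaying.
		self.pending = self.pending.split_off(&(id + 1));
		self.highest_durable
	}
}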
/// General Err type for ChannelMonitor actions. Generally, this implies that the data provided is
-/// inconsistent with the ChannelMonitor being called. eg for ChannelMonitor::insert_combine this
-/// means you tried to merge two monitors for different channels or for a channel which was
-/// restored from a backup and then generated new commitment updates.
+/// inconsistent with the ChannelMonitor being called. eg for ChannelMonitor::update_monitor this
+/// means you tried to update a monitor for a different channel or the ChannelMonitorUpdate was
+/// corrupted.
/// Contains a human-readable error message.
#[derive(Debug)]
pub struct MonitorUpdateError(pub &'static str);
/// Simple trait indicating ability to track a set of ChannelMonitors and multiplex events between
/// them. Generally should be implemented by keeping a local SimpleManyChannelMonitor and passing
-/// events to it, while also taking any add_update_monitor events and passing them to some remote
+/// events to it, while also taking any add/update_monitor events and passing them to some remote
/// server(s).
///
/// Note that any updates to a channel's monitor *must* be applied to each instance of the
/// BlockNotifier and call the BlockNotifier's `block_(dis)connected` methods, which will notify
/// all registered listeners in one go.
pub trait ManyChannelMonitor<ChanSigner: ChannelKeys>: Send + Sync {
- /// Adds or updates a monitor for the given `funding_txo`.
+ /// Adds a monitor for the given `funding_txo`.
///
/// Implementer must also ensure that the funding_txo txid *and* outpoint are registered with
/// any relevant ChainWatchInterfaces such that the provided monitor receives block_connected
/// any spends of any of the outputs.
///
/// Any spends of outputs which should have been registered which aren't passed to
- /// ChannelMonitors via block_connected may result in funds loss.
- fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChanSigner>) -> Result<(), ChannelMonitorUpdateErr>;
+ /// ChannelMonitors via block_connected may result in FUNDS LOSS.
+ fn add_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChanSigner>) -> Result<(), ChannelMonitorUpdateErr>;
+
+ /// Updates a monitor for the given `funding_txo`.
+ ///
+ /// Implementer must also ensure that the funding_txo txid *and* outpoint are registered with
+ /// any relevant ChainWatchInterfaces such that the provided monitor receives block_connected
+ /// callbacks with the funding transaction, or any spends of it.
+ ///
+ /// Further, the implementer must also ensure that each output returned in
+ /// monitor.get_watch_outputs() is registered to ensure that the provided monitor learns about
+ /// any spends of any of the outputs.
+ ///
+ /// Any spends of outputs which should have been registered which aren't passed to
+ /// ChannelMonitors via block_connected may result in FUNDS LOSS.
+ fn update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr>;
/// Used by ChannelManager to get list of HTLC resolved onchain and which needed to be updated
/// with success or failure.
}
/// Adds or updates the monitor which monitors the channel referred to by the given key.
- pub fn add_update_monitor_by_key(&self, key: Key, monitor: ChannelMonitor<ChanSigner>) -> Result<(), MonitorUpdateError> {
+ pub fn add_monitor_by_key(&self, key: Key, monitor: ChannelMonitor<ChanSigner>) -> Result<(), MonitorUpdateError> {
let mut monitors = self.monitors.lock().unwrap();
- match monitors.get_mut(&key) {
- Some(orig_monitor) => {
- log_trace!(self, "Updating Channel Monitor for channel {}", log_funding_info!(monitor.key_storage));
- return orig_monitor.insert_combine(monitor);
- },
- None => {}
+ let entry = match monitors.entry(key) {
+ hash_map::Entry::Occupied(_) => return Err(MonitorUpdateError("Channel monitor for given key is already present")),
+ hash_map::Entry::Vacant(e) => e,
};
match monitor.key_storage {
Storage::Local { ref funding_info, .. } => {
self.chain_monitor.install_watch_outpoint((*txid, idx as u32), script);
}
}
- monitors.insert(key, monitor);
+ entry.insert(monitor);
Ok(())
}
+
+ /// Updates the monitor which monitors the channel referred to by the given key.
+ pub fn update_monitor_by_key(&self, key: Key, update: ChannelMonitorUpdate) -> Result<(), MonitorUpdateError> {
+ let mut monitors = self.monitors.lock().unwrap();
+ match monitors.get_mut(&key) {
+ Some(orig_monitor) => {
+ log_trace!(self, "Updating Channel Monitor for channel {}", log_funding_info!(orig_monitor.key_storage));
+ orig_monitor.update_monitor(update)
+ },
+ None => Err(MonitorUpdateError("No such monitor registered"))
+ }
+ }
}
impl<ChanSigner: ChannelKeys, T: Deref + Sync + Send> ManyChannelMonitor<ChanSigner> for SimpleManyChannelMonitor<OutPoint, ChanSigner, T>
where T::Target: BroadcasterInterface
{
- fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChanSigner>) -> Result<(), ChannelMonitorUpdateErr> {
- match self.add_update_monitor_by_key(funding_txo, monitor) {
+ fn add_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChanSigner>) -> Result<(), ChannelMonitorUpdateErr> {
+ match self.add_monitor_by_key(funding_txo, monitor) {
+ Ok(_) => Ok(()),
+ Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure),
+ }
+ }
+
+ fn update_monitor(&self, funding_txo: OutPoint, update: ChannelMonitorUpdate) -> Result<(), ChannelMonitorUpdateErr> {
+ match self.update_monitor_by_key(funding_txo, update) {
Ok(_) => Ok(()),
Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure),
}
/// keeping bumping another claim tx to solve the outpoint.
pub(crate) const ANTI_REORG_DELAY: u32 = 6;
-#[derive(Clone)]
enum Storage<ChanSigner: ChannelKeys> {
Local {
keys: ChanSigner,
const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
+#[cfg_attr(test, derive(PartialEq))]
+#[derive(Clone)]
+pub(super) enum ChannelMonitorUpdateStep {
+ LatestLocalCommitmentTXInfo {
+ // TODO: We really should not be generating a fully-signed transaction in Channel and
+ // passing it here; we should hold off so that the ChanSigner can enforce an
+ // only-sign-local-state-for-broadcast-once invariant:
+ commitment_tx: LocalCommitmentTransaction,
+ local_keys: chan_utils::TxCreationKeys,
+ feerate_per_kw: u64,
+ htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>,
+ },
+ LatestRemoteCommitmentTXInfo {
+ unsigned_commitment_tx: Transaction, // TODO: We should actually only need the txid here
+ htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>,
+ commitment_number: u64,
+ their_revocation_point: PublicKey,
+ },
+ PaymentPreimage {
+ payment_preimage: PaymentPreimage,
+ },
+ CommitmentSecret {
+ idx: u64,
+ secret: [u8; 32],
+ },
+ /// Indicates our channel is likely a stale version; we're closing, but this update should
+ /// allow us to spend what is ours if our counterparty broadcasts their latest state.
+ RescueRemoteCommitmentTXInfo {
+ their_current_per_commitment_point: PublicKey,
+ },
+}
+
+impl Writeable for ChannelMonitorUpdateStep {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ match self {
+ &ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo { ref commitment_tx, ref local_keys, ref feerate_per_kw, ref htlc_outputs } => {
+ 0u8.write(w)?;
+ commitment_tx.write(w)?;
+ local_keys.write(w)?;
+ feerate_per_kw.write(w)?;
+ (htlc_outputs.len() as u64).write(w)?;
+ for &(ref output, ref signature, ref source) in htlc_outputs.iter() {
+ output.write(w)?;
+ signature.write(w)?;
+ source.write(w)?;
+ }
+ }
+ &ChannelMonitorUpdateStep::LatestRemoteCommitmentTXInfo { ref unsigned_commitment_tx, ref htlc_outputs, ref commitment_number, ref their_revocation_point } => {
+ 1u8.write(w)?;
+ unsigned_commitment_tx.write(w)?;
+ commitment_number.write(w)?;
+ their_revocation_point.write(w)?;
+ (htlc_outputs.len() as u64).write(w)?;
+ for &(ref output, ref source) in htlc_outputs.iter() {
+ output.write(w)?;
+ match source {
+ &None => 0u8.write(w)?,
+ &Some(ref s) => {
+ 1u8.write(w)?;
+ s.write(w)?;
+ },
+ }
+ }
+ },
+ &ChannelMonitorUpdateStep::PaymentPreimage { ref payment_preimage } => {
+ 2u8.write(w)?;
+ payment_preimage.write(w)?;
+ },
+ &ChannelMonitorUpdateStep::CommitmentSecret { ref idx, ref secret } => {
+ 3u8.write(w)?;
+ idx.write(w)?;
+ secret.write(w)?;
+ },
+ &ChannelMonitorUpdateStep::RescueRemoteCommitmentTXInfo { ref their_current_per_commitment_point } => {
+ 4u8.write(w)?;
+ their_current_per_commitment_point.write(w)?;
+ },
+ }
+ Ok(())
+ }
+}
+impl<R: ::std::io::Read> Readable<R> for ChannelMonitorUpdateStep {
+ fn read(r: &mut R) -> Result<Self, DecodeError> {
+ match Readable::read(r)? {
+ 0u8 => {
+ Ok(ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo {
+ commitment_tx: Readable::read(r)?,
+ local_keys: Readable::read(r)?,
+ feerate_per_kw: Readable::read(r)?,
+ htlc_outputs: {
+ let len: u64 = Readable::read(r)?;
+ let mut res = Vec::new();
+ for _ in 0..len {
+ res.push((Readable::read(r)?, Readable::read(r)?, Readable::read(r)?));
+ }
+ res
+ },
+ })
+ },
+ 1u8 => {
+ Ok(ChannelMonitorUpdateStep::LatestRemoteCommitmentTXInfo {
+ unsigned_commitment_tx: Readable::read(r)?,
+ commitment_number: Readable::read(r)?,
+ their_revocation_point: Readable::read(r)?,
+ htlc_outputs: {
+ let len: u64 = Readable::read(r)?;
+ let mut res = Vec::new();
+ for _ in 0..len {
+ res.push((Readable::read(r)?, <Option<HTLCSource> as Readable<R>>::read(r)?.map(|o| Box::new(o))));
+ }
+ res
+ },
+ })
+ },
+ 2u8 => {
+ Ok(ChannelMonitorUpdateStep::PaymentPreimage {
+ payment_preimage: Readable::read(r)?,
+ })
+ },
+ 3u8 => {
+ Ok(ChannelMonitorUpdateStep::CommitmentSecret {
+ idx: Readable::read(r)?,
+ secret: Readable::read(r)?,
+ })
+ },
+ 4u8 => {
+ Ok(ChannelMonitorUpdateStep::RescueRemoteCommitmentTXInfo {
+ their_current_per_commitment_point: Readable::read(r)?,
+ })
+ },
+ _ => Err(DecodeError::InvalidValue),
+ }
+ }
+}
+
/// A ChannelMonitor handles chain events (blocks connected and disconnected) and generates
/// on-chain transactions to ensure no loss of funds occurs.
///
/// You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date
/// information and are actively monitoring the chain.
-#[derive(Clone)]
pub struct ChannelMonitor<ChanSigner: ChannelKeys> {
+ latest_update_id: u64,
commitment_transaction_number_obscure_factor: u64,
key_storage: Storage<ChanSigner>,
our_to_self_delay: u16,
their_to_self_delay: Option<u16>,
- old_secrets: [([u8; 32], u64); 49],
+ commitment_secrets: CounterpartyCommitmentSecrets,
remote_claimable_outpoints: HashMap<Sha256dHash, Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>>,
/// We cannot identify HTLC-Success or HTLC-Timeout transactions by themselves on the chain.
/// Nor can we figure out their commitment numbers without the commitment transaction they are
// We simply modify last_block_hash in Channel's block_connected so that serialization is
// consistent but hopefully the users' copy handles block_connected in a consistent way.
- // (we do *not*, however, update them in insert_combine to ensure any local user copies keep
+ // (we do *not*, however, update them in update_monitor to ensure any local user copies keep
// their last_block_hash from its state and not based on updated copies that didn't run through
// the full block_connected).
pub(crate) last_block_hash: Sha256dHash,
secp_ctx: Secp256k1<secp256k1::All>, //TODO: dedup this a bit...
logger: Arc<Logger>,
}
-
macro_rules! subtract_high_prio_fee {
($self: ident, $fee_estimator: expr, $value: expr, $predicted_weight: expr, $used_feerate: expr) => {
{
/// underlying object
impl<ChanSigner: ChannelKeys> PartialEq for ChannelMonitor<ChanSigner> {
fn eq(&self, other: &Self) -> bool {
- if self.commitment_transaction_number_obscure_factor != other.commitment_transaction_number_obscure_factor ||
+ if self.latest_update_id != other.latest_update_id ||
+ self.commitment_transaction_number_obscure_factor != other.commitment_transaction_number_obscure_factor ||
self.key_storage != other.key_storage ||
self.their_htlc_base_key != other.their_htlc_base_key ||
self.their_delayed_payment_base_key != other.their_delayed_payment_base_key ||
self.their_cur_revocation_points != other.their_cur_revocation_points ||
self.our_to_self_delay != other.our_to_self_delay ||
self.their_to_self_delay != other.their_to_self_delay ||
+ self.commitment_secrets != other.commitment_secrets ||
self.remote_claimable_outpoints != other.remote_claimable_outpoints ||
self.remote_commitment_txn_on_chain != other.remote_commitment_txn_on_chain ||
self.remote_hash_commitment_number != other.remote_hash_commitment_number ||
{
false
} else {
- for (&(ref secret, ref idx), &(ref o_secret, ref o_idx)) in self.old_secrets.iter().zip(other.old_secrets.iter()) {
- if secret != o_secret || idx != o_idx {
- return false
- }
- }
true
}
}
writer.write_all(&[SERIALIZATION_VERSION; 1])?;
writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
+ self.latest_update_id.write(writer)?;
+
// Set in initial Channel-object creation, so should always be set by now:
U48(self.commitment_transaction_number_obscure_factor).write(writer)?;
writer.write_all(&byte_utils::be16_to_array(self.our_to_self_delay))?;
writer.write_all(&byte_utils::be16_to_array(self.their_to_self_delay.unwrap()))?;
- for &(ref secret, ref idx) in self.old_secrets.iter() {
- writer.write_all(secret)?;
- writer.write_all(&byte_utils::be64_to_array(*idx))?;
- }
+ self.commitment_secrets.write(writer)?;
macro_rules! serialize_htlc_in_commitment {
($htlc_output: expr) => {
}
impl<ChanSigner: ChannelKeys> ChannelMonitor<ChanSigner> {
- pub(super) fn new(keys: ChanSigner, funding_key: &SecretKey, revocation_base_key: &SecretKey, delayed_payment_base_key: &SecretKey, htlc_base_key: &SecretKey, payment_base_key: &SecretKey, shutdown_pubkey: &PublicKey, our_to_self_delay: u16, destination_script: Script, logger: Arc<Logger>) -> ChannelMonitor<ChanSigner> {
+ pub(super) fn new(keys: ChanSigner, shutdown_pubkey: &PublicKey,
+ our_to_self_delay: u16, destination_script: &Script, funding_info: (OutPoint, Script),
+ their_htlc_base_key: &PublicKey, their_delayed_payment_base_key: &PublicKey,
+ their_to_self_delay: u16, funding_redeemscript: Script, channel_value_satoshis: u64,
+ commitment_transaction_number_obscure_factor: u64,
+ logger: Arc<Logger>) -> ChannelMonitor<ChanSigner> {
+
+ assert!(commitment_transaction_number_obscure_factor <= (1 << 48));
+ let funding_key = keys.funding_key().clone();
+ let revocation_base_key = keys.revocation_base_key().clone();
+ let htlc_base_key = keys.htlc_base_key().clone();
+ let delayed_payment_base_key = keys.delayed_payment_base_key().clone();
+ let payment_base_key = keys.payment_base_key().clone();
ChannelMonitor {
- commitment_transaction_number_obscure_factor: 0,
+ latest_update_id: 0,
+ commitment_transaction_number_obscure_factor,
key_storage: Storage::Local {
keys,
- funding_key: funding_key.clone(),
- revocation_base_key: revocation_base_key.clone(),
- htlc_base_key: htlc_base_key.clone(),
- delayed_payment_base_key: delayed_payment_base_key.clone(),
- payment_base_key: payment_base_key.clone(),
+ funding_key,
+ revocation_base_key,
+ htlc_base_key,
+ delayed_payment_base_key,
+ payment_base_key,
shutdown_pubkey: shutdown_pubkey.clone(),
- funding_info: None,
+ funding_info: Some(funding_info),
current_remote_commitment_txid: None,
prev_remote_commitment_txid: None,
},
- their_htlc_base_key: None,
- their_delayed_payment_base_key: None,
- funding_redeemscript: None,
- channel_value_satoshis: None,
+ their_htlc_base_key: Some(their_htlc_base_key.clone()),
+ their_delayed_payment_base_key: Some(their_delayed_payment_base_key.clone()),
+ funding_redeemscript: Some(funding_redeemscript),
+ channel_value_satoshis: Some(channel_value_satoshis),
their_cur_revocation_points: None,
our_to_self_delay: our_to_self_delay,
- their_to_self_delay: None,
+ their_to_self_delay: Some(their_to_self_delay),
- old_secrets: [([0; 32], 1 << 48); 49],
+ commitment_secrets: CounterpartyCommitmentSecrets::new(),
remote_claimable_outpoints: HashMap::new(),
remote_commitment_txn_on_chain: HashMap::new(),
remote_hash_commitment_number: HashMap::new(),
payment_preimages: HashMap::new(),
pending_htlcs_updated: Vec::new(),
- destination_script: destination_script,
+ destination_script: destination_script.clone(),
to_remote_rescue: None,
pending_claim_requests: HashMap::new(),
current_height + 15
}
- #[inline]
- fn place_secret(idx: u64) -> u8 {
- for i in 0..48 {
- if idx & (1 << i) == (1 << i) {
- return i
- }
- }
- 48
- }
-
- #[inline]
- fn derive_secret(secret: [u8; 32], bits: u8, idx: u64) -> [u8; 32] {
- let mut res: [u8; 32] = secret;
- for i in 0..bits {
- let bitpos = bits - 1 - i;
- if idx & (1 << bitpos) == (1 << bitpos) {
- res[(bitpos / 8) as usize] ^= 1 << (bitpos & 7);
- res = Sha256::hash(&res).into_inner();
- }
- }
- res
- }
-
- /// Inserts a revocation secret into this channel monitor. Prunes old preimages if neither
- /// needed by local commitment transactions HTCLs nor by remote ones. Unless we haven't already seen remote
- /// commitment transaction's secret, they are de facto pruned (we can use revocation key).
+ /// Inserts a revocation secret into this channel monitor. Prunes old preimages if they are
+ /// needed by neither our local commitment transaction's HTLCs nor the remote ones. Once we
+ /// have seen the remote commitment transaction's secret, such preimages are de facto pruned
+ /// (we can use the revocation key instead).
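+ ///
+ /// Illustrative call sequence (a sketch, not a test vector; the secrets are assumed to be
+ /// the counterparty's per-commitment secrets):
+ /// ```ignore
+ /// // Secrets are provided in decreasing index order, starting at 2^48 - 1:
+ /// monitor.provide_secret(281474976710655, secret_a)?;
+ /// monitor.provide_secret(281474976710654, secret_b)?;
+ /// // A secret from which the previously-provided secrets cannot be derived is rejected:
+ /// assert!(monitor.provide_secret(281474976710653, bogus_secret).is_err());
+ /// ```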
pub(super) fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), MonitorUpdateError> {
- let pos = ChannelMonitor::<ChanSigner>::place_secret(idx);
- for i in 0..pos {
- let (old_secret, old_idx) = self.old_secrets[i as usize];
- if ChannelMonitor::<ChanSigner>::derive_secret(secret, pos, old_idx) != old_secret {
- return Err(MonitorUpdateError("Previous secret did not match new one"));
- }
- }
- if self.get_min_seen_secret() <= idx {
- return Ok(());
+ if let Err(()) = self.commitment_secrets.provide_secret(idx, secret) {
+ return Err(MonitorUpdateError("Previous secret did not match new one"));
}
- self.old_secrets[pos as usize] = (secret, idx);
// Prune HTLCs from the previous remote commitment tx so we don't generate failure/fulfill
// events for now-revoked/fulfilled HTLCs.
- // TODO: We should probably consider whether we're really getting the next secret here.
if let Storage::Local { ref mut prev_remote_commitment_txid, .. } = self.key_storage {
if let Some(txid) = prev_remote_commitment_txid.take() {
for &mut (_, ref mut source) in self.remote_claimable_outpoints.get_mut(&txid).unwrap() {
- /// is important that any clones of this channel monitor (including remote clones) by kept
+ /// is important that any clones of this channel monitor (including remote clones) be kept
/// up-to-date as our local commitment transaction is updated.
- /// Panics if set_their_to_self_delay has never been called.
+ /// Returns an Err if their_to_self_delay has never been set (ie we haven't yet been given
+ /// the channel's basic information), in which case the update is rejected.
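+ ///
+ /// Illustrative call under the new error-returning signature (the arguments are
+ /// placeholders a real Channel would supply):
+ /// ```ignore
+ /// monitor.provide_latest_local_commitment_tx_info(commitment_tx, local_keys,
+ ///     feerate_per_kw, htlc_outputs).expect("channel info must be set first");
+ /// ```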
- pub(super) fn provide_latest_local_commitment_tx_info(&mut self, commitment_tx: LocalCommitmentTransaction, local_keys: chan_utils::TxCreationKeys, feerate_per_kw: u64, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>) {
- assert!(self.their_to_self_delay.is_some());
+ pub(super) fn provide_latest_local_commitment_tx_info(&mut self, commitment_tx: LocalCommitmentTransaction, local_keys: chan_utils::TxCreationKeys, feerate_per_kw: u64, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Signature>, Option<HTLCSource>)>) -> Result<(), MonitorUpdateError> {
+ if self.their_to_self_delay.is_none() {
+ return Err(MonitorUpdateError("Got a local commitment tx info update before we'd set basic information about the channel"));
+ }
self.prev_local_signed_commitment_tx = self.current_local_signed_commitment_tx.take();
self.current_local_signed_commitment_tx = Some(LocalSignedTx {
txid: commitment_tx.txid(),
feerate_per_kw,
htlc_outputs,
});
+ Ok(())
}
/// Provides a payment_hash->payment_preimage mapping. Will be automatically pruned when all
self.payment_preimages.insert(payment_hash.clone(), payment_preimage.clone());
}
- /// Combines this ChannelMonitor with the information contained in the other ChannelMonitor.
- /// After a successful call this ChannelMonitor is up-to-date and is safe to use to monitor the
- /// chain for new blocks/transactions.
- pub fn insert_combine(&mut self, mut other: ChannelMonitor<ChanSigner>) -> Result<(), MonitorUpdateError> {
- match self.key_storage {
- Storage::Local { ref funding_info, .. } => {
- if funding_info.is_none() { return Err(MonitorUpdateError("Try to combine a Local monitor without funding_info")); }
- let our_funding_info = funding_info;
- if let Storage::Local { ref funding_info, .. } = other.key_storage {
- if funding_info.is_none() { return Err(MonitorUpdateError("Try to combine a Local monitor without funding_info")); }
- // We should be able to compare the entire funding_txo, but in fuzztarget it's trivially
- // easy to collide the funding_txo hash and have a different scriptPubKey.
- if funding_info.as_ref().unwrap().0 != our_funding_info.as_ref().unwrap().0 {
- return Err(MonitorUpdateError("Funding transaction outputs are not identical!"));
- }
- } else {
- return Err(MonitorUpdateError("Try to combine a Local monitor with a Watchtower one !"));
- }
- },
- Storage::Watchtower { .. } => {
- if let Storage::Watchtower { .. } = other.key_storage {
- unimplemented!();
- } else {
- return Err(MonitorUpdateError("Try to combine a Watchtower monitor with a Local one !"));
- }
- },
- }
- let other_min_secret = other.get_min_seen_secret();
- let our_min_secret = self.get_min_seen_secret();
- if our_min_secret > other_min_secret {
- self.provide_secret(other_min_secret, other.get_secret(other_min_secret).unwrap())?;
- }
- if let Some(ref local_tx) = self.current_local_signed_commitment_tx {
- if let Some(ref other_local_tx) = other.current_local_signed_commitment_tx {
- let our_commitment_number = 0xffffffffffff - ((((local_tx.tx.without_valid_witness().input[0].sequence as u64 & 0xffffff) << 3*8) | (local_tx.tx.without_valid_witness().lock_time as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
- let other_commitment_number = 0xffffffffffff - ((((other_local_tx.tx.without_valid_witness().input[0].sequence as u64 & 0xffffff) << 3*8) | (other_local_tx.tx.without_valid_witness().lock_time as u64 & 0xffffff)) ^ other.commitment_transaction_number_obscure_factor);
- if our_commitment_number >= other_commitment_number {
- self.key_storage = other.key_storage;
- }
- }
- }
- // TODO: We should use current_remote_commitment_number and the commitment number out of
- // local transactions to decide how to merge
- if our_min_secret >= other_min_secret {
- self.their_cur_revocation_points = other.their_cur_revocation_points;
- for (txid, htlcs) in other.remote_claimable_outpoints.drain() {
- self.remote_claimable_outpoints.insert(txid, htlcs);
- }
- if let Some(local_tx) = other.prev_local_signed_commitment_tx {
- self.prev_local_signed_commitment_tx = Some(local_tx);
- }
- if let Some(local_tx) = other.current_local_signed_commitment_tx {
- self.current_local_signed_commitment_tx = Some(local_tx);
- }
- self.payment_preimages = other.payment_preimages;
- self.to_remote_rescue = other.to_remote_rescue;
- }
-
- self.current_remote_commitment_number = cmp::min(self.current_remote_commitment_number, other.current_remote_commitment_number);
+ /// Used by Channel to apply updates without enforcing update_id ordering, since Channel
+ /// plays games with the update_ids. Will be removed soon!
+ pub(super) fn update_monitor_ooo(&mut self, mut updates: ChannelMonitorUpdate) -> Result<(), MonitorUpdateError> {
+ for update in updates.updates.drain(..) {
+ match update {
+ ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo { commitment_tx, local_keys, feerate_per_kw, htlc_outputs } =>
+ self.provide_latest_local_commitment_tx_info(commitment_tx, local_keys, feerate_per_kw, htlc_outputs)?,
+ ChannelMonitorUpdateStep::LatestRemoteCommitmentTXInfo { unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point } =>
+ self.provide_latest_remote_commitment_tx_info(&unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point),
+ ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } =>
+ self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage),
+ ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } =>
+ self.provide_secret(idx, secret)?,
+ ChannelMonitorUpdateStep::RescueRemoteCommitmentTXInfo { their_current_per_commitment_point } =>
+ self.provide_rescue_remote_commitment_tx_info(their_current_per_commitment_point),
+ }
+ }
+ self.latest_update_id = updates.update_id;
Ok(())
}
- /// Allows this monitor to scan only for transactions which are applicable. Note that this is
- /// optional, without it this monitor cannot be used in an SPV client, but you may wish to
- /// avoid this (or call unset_funding_info) on a monitor you wish to send to a watchtower as it
- /// provides slightly better privacy.
- /// It's the responsibility of the caller to register outpoint and script with passing the former
- /// value as key to add_update_monitor.
- pub(super) fn set_funding_info(&mut self, new_funding_info: (OutPoint, Script)) {
- match self.key_storage {
- Storage::Local { ref mut funding_info, .. } => {
- *funding_info = Some(new_funding_info);
- },
- Storage::Watchtower { .. } => {
- panic!("Channel somehow ended up with its internal ChannelMonitor being in Watchtower mode?");
- }
- }
- }
-
- /// We log these base keys at channel opening to being able to rebuild redeemscript in case of leaked revoked commit tx
- /// Panics if commitment_transaction_number_obscure_factor doesn't fit in 48 bits
- pub(super) fn set_basic_channel_info(&mut self, their_htlc_base_key: &PublicKey, their_delayed_payment_base_key: &PublicKey, their_to_self_delay: u16, funding_redeemscript: Script, channel_value_satoshis: u64, commitment_transaction_number_obscure_factor: u64) {
- self.their_htlc_base_key = Some(their_htlc_base_key.clone());
- self.their_delayed_payment_base_key = Some(their_delayed_payment_base_key.clone());
- self.their_to_self_delay = Some(their_to_self_delay);
- self.funding_redeemscript = Some(funding_redeemscript);
- self.channel_value_satoshis = Some(channel_value_satoshis);
- assert!(commitment_transaction_number_obscure_factor < (1 << 48));
- self.commitment_transaction_number_obscure_factor = commitment_transaction_number_obscure_factor;
+ /// Updates a ChannelMonitor on the basis of some new information provided by the Channel
+ /// itself.
+ ///
+ /// Panics if the given update is not the next update by update_id.
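+ ///
+ /// Illustrative in-order application (a sketch; `updates` is assumed to be a sequence of
+ /// consecutive ChannelMonitorUpdates produced by the Channel):
+ /// ```ignore
+ /// for update in updates {
+ ///     assert_eq!(update.update_id, monitor.get_latest_update_id() + 1);
+ ///     monitor.update_monitor(update)?;
+ /// }
+ /// ```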
+ pub fn update_monitor(&mut self, mut updates: ChannelMonitorUpdate) -> Result<(), MonitorUpdateError> {
+ if self.latest_update_id + 1 != updates.update_id {
+ panic!("Attempted to apply ChannelMonitorUpdates out of order, check the update_id before passing an update to update_monitor!");
+ }
+ for update in updates.updates.drain(..) {
+ match update {
+ ChannelMonitorUpdateStep::LatestLocalCommitmentTXInfo { commitment_tx, local_keys, feerate_per_kw, htlc_outputs } =>
+ self.provide_latest_local_commitment_tx_info(commitment_tx, local_keys, feerate_per_kw, htlc_outputs)?,
+ ChannelMonitorUpdateStep::LatestRemoteCommitmentTXInfo { unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point } =>
+ self.provide_latest_remote_commitment_tx_info(&unsigned_commitment_tx, htlc_outputs, commitment_number, their_revocation_point),
+ ChannelMonitorUpdateStep::PaymentPreimage { payment_preimage } =>
+ self.provide_payment_preimage(&PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner()), &payment_preimage),
+ ChannelMonitorUpdateStep::CommitmentSecret { idx, secret } =>
+ self.provide_secret(idx, secret)?,
+ ChannelMonitorUpdateStep::RescueRemoteCommitmentTXInfo { their_current_per_commitment_point } =>
+ self.provide_rescue_remote_commitment_tx_info(their_current_per_commitment_point),
+ }
+ }
+ self.latest_update_id = updates.update_id;
+ Ok(())
}
- pub(super) fn unset_funding_info(&mut self) {
- match self.key_storage {
- Storage::Local { ref mut funding_info, .. } => {
- *funding_info = None;
- },
- Storage::Watchtower { .. } => {
- panic!("Channel somehow ended up with its internal ChannelMonitor being in Watchtower mode?");
- },
- }
+ /// Gets the update_id from the latest ChannelMonitorUpdate which was applied to this
+ /// ChannelMonitor.
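+ ///
+ /// Hypothetical persister sketch: only overwrite a stored copy with a newer one.
+ /// ```ignore
+ /// if monitor.get_latest_update_id() > stored_update_id {
+ ///     persist_monitor(funding_txo, &monitor)?;
+ /// }
+ /// ```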
+ pub fn get_latest_update_id(&self) -> u64 {
+ self.latest_update_id
}
/// Gets the funding transaction outpoint of the channel this ChannelMonitor is monitoring for.
/// Can only fail if idx is < get_min_seen_secret
pub(super) fn get_secret(&self, idx: u64) -> Option<[u8; 32]> {
- for i in 0..self.old_secrets.len() {
- if (idx & (!((1 << i) - 1))) == self.old_secrets[i].1 {
- return Some(ChannelMonitor::<ChanSigner>::derive_secret(self.old_secrets[i].0, i as u8, idx))
- }
- }
- assert!(idx < self.get_min_seen_secret());
- None
+ self.commitment_secrets.get_secret(idx)
}
pub(super) fn get_min_seen_secret(&self) -> u64 {
- //TODO This can be optimized?
- let mut min = 1 << 48;
- for &(_, idx) in self.old_secrets.iter() {
- if idx < min {
- min = idx;
- }
- }
- min
+ self.commitment_secrets.get_min_seen_secret()
}
pub(super) fn get_cur_remote_commitment_number(&self) -> u64 {
return Err(DecodeError::UnknownVersion);
}
+ let latest_update_id: u64 = Readable::read(reader)?;
let commitment_transaction_number_obscure_factor = <U48 as Readable<R>>::read(reader)?.0;
let key_storage = match <u8 as Readable<R>>::read(reader)? {
let our_to_self_delay: u16 = Readable::read(reader)?;
let their_to_self_delay: Option<u16> = Some(Readable::read(reader)?);
- let mut old_secrets = [([0; 32], 1 << 48); 49];
- for &mut (ref mut secret, ref mut idx) in old_secrets.iter_mut() {
- *secret = Readable::read(reader)?;
- *idx = Readable::read(reader)?;
- }
+ let commitment_secrets = Readable::read(reader)?;
macro_rules! read_htlc_in_commitment {
() => {
}
Ok((last_block_hash.clone(), ChannelMonitor {
+ latest_update_id,
commitment_transaction_number_obscure_factor,
key_storage,
our_to_self_delay,
their_to_self_delay,
- old_secrets,
+ commitment_secrets,
remote_claimable_outpoints,
remote_commitment_txn_on_chain,
remote_hash_commitment_number,
use bitcoin_hashes::sha256d::Hash as Sha256dHash;
use bitcoin_hashes::hex::FromHex;
use hex;
+ use chain::transaction::OutPoint;
use ln::channelmanager::{PaymentPreimage, PaymentHash};
use ln::channelmonitor::{ChannelMonitor, InputDescriptors};
use ln::chan_utils;
use std::sync::Arc;
use chain::keysinterface::InMemoryChannelKeys;
-
- #[test]
- fn test_per_commitment_storage() {
- // Test vectors from BOLT 3:
- let mut secrets: Vec<[u8; 32]> = Vec::new();
- let mut monitor: ChannelMonitor<InMemoryChannelKeys>;
- let secp_ctx = Secp256k1::new();
- let logger = Arc::new(TestLogger::new());
-
- macro_rules! test_secrets {
- () => {
- let mut idx = 281474976710655;
- for secret in secrets.iter() {
- assert_eq!(monitor.get_secret(idx).unwrap(), *secret);
- idx -= 1;
- }
- assert_eq!(monitor.get_min_seen_secret(), idx + 1);
- assert!(monitor.get_secret(idx).is_none());
- };
- }
-
- let keys = InMemoryChannelKeys::new(
- &secp_ctx,
- SecretKey::from_slice(&[41; 32]).unwrap(),
- SecretKey::from_slice(&[41; 32]).unwrap(),
- SecretKey::from_slice(&[41; 32]).unwrap(),
- SecretKey::from_slice(&[41; 32]).unwrap(),
- SecretKey::from_slice(&[41; 32]).unwrap(),
- [41; 32],
- 0,
- );
-
- {
- // insert_secret correct sequence
- monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
- secrets.clear();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
- monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
- monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
- monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
- monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
- monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
- monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
- monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
- monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
- }
-
- {
- // insert_secret #1 incorrect
- monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
- secrets.clear();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap());
- monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
- assert_eq!(monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap_err().0,
- "Previous secret did not match new one");
- }
-
- {
- // insert_secret #2 incorrect (#1 derived from incorrect)
- monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
- secrets.clear();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap());
- monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("dddc3a8d14fddf2b68fa8c7fbad2748274937479dd0f8930d5ebb4ab6bd866a3").unwrap());
- monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
- monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
- assert_eq!(monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap_err().0,
- "Previous secret did not match new one");
- }
-
- {
- // insert_secret #3 incorrect
- monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
- secrets.clear();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
- monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
- monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c51a18b13e8527e579ec56365482c62f180b7d5760b46e9477dae59e87ed423a").unwrap());
- monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
- assert_eq!(monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap_err().0,
- "Previous secret did not match new one");
- }
-
- {
- // insert_secret #4 incorrect (1,2,3 derived from incorrect)
- monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
- secrets.clear();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap());
- monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("dddc3a8d14fddf2b68fa8c7fbad2748274937479dd0f8930d5ebb4ab6bd866a3").unwrap());
- monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c51a18b13e8527e579ec56365482c62f180b7d5760b46e9477dae59e87ed423a").unwrap());
- monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("ba65d7b0ef55a3ba300d4e87af29868f394f8f138d78a7011669c79b37b936f4").unwrap());
- monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
- monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
- monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
- monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
- assert_eq!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap_err().0,
- "Previous secret did not match new one");
- }
-
- {
- // insert_secret #5 incorrect
- monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
- secrets.clear();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
- monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
- monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
- monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
- monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("631373ad5f9ef654bb3dade742d09504c567edd24320d2fcd68e3cc47e2ff6a6").unwrap());
- monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
- assert_eq!(monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap_err().0,
- "Previous secret did not match new one");
- }
-
- {
- // insert_secret #6 incorrect (5 derived from incorrect)
- monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
- secrets.clear();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
- monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
- monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
- monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
- monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("631373ad5f9ef654bb3dade742d09504c567edd24320d2fcd68e3cc47e2ff6a6").unwrap());
- monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("b7e76a83668bde38b373970155c868a653304308f9896692f904a23731224bb1").unwrap());
- monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
- monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
- assert_eq!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap_err().0,
- "Previous secret did not match new one");
- }
-
- {
- // insert_secret #7 incorrect
- monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
- secrets.clear();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
- monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
- monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
- monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
- monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
- monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
- monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("e7971de736e01da8ed58b94c2fc216cb1dca9e326f3a96e7194fe8ea8af6c0a3").unwrap());
- monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
- assert_eq!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap_err().0,
- "Previous secret did not match new one");
- }
-
- {
- // insert_secret #8 incorrect
- monitor = ChannelMonitor::new(keys.clone(), &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
- secrets.clear();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
- monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
- monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
- monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
- monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
- monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
- monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
- monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
- test_secrets!();
-
- secrets.push([0; 32]);
- secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a7efbc61aac46d34f77778bac22c8a20c6a46ca460addc49009bda875ec88fa4").unwrap());
- assert_eq!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap_err().0,
- "Previous secret did not match new one");
- }
- }
-
#[test]
fn test_prune_preimages() {
let secp_ctx = Secp256k1::new();
// Prune with one old state and a local commitment tx holding a few overlaps with the
// old state.
- let mut monitor = ChannelMonitor::new(keys, &SecretKey::from_slice(&[41; 32]).unwrap(), &SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
+ let mut monitor = ChannelMonitor::new(keys,
+ &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()), 0, &Script::new(),
+ (OutPoint { txid: Sha256dHash::from_slice(&[43; 32]).unwrap(), index: 0 }, Script::new()),
+ &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[44; 32]).unwrap()),
+ &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()),
+ 0, Script::new(), 46, 0, logger.clone());
+
monitor.their_to_self_delay = Some(10);
- monitor.provide_latest_local_commitment_tx_info(LocalCommitmentTransaction::dummy(), dummy_keys!(), 0, preimages_to_local_htlcs!(preimages[0..10]));
+ monitor.provide_latest_local_commitment_tx_info(LocalCommitmentTransaction::dummy(), dummy_keys!(), 0, preimages_to_local_htlcs!(preimages[0..10])).unwrap();
monitor.provide_latest_remote_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[5..15]), 281474976710655, dummy_key);
monitor.provide_latest_remote_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[15..20]), 281474976710654, dummy_key);
monitor.provide_latest_remote_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[17..20]), 281474976710653, dummy_key);
// Now update local commitment tx info, pruning only element 18 as we still care about the
// previous commitment tx's preimages too
- monitor.provide_latest_local_commitment_tx_info(LocalCommitmentTransaction::dummy(), dummy_keys!(), 0, preimages_to_local_htlcs!(preimages[0..5]));
+ monitor.provide_latest_local_commitment_tx_info(LocalCommitmentTransaction::dummy(), dummy_keys!(), 0, preimages_to_local_htlcs!(preimages[0..5])).unwrap();
secret[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
monitor.provide_secret(281474976710653, secret.clone()).unwrap();
assert_eq!(monitor.payment_preimages.len(), 12);
test_preimages_exist!(&preimages[18..20], monitor);
// But if we do it again, we'll prune 5-10
- monitor.provide_latest_local_commitment_tx_info(LocalCommitmentTransaction::dummy(), dummy_keys!(), 0, preimages_to_local_htlcs!(preimages[0..3]));
+ monitor.provide_latest_local_commitment_tx_info(LocalCommitmentTransaction::dummy(), dummy_keys!(), 0, preimages_to_local_htlcs!(preimages[0..3])).unwrap();
secret[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
monitor.provide_secret(281474976710652, secret.clone()).unwrap();
assert_eq!(monitor.payment_preimages.len(), 5);
let chain_watch = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&self.logger) as Arc<Logger>));
let channel_monitor = test_utils::TestChannelMonitor::new(chain_watch.clone(), self.tx_broadcaster.clone(), self.logger.clone(), feeest);
for deserialized_monitor in deserialized_monitors.drain(..) {
- if let Err(_) = channel_monitor.add_update_monitor(deserialized_monitor.get_funding_txo().unwrap(), deserialized_monitor) {
+ if let Err(_) = channel_monitor.add_monitor(deserialized_monitor.get_funding_txo().unwrap(), deserialized_monitor) {
panic!();
}
}
nodes_0_deserialized = nodes_0_deserialized_tmp;
assert!(nodes_0_read.is_empty());
- assert!(nodes[0].chan_monitor.add_update_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
+ assert!(nodes[0].chan_monitor.add_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
nodes[0].node = &nodes_0_deserialized;
nodes[0].block_notifier.register_listener(nodes[0].node);
assert_eq!(nodes[0].node.list_channels().len(), 1);
nodes_0_deserialized = nodes_0_deserialized_tmp;
assert!(nodes_0_read.is_empty());
- assert!(nodes[0].chan_monitor.add_update_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
+ assert!(nodes[0].chan_monitor.add_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
nodes[0].node = &nodes_0_deserialized;
check_added_monitors!(nodes[0], 1);
}
for monitor in node_0_monitors.drain(..) {
- assert!(nodes[0].chan_monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor).is_ok());
+ assert!(nodes[0].chan_monitor.add_monitor(monitor.get_funding_txo().unwrap(), monitor).is_ok());
check_added_monitors!(nodes[0], 1);
}
nodes[0].node = &nodes_0_deserialized;
}).unwrap().1
};
nodes[0].node = &node_state_0;
- assert!(monitor.add_update_monitor(OutPoint { txid: chan.3.txid(), index: 0 }, chan_monitor.clone()).is_ok());
+ assert!(monitor.add_monitor(OutPoint { txid: chan.3.txid(), index: 0 }, chan_monitor).is_ok());
nodes[0].chan_monitor = &monitor;
nodes[0].chain_monitor = chain_monitor;
/// A human-readable error message
err: &'static str
},
- /// An attempt to call add_update_monitor returned an Err (ie you did this!), causing the
+ /// An attempt to call add/update_monitor returned an Err (ie you did this!), causing the
/// attempted action to fail.
MonitorUpdateFailed,
}
use secp256k1::Signature;
use secp256k1::key::{PublicKey, SecretKey};
use bitcoin::blockdata::script::Script;
-use bitcoin::blockdata::transaction::OutPoint;
+use bitcoin::blockdata::transaction::{OutPoint, Transaction};
+use bitcoin::consensus;
+use bitcoin::consensus::Encodable;
use bitcoin_hashes::sha256d::Hash as Sha256dHash;
use std::marker::Sized;
use ln::msgs::DecodeError;
}
}
+impl Writeable for Transaction {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ match self.consensus_encode(WriterWriteAdaptor(writer)) {
+ Ok(_) => Ok(()),
+ Err(consensus::encode::Error::Io(e)) => Err(e),
+ Err(_) => panic!("We shouldn't get a consensus::encode::Error unless our Write generated an std::io::Error"),
+ }
+ }
+}
+
+impl<R: Read> Readable<R> for Transaction {
+ fn read(r: &mut R) -> Result<Self, DecodeError> {
+ match consensus::encode::Decodable::consensus_decode(r) {
+ Ok(t) => Ok(t),
+ Err(consensus::encode::Error::Io(ref e)) if e.kind() == ::std::io::ErrorKind::UnexpectedEof => Err(DecodeError::ShortRead),
+ Err(consensus::encode::Error::Io(e)) => Err(DecodeError::Io(e)),
+ Err(_) => Err(DecodeError::InvalidValue),
+ }
+ }
+}
+
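+// The two impls above reuse Bitcoin consensus encoding for Transaction, so the on-disk
+// format matches the wire format. A hedged roundtrip sketch (illustrative only, not part
+// of this patch; `w` is assumed to be any type implementing Writer over a Vec<u8>, such
+// as the TestVecWriter in the test utilities):
+//
+//   tx.write(&mut w)?;
+//   let tx2: Transaction = Readable::read(&mut ::std::io::Cursor::new(&w.0))?;
+//   assert!(tx2 == tx);
+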
impl<R: Read, T: Readable<R>> Readable<R> for Mutex<T> {
fn read(r: &mut R) -> Result<Self, DecodeError> {
let t: T = Readable::read(r)?;
use util::enforcing_trait_impls::EnforcingChannelKeys;
use util::events;
use util::logger::{Logger, Level, Record};
-use util::ser::ReadableArgs;
-use util::ser::Writer;
+use util::ser::{Readable, ReadableArgs, Writer, Writeable};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::blockdata::script::Script;
pub struct TestChannelMonitor<'a> {
pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>>,
+ pub latest_monitor_update_id: Mutex<HashMap<[u8; 32], (OutPoint, u64)>>,
pub simple_monitor: channelmonitor::SimpleManyChannelMonitor<OutPoint, EnforcingChannelKeys, &'a chaininterface::BroadcasterInterface>,
pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
}
pub fn new(chain_monitor: Arc<chaininterface::ChainWatchInterface>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: Arc<Logger>, fee_estimator: Arc<chaininterface::FeeEstimator>) -> Self {
Self {
added_monitors: Mutex::new(Vec::new()),
+ latest_monitor_update_id: Mutex::new(HashMap::new()),
simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger, fee_estimator),
update_ret: Mutex::new(Ok(())),
}
}
}
impl<'a> channelmonitor::ManyChannelMonitor<EnforcingChannelKeys> for TestChannelMonitor<'a> {
- fn add_update_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+ fn add_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<EnforcingChannelKeys>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
// At every point where we get a monitor update, we should be able to send a useful monitor
// to a watchtower and disk...
let mut w = TestVecWriter(Vec::new());
monitor.write_for_disk(&mut w).unwrap();
- assert!(<(Sha256dHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
- &mut ::std::io::Cursor::new(&w.0), Arc::new(TestLogger::new())).unwrap().1 == monitor);
+ let new_monitor = <(Sha256dHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
+ &mut ::std::io::Cursor::new(&w.0), Arc::new(TestLogger::new())).unwrap().1;
+ assert!(new_monitor == monitor);
w.0.clear();
monitor.write_for_watchtower(&mut w).unwrap(); // This at least shouldn't crash...
- self.added_monitors.lock().unwrap().push((funding_txo, monitor.clone()));
- assert!(self.simple_monitor.add_update_monitor(funding_txo, monitor).is_ok());
+ self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, monitor.get_latest_update_id()));
+ self.added_monitors.lock().unwrap().push((funding_txo, monitor));
+ assert!(self.simple_monitor.add_monitor(funding_txo, new_monitor).is_ok());
+ self.update_ret.lock().unwrap().clone()
+ }
+
+ fn update_monitor(&self, funding_txo: OutPoint, update: channelmonitor::ChannelMonitorUpdate) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+ // Every monitor update should survive a serialization roundtrip
+ let mut w = TestVecWriter(Vec::new());
+ update.write(&mut w).unwrap();
+ assert!(channelmonitor::ChannelMonitorUpdate::read(
+ &mut ::std::io::Cursor::new(&w.0)).unwrap() == update);
+
+ self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), (funding_txo, update.update_id));
+ assert!(self.simple_monitor.update_monitor(funding_txo, update).is_ok());
+ // At every point where we get a monitor update, we should be able to send a useful monitor
+ // to a watchtower and disk...
+ let monitors = self.simple_monitor.monitors.lock().unwrap();
+ let monitor = monitors.get(&funding_txo).unwrap();
+ w.0.clear();
+ monitor.write_for_disk(&mut w).unwrap();
+ let new_monitor = <(Sha256dHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
+ &mut ::std::io::Cursor::new(&w.0), Arc::new(TestLogger::new())).unwrap().1;
+ assert!(new_monitor == *monitor);
+ w.0.clear();
+ monitor.write_for_watchtower(&mut w).unwrap(); // This at least shouldn't crash...
+ self.added_monitors.lock().unwrap().push((funding_txo, new_monitor));
self.update_ret.lock().unwrap().clone()
}