fn get_last_revoke_and_ack<L: Deref>(&mut self, logger: &L) -> Option<msgs::RevokeAndACK> where L::Target: Logger {
debug_assert!(self.context.holder_commitment_point.transaction_number() <= INITIAL_COMMITMENT_NUMBER - 2);
self.context.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger);
- let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.holder_commitment_point.transaction_number() + 2);
- if let HolderCommitmentPoint::Available { transaction_number: _, current, next: _ } = self.context.holder_commitment_point {
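+ // Releasing the secret can now fail (for example with an asynchronous remote
+ // signer that does not have it yet), so convert the Result to an Option and
+ // fall through to the pending handling below instead of unwrapping.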
+ let per_commitment_secret = self.context.holder_signer.as_ref()
+ .release_commitment_secret(self.context.holder_commitment_point.transaction_number() + 2).ok();
+ if let (HolderCommitmentPoint::Available { current, .. }, Some(per_commitment_secret)) =
+ (self.context.holder_commitment_point, per_commitment_secret) {
self.context.signer_pending_revoke_and_ack = false;
- Some(msgs::RevokeAndACK {
+ return Some(msgs::RevokeAndACK {
channel_id: self.context.channel_id,
per_commitment_secret,
next_per_commitment_point: current,
#[cfg(taproot)]
next_local_nonce: None,
})
- } else {
- #[cfg(not(async_signing))] {
- panic!("Holder commitment point must be Available when generating revoke_and_ack");
- }
- #[cfg(async_signing)] {
- // Technically if we're at HolderCommitmentPoint::PendingNext,
- // we have a commitment point ready to send in an RAA, however we
- // choose to wait since if we send RAA now, we could get another
- // CS before we have any commitment point available. Blocking our
- // RAA here is a convenient way to make sure that post-funding
- // we're only ever waiting on one commitment point at a time.
- log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available",
- &self.context.channel_id(), self.context.holder_commitment_point.transaction_number());
- self.context.signer_pending_revoke_and_ack = true;
- None
- }
+ }
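+ // Getting here means the commitment point, the commitment secret, or both are
+ // still pending on the signer; log which piece is missing before deciding
+ // whether to panic or defer the revoke_and_ack.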
+ if !self.context.holder_commitment_point.is_available() {
+ log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available",
+ &self.context.channel_id(), self.context.holder_commitment_point.transaction_number());
+ }
+ if per_commitment_secret.is_none() {
+ log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment secret for {} is not available",
+ &self.context.channel_id(), self.context.holder_commitment_point.transaction_number(),
+ self.context.holder_commitment_point.transaction_number() + 2);
+ }
+ #[cfg(not(async_signing))] {
+ panic!("Holder commitment point and per commitment secret must be available when generating revoke_and_ack");
+ }
+ #[cfg(async_signing)] {
+ // Technically if we're at HolderCommitmentPoint::PendingNext,
+ // we have a commitment point ready to send in an RAA, however we
+ // choose to wait since if we send RAA now, we could get another
+ // CS before we have any commitment point available. Blocking our
+ // RAA here is a convenient way to make sure that post-funding
+ // we're only ever waiting on one commitment point at a time.
+ self.context.signer_pending_revoke_and_ack = true;
+ None
}
}
let pubkeys = chan_signer.as_ref().pubkeys();
(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
- chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
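+ // The test signer derives secrets from an in-memory commitment seed and cannot
+ // fail, so unwrapping the new Result is safe throughout this test.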
+ chan_signer.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap(),
chan_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx).unwrap(),
chan_signer.as_ref().pubkeys().funding_pubkey)
};
// Make signer believe we got a counterparty signature, so that it allows the revocation
keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
- per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
+ per_commitment_secret = keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER).unwrap();
// Must revoke without gaps
keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
- keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
+ keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1).unwrap();
keys.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
- &SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
+ &SecretKey::from_slice(&keys.as_ref().release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2).unwrap()).unwrap());
}
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
///
/// Note that the commitment number starts at `(1 << 48) - 1` and counts backwards.
- // TODO: return a Result so we can signal a validation error
- fn release_commitment_secret(&self, idx: u64) -> [u8; 32];
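+ ///
+ /// An `Err` signals that the secret is not yet available (for example with an
+ /// asynchronous signer); callers treat this as a pending signer operation and
+ /// retry once the signer is ready.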
+ fn release_commitment_secret(&self, idx: u64) -> Result<[u8; 32], ()>;
/// Validate the counterparty's signatures on the holder commitment transaction and HTLCs.
///
Ok(PublicKey::from_secret_key(secp_ctx, &commitment_secret))
}
- fn release_commitment_secret(&self, idx: u64) -> [u8; 32] {
- chan_utils::build_commitment_secret(&self.commitment_seed, idx)
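+ // Deriving from the local commitment seed cannot fail, so the in-memory signer
+ // always returns Ok; only external or asynchronous signers are expected to Err.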
+ fn release_commitment_secret(&self, idx: u64) -> Result<[u8; 32], ()> {
+ Ok(chan_utils::build_commitment_secret(&self.commitment_seed, idx))
}
fn validate_holder_commitment(