//! Tests for asynchronous signing. These tests verify that the channel state machine behaves
//! properly with a signer implementation that asynchronously derives signatures.
+use bitcoin::secp256k1::PublicKey;
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+use crate::ln::ChannelId;
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::ChannelMessageHandler;
use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
+use crate::util::test_channel_signer::ops;
+
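+// The signer operations these tests toggle to simulate an asynchronous signer: retrieving the
+// next per-commitment point, releasing the previous per-commitment secret, and signing the
+// counterparty's commitment transaction.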
+const OPS: u32 = ops::GET_PER_COMMITMENT_POINT | ops::RELEASE_COMMITMENT_SECRET | ops::SIGN_COUNTERPARTY_COMMITMENT;
#[test]
-fn test_async_commitment_signature_for_funding_created() {
+fn test_funding_created() {
// Simulate acquiring the signature for `funding_created` asynchronously.
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
// But! Let's make node[0]'s signer be unavailable: we should *not* broadcast a funding_created
// message...
let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
- nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &temporary_channel_id, false);
+ nodes[0].set_channel_signer_ops_available(&nodes[1].node.get_our_node_id(), &temporary_channel_id, OPS, false);
nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
check_added_monitors(&nodes[0], 0);
channels[0].channel_id
};
- nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, true);
+ nodes[0].set_channel_signer_ops_available(&nodes[1].node.get_our_node_id(), &chan_id, OPS, true);
nodes[0].node.signer_unblocked(Some((nodes[1].node.get_our_node_id(), chan_id)));
let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
}
#[test]
-fn test_async_commitment_signature_for_funding_signed() {
+fn test_funding_signed() {
// Simulate acquiring the signature for `funding_signed` asynchronously.
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
// Now let's make node[1]'s signer be unavailable while handling the `funding_created`. It should
// *not* broadcast a `funding_signed`...
- nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &temporary_channel_id, false);
+ nodes[1].set_channel_signer_ops_available(&nodes[0].node.get_our_node_id(), &temporary_channel_id, OPS, false);
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors(&nodes[1], 1);
assert_eq!(channels.len(), 1, "expected one channel, not {}", channels.len());
channels[0].channel_id
};
- nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &chan_id, true);
+ nodes[1].set_channel_signer_ops_available(&nodes[0].node.get_our_node_id(), &chan_id, OPS, true);
nodes[1].node.signer_unblocked(Some((nodes[0].node.get_our_node_id(), chan_id)));
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
}
#[test]
-fn test_async_commitment_signature_for_commitment_signed() {
+fn test_commitment_signed() {
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
dst.node.handle_update_add_htlc(&src.node.get_our_node_id(), &payment_event.msgs[0]);
- // Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a
- // `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`.
- dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, false);
+  // Mark dst's signer as unavailable and handle src's commitment_signed. With its signer offline,
+  // dst should not yet respond with any updates.
+ dst.set_channel_signer_ops_available(&src.node.get_our_node_id(), &chan_id, OPS, false);
dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg);
check_added_monitors(dst, 1);
- get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id());
+ {
+ let events = dst.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 0, "expected 0 events to be generated, got {}", events.len());
+ }
-  // Mark dst's signer as available and retry: we now expect to see dst's `commitment_signed`.
+  // Mark dst's signer as available and retry: we now expect to see dst's `revoke_and_ack` and
+  // `commitment_signed`.
- dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, true);
+ dst.set_channel_signer_ops_available(&src.node.get_our_node_id(), &chan_id, OPS, true);
dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id)));
- let events = dst.node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1, "expected one message, got {}", events.len());
- if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = events[0] {
- assert_eq!(node_id, &src.node.get_our_node_id());
- } else {
- panic!("expected UpdateHTLCs message, not {:?}", events[0]);
- };
+ get_revoke_commit_msgs(&dst, &src.node.get_our_node_id());
}
#[test]
-fn test_async_commitment_signature_for_funding_signed_0conf() {
+fn test_funding_signed_0conf() {
// Simulate acquiring the signature for `funding_signed` asynchronously for a zero-conf channel.
let mut manually_accept_config = test_default_channel_config();
manually_accept_config.manually_accept_inbound_channels = true;
// Now let's make node[1]'s signer be unavailable while handling the `funding_created`. It should
// *not* broadcast a `funding_signed`...
- nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &temporary_channel_id, false);
+ nodes[1].set_channel_signer_ops_available(&nodes[0].node.get_our_node_id(), &temporary_channel_id, OPS, false);
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors(&nodes[1], 1);
};
// At this point, we basically expect the channel to open like a normal zero-conf channel.
- nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &chan_id, true);
+ nodes[1].set_channel_signer_ops_available(&nodes[0].node.get_our_node_id(), &chan_id, OPS, true);
nodes[1].node.signer_unblocked(Some((nodes[0].node.get_our_node_id(), chan_id)));
let (funding_signed, channel_ready_1) = {
assert_eq!(nodes[1].node.list_usable_channels().len(), 1);
}
+/// Helper to run operations with a simulated asynchronous signer.
+///
+/// Disables the specified signer operations for the channel, runs `do_fn`, then re-enables each
+/// operation in `masks` order, calling `signer_unblocked` after each one.
+#[cfg(test)]
+pub fn with_async_signer<'a, DoFn>(node: &Node, peer_id: &PublicKey, channel_id: &ChannelId, masks: &Vec<u32>, do_fn: &'a DoFn) where DoFn: Fn() {
+ let mask = masks.iter().fold(0, |acc, m| (acc | m));
+ node.set_channel_signer_ops_available(peer_id, channel_id, mask, false);
+ do_fn();
+ for mask in masks {
+ node.set_channel_signer_ops_available(peer_id, channel_id, *mask, true);
+ node.node.signer_unblocked(Some((*peer_id, *channel_id)));
+ }
+}
+
+#[cfg(test)]
+fn do_test_payment(masks: &Vec<u32>) {
+ // This runs through a one-hop payment from start to finish, simulating an asynchronous signer at
+ // each step.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let (_up1, _up2, channel_id, _tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ let alice = &nodes[0];
+ let bob = &nodes[1];
+
+ let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(alice, bob, 8_000_000);
+
+ with_async_signer(&alice, &bob.node.get_our_node_id(), &channel_id, masks, &|| {
+ alice.node.send_payment_with_route(&route, payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+ check_added_monitors!(alice, 1);
+ let events = alice.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 0, "expected 0 events, got {}", events.len());
+ });
+
+ let payment_event = {
+ let mut events = alice.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.remove(0))
+ };
+ assert_eq!(payment_event.node_id, bob.node.get_our_node_id());
+ assert_eq!(payment_event.msgs.len(), 1);
+
+ // alice --[update_add_htlc]--> bob
+ // alice --[commitment_signed]--> bob
+ with_async_signer(&bob, &alice.node.get_our_node_id(), &channel_id, masks, &|| {
+ bob.node.handle_update_add_htlc(&alice.node.get_our_node_id(), &payment_event.msgs[0]);
+ bob.node.handle_commitment_signed(&alice.node.get_our_node_id(), &payment_event.commitment_msg);
+ check_added_monitors(bob, 1);
+ });
+
+ // alice <--[revoke_and_ack]-- bob
+ // alice <--[commitment_signed]-- bob
+ {
+ let (raa, cu) = {
+ let events = bob.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2, "expected 2 messages, got {}", events.len());
+ match (&events[0], &events[1]) {
+ (MessageSendEvent::SendRevokeAndACK { msg: raa, .. }, MessageSendEvent::UpdateHTLCs { updates: cu, .. }) => {
+ assert_eq!(cu.update_add_htlcs.len(), 0, "expected 0 update_add_htlcs, got {}", cu.update_add_htlcs.len());
+ (raa.clone(), cu.clone())
+ }
+        (a, b) => panic!("expected SendRevokeAndACK and UpdateHTLCs, not {:?} and {:?}", a, b)
+ }
+ };
+
+ // TODO: run this with_async_signer once validate_counterparty_revocation supports it.
+ alice.node.handle_revoke_and_ack(&bob.node.get_our_node_id(), &raa);
+ check_added_monitors(alice, 1);
+
+ with_async_signer(&alice, &bob.node.get_our_node_id(), &channel_id, masks, &|| {
+ alice.node.handle_commitment_signed(&bob.node.get_our_node_id(), &cu.commitment_signed);
+ check_added_monitors(alice, 1);
+ });
+ }
+
+ // alice --[revoke_and_ack]--> bob
+ // TODO: run this with_async_signer once validate_counterparty_revocation supports it.
+ let raa = get_event_msg!(alice, MessageSendEvent::SendRevokeAndACK, bob.node.get_our_node_id());
+ bob.node.handle_revoke_and_ack(&alice.node.get_our_node_id(), &raa);
+ check_added_monitors(bob, 1);
+
+ expect_pending_htlcs_forwardable!(bob);
+
+ // Bob generates a PaymentClaimable to user code.
+ {
+ let events = bob.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1, "expected 1 event, got {}", events.len());
+ match &events[0] {
+ Event::PaymentClaimable { .. } => {
+ bob.node.claim_funds(payment_preimage);
+ }
+ ev => panic!("Expected PaymentClaimable, got {:?}", ev)
+ }
+ check_added_monitors(bob, 1);
+ }
+
+ // Bob generates a PaymentClaimed event to user code.
+ {
+ let events = bob.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1, "expected 1 event, got {}", events.len());
+ match &events[0] {
+ Event::PaymentClaimed { .. } => (),
+ ev => panic!("Expected PaymentClaimed, got {:?}", ev),
+ }
+ }
+
+ // alice <--[update_fulfill_htlcs]-- bob
+ // alice <--[commitment_signed]-- bob
+ {
+ let cu = {
+ let events = bob.node.get_and_clear_pending_msg_events();
+      assert_eq!(events.len(), 1, "expected 1 event, got {}", events.len());
+ match &events[0] {
+ MessageSendEvent::UpdateHTLCs { updates, .. } => {
+ assert_eq!(updates.update_fulfill_htlcs.len(), 1, "expected 1 update_fulfill_htlcs, got {}", updates.update_fulfill_htlcs.len());
+ updates.clone()
+ }
+ ev => panic!("Expected UpdateHTLCs, got {:?}", ev)
+ }
+ };
+
+ with_async_signer(&alice, &bob.node.get_our_node_id(), &channel_id, masks, &|| {
+ alice.node.handle_update_fulfill_htlc(&bob.node.get_our_node_id(), &cu.update_fulfill_htlcs[0]);
+ alice.node.handle_commitment_signed(&bob.node.get_our_node_id(), &cu.commitment_signed);
+ check_added_monitors(alice, 1);
+ });
+ }
+
+ // alice --[revoke_and_ack]--> bob
+ // alice --[commitment_signed]--> bob
+ {
+ let (raa, cu) = {
+ let events = alice.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2, "expected 2 messages, got {}", events.len());
+ match (&events[0], &events[1]) {
+ (MessageSendEvent::SendRevokeAndACK { msg: raa, .. }, MessageSendEvent::UpdateHTLCs { updates: cu, .. }) => {
+ assert_eq!(cu.update_fulfill_htlcs.len(), 0, "expected 0 update_fulfill_htlcs, got {}", cu.update_fulfill_htlcs.len());
+ (raa.clone(), cu.clone())
+ }
+        (a, b) => panic!("expected SendRevokeAndACK and UpdateHTLCs, not {:?} and {:?}", a, b)
+ }
+ };
+
+ // TODO: run with async once validate_counterparty_revocation supports it.
+ bob.node.handle_revoke_and_ack(&alice.node.get_our_node_id(), &raa);
+ check_added_monitors(bob, 1);
+
+ with_async_signer(&bob, &alice.node.get_our_node_id(), &channel_id, masks, &|| {
+ bob.node.handle_commitment_signed(&alice.node.get_our_node_id(), &cu.commitment_signed);
+ check_added_monitors(bob, 1);
+ });
+ }
+
+ // alice <--[revoke_and_ack]-- bob
+ // TODO: run with async once validate_counterparty_revocation supports it.
+ let raa = get_event_msg!(bob, MessageSendEvent::SendRevokeAndACK, alice.node.get_our_node_id());
+ alice.node.handle_revoke_and_ack(&bob.node.get_our_node_id(), &raa);
+ check_added_monitors(alice, 0);
+
+ // Alice generates PaymentSent and PaymentPathSuccessful events to user code.
+ {
+ let events = alice.node.get_and_clear_pending_events();
+    assert_eq!(events.len(), 2, "expected 2 events, got {}", events.len());
+ match (&events[0], &events[1]) {
+ (Event::PaymentSent { .. }, Event::PaymentPathSuccessful { .. }) => (),
+ (a, b) => panic!("Expected PaymentSent and PaymentPathSuccessful, got {:?} and {:?}", a, b)
+ }
+
+ check_added_monitors(alice, 1); // why? would have expected this after handling RAA...
+ }
+}
+
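+// Each test below drives `do_test_payment` with a different order of signer-op re-enablement;
+// the test-name suffix encodes that order: g = GET_PER_COMMITMENT_POINT,
+// r = RELEASE_COMMITMENT_SECRET, s = SIGN_COUNTERPARTY_COMMITMENT.
+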
+#[test]
+fn test_payment_grs() {
+ do_test_payment(&vec![ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_payment_gsr() {
+ do_test_payment(&vec![ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+#[test]
+fn test_payment_rsg() {
+ do_test_payment(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT]);
+}
+
+#[test]
+fn test_payment_rgs() {
+ do_test_payment(&vec![ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT, ops::SIGN_COUNTERPARTY_COMMITMENT]);
+}
+
+#[test]
+fn test_payment_srg() {
+ do_test_payment(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::RELEASE_COMMITMENT_SECRET, ops::GET_PER_COMMITMENT_POINT]);
+}
+
#[test]
-fn test_async_commitment_signature_for_peer_disconnect() {
+fn test_payment_sgr() {
+ do_test_payment(&vec![ops::SIGN_COUNTERPARTY_COMMITMENT, ops::GET_PER_COMMITMENT_POINT, ops::RELEASE_COMMITMENT_SECRET]);
+}
+
+#[test]
+fn test_peer_disconnect() {
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
dst.node.handle_update_add_htlc(&src.node.get_our_node_id(), &payment_event.msgs[0]);
- // Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a
- // `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`.
- dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, false);
+  // Mark dst's signer as unavailable and handle src's commitment_signed. With its signer offline,
+  // dst should not respond with any updates.
+ dst.set_channel_signer_ops_available(&src.node.get_our_node_id(), &chan_id, OPS, false);
dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg);
check_added_monitors(dst, 1);
- get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id());
-
// Now disconnect and reconnect the peers.
src.node.peer_disconnected(&dst.node.get_our_node_id());
dst.node.peer_disconnected(&src.node.get_our_node_id());
let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
reconnect_args.send_channel_ready = (false, false);
- reconnect_args.pending_raa = (true, false);
+ reconnect_args.pending_raa = (false, false);
reconnect_nodes(reconnect_args);
- // Mark dst's signer as available and retry: we now expect to see dst's `commitment_signed`.
- dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, true);
+  // Mark dst's signer as available and retry: we now expect to see dst's `revoke_and_ack` and
+  // `commitment_signed`.
+ dst.set_channel_signer_ops_available(&src.node.get_our_node_id(), &chan_id, OPS, true);
dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id)));
-
- {
- let events = dst.node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1, "expected one message, got {}", events.len());
- if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = events[0] {
- assert_eq!(node_id, &src.node.get_our_node_id());
- } else {
- panic!("expected UpdateHTLCs message, not {:?}", events[0]);
- };
- }
+ get_revoke_commit_msgs(dst, &src.node.get_our_node_id());
}
pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
-/// The return value of `signer_maybe_unblocked`
+/// The return value of `signer_maybe_unblocked`.
+///
+/// When the signer becomes unblocked, any non-`None` message accumulated here should be sent to
+/// the peer by the caller.
#[allow(unused)]
pub(super) struct SignerResumeUpdates {
+  /// A `commitment_signed` message, possibly with additional HTLC update messages (e.g.,
+  /// `update_add_htlc`) that should be sent to the peer ahead of it.
+ ///
+ /// When both this and `raa` contain values, they should be sent to the peer using an ordering
+ /// consistent with `order`.
pub commitment_update: Option<msgs::CommitmentUpdate>,
+ /// A `revoke_and_ack` message that should be sent to the peer.
+ ///
+  /// When both this and `commitment_update` contain values, they should be sent to the peer using
+  /// an ordering consistent with `order`.
+ pub raa: Option<msgs::RevokeAndACK>,
+ /// The order in which the `commitment_signed` and `revoke_and_ack` messages should be provided to
+ /// the peer. Only meaningful if both of these messages are present.
+ pub order: RAACommitmentOrder,
+ /// A `funding_signed` message that should be sent to the peer.
pub funding_signed: Option<msgs::FundingSigned>,
+ /// A `funding_created` message that should be sent to the peer.
pub funding_created: Option<msgs::FundingCreated>,
+ /// A `channel_ready` message that should be sent to the peer. If present, it should be sent last.
pub channel_ready: Option<msgs::ChannelReady>,
}
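+
+// A minimal sketch (not actual LDK handler code) of how a caller might drain a
+// `SignerResumeUpdates`, honoring `order` for the `commitment_update`/`raa` pair and sending
+// `channel_ready` last; `enqueue` stands in for a hypothetical helper that queues an outbound
+// message to the peer:
+//
+//   let updates = chan.signer_maybe_unblocked(&logger);
+//   match updates.order {
+//     RAACommitmentOrder::CommitmentFirst => {
+//       if let Some(cu) = updates.commitment_update { enqueue(cu); }
+//       if let Some(raa) = updates.raa { enqueue(raa); }
+//     },
+//     RAACommitmentOrder::RevokeAndACKFirst => {
+//       if let Some(raa) = updates.raa { enqueue(raa); }
+//       if let Some(cu) = updates.commitment_update { enqueue(cu); }
+//     },
+//   }
+//   if let Some(msg) = updates.funding_created { enqueue(msg); }
+//   if let Some(msg) = updates.funding_signed { enqueue(msg); }
+//   if let Some(msg) = updates.channel_ready { enqueue(msg); }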
+#[allow(unused)]
+pub(super) struct UnfundedInboundV1SignerResumeUpdates {
+ pub accept_channel: Option<msgs::AcceptChannel>,
+}
+
+#[allow(unused)]
+pub(super) struct UnfundedOutboundV1SignerResumeUpdates {
+ pub open_channel: Option<msgs::OpenChannel>,
+}
+
/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
pub channel_ready: Option<msgs::ChannelReady>,
// cost of others, but should really just be changed.
cur_holder_commitment_transaction_number: u64,
+
+  // The commitment point corresponding to `cur_holder_commitment_transaction_number`, i.e. the
+  // *next* state. This may be `None` on initial channel construction if the signer could not yet
+  // provide the first commitment point.
+ cur_holder_commitment_point: Option<PublicKey>,
+ // The commitment secret corresponding to `cur_holder_commitment_transaction_number + 2`, which is
+ // the *previous* state.
+ prev_holder_commitment_secret: Option<[u8; 32]>,
cur_counterparty_commitment_transaction_number: u64,
value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
pending_inbound_htlcs: Vec<InboundHTLCOutput>,
/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
/// setting it again as a side-effect of [`Channel::channel_reestablish`].
signer_pending_commitment_update: bool,
+ /// Similar to [`Self::signer_pending_commitment_update`]: indicates that we've deferred sending a
+ /// `revoke_and_ack`, and should do so once the signer has become unblocked.
+ signer_pending_revoke_and_ack: bool,
/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
/// outbound or inbound.
signer_pending_funding: bool,
+ /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send a
+ /// [`msgs::ChannelReady`].
+ signer_pending_channel_ready: bool,
+ /// If we attempted to retrieve the per-commitment point for the next transaction but the signer
+ /// wasn't ready, then this will be set to `true`.
+ signer_pending_commitment_point: bool,
+ /// If we attempted to release the per-commitment secret for the previous transaction but the
+ /// signer wasn't ready, then this will be set to `true`.
+ signer_pending_released_secret: bool,
// pending_update_fee is filled when sending and receiving update_fee.
//
self.channel_ready_event_emitted = true;
}
+  /// Retrieves the next commitment point and previous commitment secret from the signer, setting
+  /// `signer_pending_commitment_point` and `signer_pending_released_secret` to reflect whether
+  /// each value was available.
+ pub fn update_holder_per_commitment<L: Deref>(&mut self, logger: &L) where L::Target: Logger
+ {
+ let transaction_number = self.cur_holder_commitment_transaction_number;
+ let signer = self.holder_signer.as_ref();
+
+ log_trace!(logger, "Retrieving commitment point for {} transaction number {}", self.channel_id(), transaction_number);
+ self.cur_holder_commitment_point = match signer.get_per_commitment_point(transaction_number, &self.secp_ctx) {
+ Ok(point) => {
+ log_trace!(logger, "Commitment point for {} transaction number {} retrieved", self.channel_id(), transaction_number);
+ self.signer_pending_commitment_point = false;
+ Some(point)
+ }
+
+ Err(_) => {
+ log_trace!(logger, "Commitment point for {} transaction number {} is not available", self.channel_id(), transaction_number);
+ self.signer_pending_commitment_point = true;
+ None
+ }
+ };
+
+ let releasing_transaction_number = transaction_number + 2;
+ if releasing_transaction_number <= INITIAL_COMMITMENT_NUMBER {
+ log_trace!(logger, "Retrieving commitment secret for {} transaction number {}", self.channel_id(), releasing_transaction_number);
+ self.prev_holder_commitment_secret = match signer.release_commitment_secret(releasing_transaction_number) {
+ Ok(secret) => {
+ log_trace!(logger, "Commitment secret for {} transaction number {} retrieved", self.channel_id(), releasing_transaction_number);
+ self.signer_pending_released_secret = false;
+ Some(secret)
+ }
+
+ Err(_) => {
+ log_trace!(logger, "Commitment secret for {} transaction number {} is not available", self.channel_id(), releasing_transaction_number);
+ self.signer_pending_released_secret = true;
+ None
+ }
+      };
+    }
+ }
+
/// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
/// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
/// no longer be considered when forwarding HTLCs.
#[inline]
/// Creates a set of keys for build_commitment_transaction to generate a transaction which our
- /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
- /// our counterparty!)
+ /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to our
+ /// counterparty!) The keys are specifically generated for the _next_ state to which the channel
+ /// is about to advance.
/// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
/// TODO Some magic rust shit to compile-time check this?
- fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
- let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
+ fn build_next_holder_transaction_keys(&self) -> TxCreationKeys {
let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
let counterparty_pubkeys = self.get_counterparty_pubkeys();
+ let cur_holder_commitment_point = self.cur_holder_commitment_point
+ .expect("Holder per-commitment point is not ready");
- TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
+ TxCreationKeys::derive_new(
+ &self.secp_ctx, &cur_holder_commitment_point, delayed_payment_base, htlc_basepoint,
+ &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
}
#[inline]
log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
self.signer_pending_funding = false;
}
-
+
Some(msgs::FundingCreated {
temporary_channel_id: self.temporary_channel_id.unwrap(),
funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
- let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ // N.B. we'll have acquired the first per-commitment point from the signer during channel
+ // creation. Verify that the signature from the counterparty is correct so that we've got our
+ // signed refund transaction if we need to immediately close.
+ let holder_signer = self.context.build_next_holder_transaction_keys();
let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
{
let trusted_tx = initial_commitment_tx.trust();
self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
-
let funding_redeemscript = self.context.get_funding_redeemscript();
let funding_txo = self.context.get_funding_txo().unwrap();
let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
self.context.channel_state = ChannelState::FundingSent as u32;
}
self.context.cur_holder_commitment_transaction_number -= 1;
+ self.context.update_holder_per_commitment(logger);
self.context.cur_counterparty_commitment_transaction_number -= 1;
log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
let need_channel_ready = self.check_get_channel_ready(0).is_some();
+ log_trace!(logger, "funding_signed {} channel_ready", if need_channel_ready { "needs" } else { "does not need" });
self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
Ok(channel_monitor)
}
let funding_script = self.context.get_funding_redeemscript();
- let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ let keys = self.context.build_next_holder_transaction_keys();
let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
let commitment_txid = {
};
self.context.cur_holder_commitment_transaction_number -= 1;
+ self.context.update_holder_per_commitment(logger);
+
// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
// build_commitment_no_status_check() next which will reset this to RAAFirst.
+    log_debug!(logger, "Setting resend_order to CommitmentFirst");
self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
// Before proposing a feerate update, check that we can actually afford the new fee.
let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
- let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ let keys = self.context.build_next_holder_transaction_keys();
let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
"Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
self.context.monitor_pending_channel_ready = false;
- let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- Some(msgs::ChannelReady {
- channel_id: self.context.channel_id(),
- next_per_commitment_point,
- short_channel_id_alias: Some(self.context.outbound_scid_alias),
+ self.get_channel_ready().or_else(|| {
+ log_trace!(logger, "Monitor was pending channel_ready with no commitment point available; setting signer_pending_channel_ready = true");
+ self.context.signer_pending_channel_ready = true;
+ None
})
} else { None };
}
let raa = if self.context.monitor_pending_revoke_and_ack {
- Some(self.get_last_revoke_and_ack())
+ self.get_last_revoke_and_ack(logger).or_else(|| {
+ self.context.signer_pending_revoke_and_ack = true;
+ None
+ })
} else { None };
let commitment_update = if self.context.monitor_pending_commitment_signed {
self.get_last_commitment_update_for_send(logger).ok()
self.mark_awaiting_response();
}
+ if self.context.monitor_pending_commitment_signed && commitment_update.is_none() {
+ log_debug!(logger, "Monitor was pending_commitment_signed with no commitment update available; setting signer_pending_commitment_update = true");
+ self.context.signer_pending_commitment_update = true;
+ } else {
+ // If the signer was pending a commitment update, but we happened to get one just now because
+ // the monitor retrieved it, then we can mark the signer as "not pending anymore".
+ if self.context.signer_pending_commitment_update && commitment_update.is_some() {
+ self.context.signer_pending_commitment_update = false;
+ }
+ }
+ if self.context.monitor_pending_revoke_and_ack && raa.is_none() {
+ log_debug!(logger, "Monitor was pending_revoke_and_ack with no RAA available; setting signer_pending_revoke_and_ack = true");
+ self.context.signer_pending_revoke_and_ack = true;
+ } else {
+ // If the signer was pending a RAA, but we happened to get one just now because the monitor
+ // retrieved it, then we can mark the signer as "not pending anymore".
+ if self.context.signer_pending_revoke_and_ack && raa.is_some() {
+ self.context.signer_pending_revoke_and_ack = false;
+ }
+ }
+
self.context.monitor_pending_revoke_and_ack = false;
self.context.monitor_pending_commitment_signed = false;
+
let order = self.context.resend_order.clone();
- log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
+ log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA{}",
&self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
- match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
+ if commitment_update.is_some() && raa.is_some() {
+ match order { RAACommitmentOrder::CommitmentFirst => ", with commitment first", RAACommitmentOrder::RevokeAndACKFirst => ", with RAA first"}
+ } else { "" });
MonitorRestoreUpdates {
raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
}
/// blocked.
#[allow(unused)]
pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
- let commitment_update = if self.context.signer_pending_commitment_update {
- self.get_last_commitment_update_for_send(logger).ok()
- } else { None };
+ log_trace!(logger, "Signing unblocked in channel {} at sequence {}",
+ &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number);
+
+ if self.context.signer_pending_commitment_point || self.context.signer_pending_released_secret {
+ log_trace!(logger, "Attempting to update holder per-commitment for pending commitment point and secret...");
+ self.context.update_holder_per_commitment(logger);
+ }
+
+ // Make sure that we honor any ordering requirements between the commitment update and revoke-and-ack.
+ let (commitment_update, raa) = match &self.context.resend_order {
+ RAACommitmentOrder::CommitmentFirst => {
+ let cu = if self.context.signer_pending_commitment_update {
+ log_trace!(logger, "Attempting to generate pending commitment update...");
+ self.get_last_commitment_update_for_send(logger).map(|cu| {
+ self.context.signer_pending_commitment_update = false;
+ cu
+ }).ok()
+ } else { None };
+
+ let raa = if self.context.signer_pending_revoke_and_ack && !self.context.signer_pending_commitment_update {
+ log_trace!(logger, "Attempting to generate pending RAA...");
+ self.get_last_revoke_and_ack(logger).map(|raa| {
+ self.context.signer_pending_revoke_and_ack = false;
+ raa
+ })
+ } else { None };
+
+ (cu, raa)
+ }
+
+ RAACommitmentOrder::RevokeAndACKFirst => {
+ let raa = if self.context.signer_pending_revoke_and_ack {
+ log_trace!(logger, "Attempting to generate pending RAA...");
+ self.get_last_revoke_and_ack(logger).map(|raa| {
+ self.context.signer_pending_revoke_and_ack = false;
+ raa
+ })
+ } else { None };
+
+ let cu = if self.context.signer_pending_commitment_update && !self.context.signer_pending_revoke_and_ack {
+ log_trace!(logger, "Attempting to generate pending commitment update...");
+ self.get_last_commitment_update_for_send(logger).map(|cu| {
+ self.context.signer_pending_commitment_update = false;
+ cu
+ }).ok()
+ } else { None };
+
+ (cu, raa)
+ }
+ };
+
let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
+ log_trace!(logger, "Attempting to generate pending funding signed...");
self.context.get_funding_signed_msg(logger).1
} else { None };
- let channel_ready = if funding_signed.is_some() {
- self.check_get_channel_ready(0)
- } else { None };
let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
+ log_trace!(logger, "Attempting to generate pending funding created...");
self.context.get_funding_created_msg(logger)
} else { None };
- log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready",
- if commitment_update.is_some() { "a" } else { "no" },
- if funding_signed.is_some() { "a" } else { "no" },
- if funding_created.is_some() { "a" } else { "no" },
- if channel_ready.is_some() { "a" } else { "no" });
-
+ // Don't yield up a `channel_ready` message if we're still pending funding.
+ let channel_ready = if self.context.signer_pending_channel_ready && !self.context.signer_pending_funding {
+ log_trace!(logger, "Attempting to generate pending channel ready...");
+ self.get_channel_ready().map(|msg| {
+ self.context.signer_pending_channel_ready = false;
+ msg
+ })
+ } else { None };
+
+ let order = self.context.resend_order.clone();
+
+ log_debug!(logger, "Signing unblocked in channel {} at sequence {} resulted in {} commitment update, {} RAA{}, {} funding signed, {} funding created, {} channel ready",
+ &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number,
+ if commitment_update.is_some() { "a" } else { "no" },
+ if raa.is_some() { "an" } else { "no" },
+ if commitment_update.is_some() && raa.is_some() {
+ if order == RAACommitmentOrder::CommitmentFirst { " (commitment first)" } else { " (RAA first)" }
+ } else { "" },
+ if funding_signed.is_some() { "a" } else { "no" },
+ if funding_created.is_some() { "a" } else { "no" },
+ if channel_ready.is_some() { "a" } else { "no" });
+
SignerResumeUpdates {
commitment_update,
+ raa,
+ order,
funding_signed,
funding_created,
channel_ready,
}
}
- fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
- let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
- msgs::RevokeAndACK {
- channel_id: self.context.channel_id,
- per_commitment_secret,
- next_per_commitment_point,
- #[cfg(taproot)]
- next_local_nonce: None,
+ fn get_last_revoke_and_ack<L: Deref>(&self, logger: &L) -> Option<msgs::RevokeAndACK> where L::Target: Logger {
+ assert!(self.context.cur_holder_commitment_transaction_number <= INITIAL_COMMITMENT_NUMBER + 2);
+ match (self.context.cur_holder_commitment_point, self.context.prev_holder_commitment_secret) {
+ (Some(next_per_commitment_point), Some(per_commitment_secret)) => {
+ log_debug!(logger, "Regenerated last revoke-and-ack in channel {} for next per-commitment point sequence number {}, releasing secret for {}",
+ &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number,
+ self.context.cur_holder_commitment_transaction_number + 2);
+
+ Some(msgs::RevokeAndACK {
+ channel_id: self.context.channel_id,
+ per_commitment_secret,
+ next_per_commitment_point,
+ #[cfg(taproot)]
+ next_local_nonce: None,
+ })
+ },
+
+ (Some(_), None) => {
+ log_debug!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the secret for {} is not available",
+ &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number,
+ self.context.cur_holder_commitment_transaction_number + 2);
+ None
+ },
+
+ (None, Some(_)) => {
+ log_debug!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available",
+ &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number);
+ None
+ },
+
+ (None, None) => {
+ log_debug!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because neither the next per-commitment point nor the secret for {} is available",
+ &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number,
+ self.context.cur_holder_commitment_transaction_number + 2);
+ None
+ },
}
}
+ fn get_channel_ready(&self) -> Option<msgs::ChannelReady> {
+ self.context.cur_holder_commitment_point.map(|next_per_commitment_point| {
+ msgs::ChannelReady {
+ channel_id: self.context.channel_id(),
+ next_per_commitment_point,
+ short_channel_id_alias: Some(self.context.outbound_scid_alias),
+ }
+ })
+ }
+
/// Gets the last commitment update for immediate sending to our peer.
fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
let mut update_add_htlcs = Vec::new();
})
} else { None };
- log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
- &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
- update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
-
let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
if self.context.signer_pending_commitment_update {
log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
}
return Err(());
};
+ log_debug!(logger, "Regenerated latest commitment update in channel {} at {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
+ &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number, if update_fee.is_some() { " update_fee," } else { "" },
+ update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
Ok(msgs::CommitmentUpdate {
update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
commitment_signed,
}
if msg.next_remote_commitment_number > 0 {
- let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
- let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
- .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
- if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
- return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
+ // TODO(waterson): figure out how to do this verification when an async signer is provided
+ // with a (more or less) arbitrary state index. Should we require that an async signer cache
+ // old points? Or should we make it so that we can restart the re-establish after the signer
+ // becomes unblocked? Or something else?
+ if false {
+ let state_index = INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1;
+ let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(state_index, &self.context.secp_ctx)
+ .map_err(|_| ChannelError::Close(format!("Unable to retrieve per-commitment point for state {}", state_index)))?;
+ let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
+ .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
+ if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
+ return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
+ }
}
if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
macro_rules! log_and_panic {
}
// We have OurChannelReady set!
- let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ let channel_ready = self.get_channel_ready();
+ if channel_ready.is_none() {
+ self.context.signer_pending_channel_ready = true;
+ }
+
return Ok(ReestablishResponses {
- channel_ready: Some(msgs::ChannelReady {
- channel_id: self.context.channel_id(),
- next_per_commitment_point,
- short_channel_id_alias: Some(self.context.outbound_scid_alias),
- }),
+ channel_ready,
raa: None, commitment_update: None,
order: RAACommitmentOrder::CommitmentFirst,
shutdown_msg, announcement_sigs,
self.context.monitor_pending_revoke_and_ack = true;
None
} else {
- Some(self.get_last_revoke_and_ack())
+ self.get_last_revoke_and_ack(logger).map(|raa| {
+ self.context.signer_pending_revoke_and_ack = false;
+ raa
+ }).or_else(|| {
+ self.context.signer_pending_revoke_and_ack = true;
+ None
+ })
}
} else {
return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
// We should never have to worry about MonitorUpdateInProgress resending ChannelReady
- let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- Some(msgs::ChannelReady {
- channel_id: self.context.channel_id(),
- next_per_commitment_point,
- short_channel_id_alias: Some(self.context.outbound_scid_alias),
+ self.get_channel_ready().or_else(|| {
+ self.context.signer_pending_channel_ready = true;
+ None
})
} else { None };
if need_commitment_update {
if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
- let next_per_commitment_point =
- self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
- return Some(msgs::ChannelReady {
- channel_id: self.context.channel_id,
- next_per_commitment_point,
- short_channel_id_alias: Some(self.context.outbound_scid_alias),
- });
+ if let Ok(next_per_commitment_point) = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx) {
+ return Some(msgs::ChannelReady {
+ channel_id: self.context.channel_id,
+ next_per_commitment_point,
+ short_channel_id_alias: Some(self.context.outbound_scid_alias),
+ });
+ }
+ self.context.signer_pending_channel_ready = true;
}
} else {
self.context.monitor_pending_channel_ready = true;
self.context.pending_update_fee = None;
}
}
+  log_debug!(logger, "Setting resend_order to RevokeAndACKFirst");
self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
let (mut htlcs_ref, counterparty_commitment_tx) =
pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
pub context: ChannelContext<SP>,
pub unfunded_context: UnfundedChannelContext,
+ pub signer_pending_open_channel: bool,
}
impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
let temporary_channel_id = ChannelId::temporary_from_entropy_source(entropy_source);
+ let cur_holder_commitment_point = holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, &secp_ctx).ok();
+
Ok(Self {
context: ChannelContext {
user_id,
destination_script,
cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+ cur_holder_commitment_point,
+ prev_holder_commitment_secret: None,
cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
value_to_self_msat,
monitor_pending_finalized_fulfills: Vec::new(),
signer_pending_commitment_update: false,
+ signer_pending_revoke_and_ack: false,
signer_pending_funding: false,
+ signer_pending_channel_ready: false,
+ signer_pending_commitment_point: false,
+ signer_pending_released_secret: false,
#[cfg(debug_assertions)]
holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
blocked_monitor_updates: Vec::new(),
},
- unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
+ unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
+ signer_pending_open_channel: false,
})
}
/// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
pub(crate) fn maybe_handle_error_without_close<F: Deref>(
&mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
- ) -> Result<msgs::OpenChannel, ()>
+ ) -> Result<Option<msgs::OpenChannel>, ()>
where
F::Target: FeeEstimator
{
self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
}
self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
- Ok(self.get_open_channel(chain_hash))
+ let opt_msg = self.get_open_channel(chain_hash);
+ if opt_msg.is_none() {
+ self.signer_pending_open_channel = true;
+ }
+ Ok(opt_msg)
}
- pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
+ pub fn get_open_channel(&self, chain_hash: ChainHash) -> Option<msgs::OpenChannel> {
if !self.context.is_outbound() {
panic!("Tried to open a channel for an inbound channel?");
}
panic!("Tried to send an open_channel for a channel that has already advanced");
}
- let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
let keys = self.context.get_holder_pubkeys();
- msgs::OpenChannel {
- chain_hash,
- temporary_channel_id: self.context.channel_id,
- funding_satoshis: self.context.channel_value_satoshis,
- push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
- dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
- max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
- channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
- htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
- feerate_per_kw: self.context.feerate_per_kw as u32,
- to_self_delay: self.context.get_holder_selected_contest_delay(),
- max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
- funding_pubkey: keys.funding_pubkey,
- revocation_basepoint: keys.revocation_basepoint,
- payment_point: keys.payment_point,
- delayed_payment_basepoint: keys.delayed_payment_basepoint,
- htlc_basepoint: keys.htlc_basepoint,
- first_per_commitment_point,
- channel_flags: if self.context.config.announced_channel {1} else {0},
- shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
- Some(script) => script.clone().into_inner(),
- None => Builder::new().into_script(),
- }),
- channel_type: Some(self.context.channel_type.clone()),
- }
+ self.context.cur_holder_commitment_point.map(|first_per_commitment_point| {
+ msgs::OpenChannel {
+ chain_hash,
+ temporary_channel_id: self.context.channel_id,
+ funding_satoshis: self.context.channel_value_satoshis,
+ push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
+ dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
+ max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
+ channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
+ htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
+ feerate_per_kw: self.context.feerate_per_kw as u32,
+ to_self_delay: self.context.get_holder_selected_contest_delay(),
+ max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
+ funding_pubkey: keys.funding_pubkey,
+ revocation_basepoint: keys.revocation_basepoint,
+ payment_point: keys.payment_point,
+ delayed_payment_basepoint: keys.delayed_payment_basepoint,
+ htlc_basepoint: keys.htlc_basepoint,
+ first_per_commitment_point,
+ channel_flags: if self.context.config.announced_channel {1} else {0},
+ shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
+ Some(script) => script.clone().into_inner(),
+ None => Builder::new().into_script(),
+ }),
+ channel_type: Some(self.context.channel_type.clone()),
+ }
+ })
}
// Message handlers
Ok(())
}
+
+ /// Indicates that the signer may have some signatures for us, so we should retry if we're
+ /// blocked.
+ #[allow(unused)]
+ pub fn signer_maybe_unblocked<L: Deref>(&mut self, chain_hash: &ChainHash, logger: &L) -> UnfundedOutboundV1SignerResumeUpdates
+ where L::Target: Logger
+ {
+ let open_channel = if self.signer_pending_open_channel {
+ self.context.update_holder_per_commitment(logger);
+ self.get_open_channel(chain_hash.clone()).map(|msg| {
+ self.signer_pending_open_channel = false;
+ msg
+ })
+ } else { None };
+ UnfundedOutboundV1SignerResumeUpdates {
+ open_channel,
+ }
+ }
}
/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
pub context: ChannelContext<SP>,
pub unfunded_context: UnfundedChannelContext,
+ pub signer_pending_accept_channel: bool,
}
impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
} else {
Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
};
+ let cur_holder_commitment_point = holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, &secp_ctx).ok();
let chan = Self {
context: ChannelContext {
destination_script,
cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+ cur_holder_commitment_point,
+ prev_holder_commitment_secret: None,
cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
value_to_self_msat: msg.push_msat,
monitor_pending_finalized_fulfills: Vec::new(),
signer_pending_commitment_update: false,
+ signer_pending_revoke_and_ack: false,
signer_pending_funding: false,
+ signer_pending_channel_ready: false,
+ signer_pending_commitment_point: false,
+ signer_pending_released_secret: false,
#[cfg(debug_assertions)]
holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
blocked_monitor_updates: Vec::new(),
},
- unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
+ unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
+ signer_pending_accept_channel: false,
};
Ok(chan)
/// should be sent back to the counterparty node.
///
/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
- pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
+ pub fn accept_inbound_channel(&mut self) -> Option<msgs::AcceptChannel> {
if self.context.is_outbound() {
panic!("Tried to send accept_channel for an outbound channel?");
}
/// [`InboundV1Channel::accept_inbound_channel`] instead.
///
/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
- fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
- let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- let keys = self.context.get_holder_pubkeys();
-
- msgs::AcceptChannel {
- temporary_channel_id: self.context.channel_id,
- dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
- max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
- channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
- htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
- minimum_depth: self.context.minimum_depth.unwrap(),
- to_self_delay: self.context.get_holder_selected_contest_delay(),
- max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
- funding_pubkey: keys.funding_pubkey,
- revocation_basepoint: keys.revocation_basepoint,
- payment_point: keys.payment_point,
- delayed_payment_basepoint: keys.delayed_payment_basepoint,
- htlc_basepoint: keys.htlc_basepoint,
- first_per_commitment_point,
- shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
- Some(script) => script.clone().into_inner(),
- None => Builder::new().into_script(),
- }),
- channel_type: Some(self.context.channel_type.clone()),
- #[cfg(taproot)]
- next_local_nonce: None,
- }
+ fn generate_accept_channel_message(&self) -> Option<msgs::AcceptChannel> {
+ self.context.cur_holder_commitment_point.map(|first_per_commitment_point| {
+ let keys = self.context.get_holder_pubkeys();
+ msgs::AcceptChannel {
+ temporary_channel_id: self.context.channel_id,
+ dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
+ max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
+ channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
+ htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
+ minimum_depth: self.context.minimum_depth.unwrap(),
+ to_self_delay: self.context.get_holder_selected_contest_delay(),
+ max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
+ funding_pubkey: keys.funding_pubkey,
+ revocation_basepoint: keys.revocation_basepoint,
+ payment_point: keys.payment_point,
+ delayed_payment_basepoint: keys.delayed_payment_basepoint,
+ htlc_basepoint: keys.htlc_basepoint,
+ first_per_commitment_point,
+ shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
+ Some(script) => script.clone().into_inner(),
+ None => Builder::new().into_script(),
+ }),
+ channel_type: Some(self.context.channel_type.clone()),
+ #[cfg(taproot)]
+ next_local_nonce: None,
+ }
+ })
}
/// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
///
/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
#[cfg(test)]
- pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
+ pub fn get_accept_channel_message(&self) -> Option<msgs::AcceptChannel> {
self.generate_accept_channel_message()
}
fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
let funding_script = self.context.get_funding_redeemscript();
- let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ let keys = self.context.build_next_holder_transaction_keys();
let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
let trusted_tx = initial_commitment_tx.trust();
let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
self.context.channel_id = funding_txo.to_channel_id();
self.context.cur_counterparty_commitment_transaction_number -= 1;
self.context.cur_holder_commitment_transaction_number -= 1;
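+ // Refresh our cached per-commitment point (and previously released secret) from the
+ // signer now that the holder commitment number has advanced.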
+ self.context.update_holder_per_commitment(logger);
let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
log_info!(logger, "{} funding_signed for peer for channel {}",
if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
+ let signer_pending_funding = self.context.signer_pending_funding;
// Promote the channel to a full-fledged one now that we have updated the state and have a
// `ChannelMonitor`.
Ok((channel, funding_signed, channel_monitor))
}
+
+ /// Indicates that the signer may have some signatures for us, so we should retry if we're
+ /// blocked.
+ #[allow(unused)]
+ pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> UnfundedInboundV1SignerResumeUpdates
+ where L::Target: Logger
+ {
+ let accept_channel = if self.signer_pending_accept_channel {
+ self.context.update_holder_per_commitment(logger);
+ self.generate_accept_channel_message().map(|msg| {
+ self.signer_pending_accept_channel = false;
+ msg
+ })
+ } else { None };
+ UnfundedInboundV1SignerResumeUpdates {
+ accept_channel,
+ }
+ }
}
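// Illustrative only: a sketch of how a manager-level `signer_unblocked` path might drain
// these updates (`pending_msg_events` and `counterparty_node_id` are stand-ins):
//
//   let updates = chan.signer_maybe_unblocked(&logger);
//   if let Some(msg) = updates.accept_channel {
//       pending_msg_events.push(MessageSendEvent::SendAcceptChannel {
//           node_id: counterparty_node_id, msg,
//       });
//   }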
const SERIALIZATION_VERSION: u8 = 3;
(35, pending_outbound_skimmed_fees, optional_vec),
(37, holding_cell_skimmed_fees, optional_vec),
(38, self.context.is_batch_funding, option),
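+ // The new fields below use odd TLV type numbers, so older versions that don't know
+ // about them will ignore the records rather than fail to deserialize.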
+ (39, self.context.cur_holder_commitment_point, option),
+ (41, self.context.prev_holder_commitment_secret, option),
+ (43, self.context.signer_pending_commitment_update, required),
+ (45, self.context.signer_pending_revoke_and_ack, required),
+ (47, self.context.signer_pending_funding, required),
+ (49, self.context.signer_pending_channel_ready, required),
+ (51, self.context.signer_pending_commitment_point, required),
+ (53, self.context.signer_pending_released_secret, required),
});
Ok(())
let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
+ let mut cur_holder_commitment_point: Option<PublicKey> = None;
+ let mut prev_holder_commitment_secret: Option<[u8; 32]> = None;
let mut is_batch_funding: Option<()> = None;
+ let mut signer_pending_commitment_update: bool = false;
+ let mut signer_pending_revoke_and_ack: bool = false;
+ let mut signer_pending_funding: bool = false;
+ let mut signer_pending_channel_ready: bool = false;
+ let mut signer_pending_commitment_point: bool = false;
+ let mut signer_pending_released_secret: bool = false;
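+ // All pending-signer flags default to false so that channels serialized before these
+ // TLVs existed deserialize as having nothing outstanding from the signer.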
+
read_tlv_fields!(reader, {
(0, announcement_sigs, option),
(1, minimum_depth, option),
(35, pending_outbound_skimmed_fees_opt, optional_vec),
(37, holding_cell_skimmed_fees_opt, optional_vec),
(38, is_batch_funding, option),
+ (39, cur_holder_commitment_point, option),
+ (41, prev_holder_commitment_secret, option),
+ (43, signer_pending_commitment_update, (default_value, false)),
+ (45, signer_pending_revoke_and_ack, (default_value, false)),
+ (47, signer_pending_funding, (default_value, false)),
+ (49, signer_pending_channel_ready, (default_value, false)),
+ (51, signer_pending_commitment_point, (default_value, false)),
+ (53, signer_pending_released_secret, (default_value, false)),
});
let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
+ // If we weren't able to load the cur_holder_commitment_point or prev_holder_commitment_secret,
+ // ask the signer for them now.
+ if cur_holder_commitment_point.is_none() {
+ cur_holder_commitment_point = holder_signer.get_per_commitment_point(
+ cur_holder_commitment_transaction_number, &secp_ctx
+ ).ok();
+
+ signer_pending_commitment_point = cur_holder_commitment_point.is_none();
+ }
+
+ if prev_holder_commitment_secret.is_none() {
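+ // Commitment numbers count down, so the secret we may need to re-send on reestablish
+ // belongs to commitment `cur + 2`; it only exists once the channel has advanced that far.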
+ let release_transaction_number = cur_holder_commitment_transaction_number + 2;
+ prev_holder_commitment_secret = if release_transaction_number <= INITIAL_COMMITMENT_NUMBER {
+ let secret = holder_signer.release_commitment_secret(release_transaction_number).ok();
+ signer_pending_released_secret = secret.is_none();
+ secret
+ } else { None };
+ }
+
// `user_id` used to be a single u64 value. In order to remain backwards
// compatible with versions prior to 0.0.113, the u128 is serialized as two
// separate u64 values.
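// Illustrative only: the recombination described above amounts to
//
//   let user_id = (user_id_low as u128) | ((user_id_high as u128) << 64);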
destination_script,
cur_holder_commitment_transaction_number,
+ cur_holder_commitment_point,
+ prev_holder_commitment_secret,
cur_counterparty_commitment_transaction_number,
value_to_self_msat,
monitor_pending_failures,
monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
- signer_pending_commitment_update: false,
- signer_pending_funding: false,
+ signer_pending_commitment_update,
+ signer_pending_revoke_and_ack,
+ signer_pending_funding,
+ signer_pending_channel_ready,
+ signer_pending_commitment_point,
+ signer_pending_released_secret,
pending_update_fee,
holding_cell_update_fee,
// Now change the fee so we can check that the fee in the open_channel message is the
// same as the old fee.
fee_est.fee_est = 500;
- let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
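+ // With an async signer the first per-commitment point may not be available yet, so
+ // `get_open_channel` now returns an `Option`; the test signer is always ready, so unwrap.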
+ let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
}
// Create Node B's channel by receiving Node A's open_channel message
// Make sure A's dust limit is as we expect.
- let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
+ let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
// Node B --> Node A: accept channel, explicitly setting B's dust limit.
- let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
+ let mut accept_channel_msg = node_b_chan.accept_inbound_channel().unwrap();
accept_channel_msg.dust_limit_satoshis = 546;
node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
node_a_chan.context.holder_dust_limit_satoshis = 1560;
let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
// Create Node B's channel by receiving Node A's open_channel message
- let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
+ let open_channel_msg = node_a_chan.get_open_channel(chain_hash).unwrap();
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
// Node B --> Node A: accept channel
- let accept_channel_msg = node_b_chan.accept_inbound_channel();
+ let accept_channel_msg = node_b_chan.accept_inbound_channel().unwrap();
node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
// Node A --> Node B: funding created
let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
- let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
+ let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
// Test that `InboundV1Channel::new` creates a channel with the correct value for
// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
- let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
+ let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
let mut inbound_node_config = UserConfig::default();
inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
// Create Node B's channel by receiving Node A's open_channel message
// Make sure A's dust limit is as we expect.
- let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
+ let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
// Node B --> Node A: accept channel, explicitly setting B's dust limit.
- let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
+ let mut accept_channel_msg = node_b_chan.accept_inbound_channel().unwrap();
accept_channel_msg.dust_limit_satoshis = 546;
node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
node_a_chan.context.holder_dust_limit_satoshis = 1560;
assert_eq!(counterparty_pubkeys.htlc_basepoint.serialize()[..],
hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
- // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
+ // We can't just use build_next_holder_transaction_keys here as the per_commitment_secret is not
// derived from a commitment_seed, so instead we copy it here and call
// build_commitment_transaction.
let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
channel_type_features.set_zero_conf_required();
- let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
+ let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
open_channel_msg.channel_type = Some(channel_type_features);
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
).unwrap();
- let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
+ let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
).unwrap();
// Set `channel_type` to `None` to force the implicit feature negotiation.
- let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
+ let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
open_channel_msg.channel_type = None;
// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
).unwrap();
- let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
+ let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
let res = InboundV1Channel::<&TestKeysInterface>::new(
10000000, 100000, 42, &config, 0, 42
).unwrap();
- let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
+ let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
).unwrap();
- let mut accept_channel_msg = channel_b.get_accept_channel_message();
+ let mut accept_channel_msg = channel_b.get_accept_channel_message().unwrap();
accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
let res = channel_a.accept_channel(
node_b_node_id,
&channelmanager::provided_channel_type_features(&config),
&channelmanager::provided_init_features(&config),
- &open_channel_msg,
+ &open_channel_msg.unwrap(),
7,
&config,
0,
let accept_channel_msg = node_b_chan.accept_inbound_channel();
node_a_chan.accept_channel(
- &accept_channel_msg,
+ &accept_channel_msg.unwrap(),
&config.channel_handshake_limits,
&channelmanager::provided_init_features(&config),
).unwrap();