use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
-use crate::chain::keysinterface::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
+use crate::sign::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
+struct PendingChannelMonitorUpdate {
+ update: ChannelMonitorUpdate,
+ /// In some cases we need to delay letting the [`ChannelMonitorUpdate`] go until after an
+ /// `Event` is processed by the user. This bool indicates the [`ChannelMonitorUpdate`] is
+ /// blocked on some external event and the [`ChannelManager`] will update us when we're ready.
+ ///
+ /// [`ChannelManager`]: super::channelmanager::ChannelManager
+ blocked: bool,
+}
+
+impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
+ (0, update, required),
+ (2, blocked, required),
+});
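// Illustrative only: a minimal standalone sketch, using stand-in types rather than the
// real `ChannelMonitorUpdate`, of the invariant this queue maintains: every unblocked
// update sits ahead of every blocked one, and `update_id`s stay strictly increasing, so
// updates are always released to the user in order.
#[cfg(test)]
mod blocked_queue_invariant_sketch {
	struct StubUpdate { update_id: u64 }
	struct StubPending { update: StubUpdate, blocked: bool }

	fn invariant_holds(queue: &[StubPending]) -> bool {
		// Once a blocked entry appears, everything after it must be blocked too, and
		// ids must increase monotonically.
		let first_blocked = queue.iter().position(|p| p.blocked).unwrap_or(queue.len());
		queue[first_blocked..].iter().all(|p| p.blocked)
			&& queue.windows(2).all(|w| w[0].update.update_id < w[1].update.update_id)
	}

	#[test]
	fn unblocked_prefix_ordered_ids() {
		let queue = [
			StubPending { update: StubUpdate { update_id: 1 }, blocked: false },
			StubPending { update: StubUpdate { update_id: 2 }, blocked: true },
		];
		assert!(invariant_holds(&queue));
	}
}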
+
// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
// calling channel_id() before we're set up or things like get_outbound_funding_signed on an
// inbound channel.
/// If we then persist the [`channelmanager::ChannelManager`] and crash before the persistence
/// completes, we still need to be able to complete the persistence. Thus, we have to keep a
/// copy of the [`ChannelMonitorUpdate`] here until it is complete.
- pending_monitor_updates: Vec<ChannelMonitorUpdate>,
+ pending_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
#[cfg(any(test, fuzzing))]
secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
- Some(signer_provider.get_shutdown_scriptpubkey())
+ match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => Some(scriptpubkey),
+ Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
+ }
} else { None };
if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
}
}
+ let destination_script = match signer_provider.get_destination_script() {
+ Ok(script) => script,
+ Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
+ };
+
let temporary_channel_id = entropy_source.get_secure_random_bytes();
Ok(Channel {
holder_signer,
shutdown_scriptpubkey,
- destination_script: signer_provider.get_destination_script(),
+ destination_script,
cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
} else { None };
let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
- Some(signer_provider.get_shutdown_scriptpubkey())
+ match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => Some(scriptpubkey),
+ Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
+ }
} else { None };
if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
}
}
+ let destination_script = match signer_provider.get_destination_script() {
+ Ok(script) => script,
+ Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
+ };
+
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
holder_signer,
shutdown_scriptpubkey,
- destination_script: signer_provider.get_destination_script(),
+ destination_script,
cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
}
pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
+ let release_cs_monitor = self.pending_monitor_updates.iter().all(|upd| !upd.blocked);
match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
- UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(_) } => {
- let mut additional_update = self.build_commitment_no_status_check(logger);
- // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
- // strictly increasing by one, so decrement it here.
- self.latest_monitor_update_id = monitor_update.update_id;
- monitor_update.updates.append(&mut additional_update.updates);
- self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
- self.pending_monitor_updates.push(monitor_update);
+ UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
+ // Even if we aren't supposed to let new monitor updates with commitment state
+ // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
+ // matter what. Sadly, to push a new monitor update ahead of others already
+ // queued, we have to insert it into the pending queue and update the
+ // update_ids of all the following monitor updates.
+ let unblocked_update_pos = if release_cs_monitor && msg.is_some() {
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_update_id but we
+ // want update_ids to be strictly increasing by one, so decrement it here.
+ self.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ self.pending_monitor_updates.push(PendingChannelMonitorUpdate {
+ update: monitor_update, blocked: false,
+ });
+ self.pending_monitor_updates.len() - 1
+ } else {
+ let insert_pos = self.pending_monitor_updates.iter().position(|upd| upd.blocked)
+ .unwrap_or(self.pending_monitor_updates.len());
+ let new_mon_id = self.pending_monitor_updates.get(insert_pos)
+ .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
+ monitor_update.update_id = new_mon_id;
+ self.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate {
+ update: monitor_update, blocked: false,
+ });
+ for held_update in self.pending_monitor_updates.iter_mut().skip(insert_pos + 1) {
+ held_update.update.update_id += 1;
+ }
+ if msg.is_some() {
+ debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
+ let update = self.build_commitment_no_status_check(logger);
+ self.pending_monitor_updates.push(PendingChannelMonitorUpdate {
+ update, blocked: true,
+ });
+ }
+ insert_pos
+ };
+ self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
UpdateFulfillCommitFetch::NewClaim {
- monitor_update: self.pending_monitor_updates.last().unwrap(),
+ monitor_update: &self.pending_monitor_updates.get(unblocked_update_pos)
+ .expect("We just pushed the monitor update").update,
htlc_value_msat,
}
},
- UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } => {
- self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
- self.pending_monitor_updates.push(monitor_update);
- UpdateFulfillCommitFetch::NewClaim {
- monitor_update: self.pending_monitor_updates.last().unwrap(),
- htlc_value_msat,
- }
- }
UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
}
}
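// Illustrative only: the insert-and-renumber step above, restated over a plain Vec with
// stand-in types. A preimage update that must run ahead of blocked updates is inserted
// at the first blocked position, takes over that entry's `update_id`, and every later
// queued update has its id bumped by one so ids remain strictly increasing.
#[cfg(test)]
mod preimage_requeue_sketch {
	struct StubUpdate { update_id: u64 }
	struct StubPending { update: StubUpdate, blocked: bool }

	fn insert_before_blocked(queue: &mut Vec<StubPending>, mut update: StubUpdate) -> usize {
		let insert_pos = queue.iter().position(|p| p.blocked).unwrap_or(queue.len());
		if let Some(next) = queue.get(insert_pos) {
			// Steal the id of the first blocked update we are jumping ahead of.
			update.update_id = next.update.update_id;
		}
		queue.insert(insert_pos, StubPending { update, blocked: false });
		for held in queue.iter_mut().skip(insert_pos + 1) {
			held.update.update_id += 1;
		}
		insert_pos
	}

	#[test]
	fn renumbers_blocked_tail() {
		let mut queue = vec![
			StubPending { update: StubUpdate { update_id: 5 }, blocked: true },
			StubPending { update: StubUpdate { update_id: 6 }, blocked: true },
		];
		let pos = insert_before_blocked(&mut queue, StubUpdate { update_id: 7 });
		assert_eq!(pos, 0);
		let ids: Vec<u64> = queue.iter().map(|p| p.update.update_id).collect();
		assert_eq!(ids, vec![5, 6, 7]);
	}
}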
Ok(())
}
- pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<&ChannelMonitorUpdate, ChannelError>
+ pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError>
where L::Target: Logger
{
if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
}
log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
log_bytes!(self.channel_id));
- self.pending_monitor_updates.push(monitor_update);
- return Ok(self.pending_monitor_updates.last().unwrap());
+ return Ok(self.push_ret_blockable_mon_update(monitor_update));
}
let need_commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
log_bytes!(self.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
- self.pending_monitor_updates.push(monitor_update);
self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
- return Ok(self.pending_monitor_updates.last().unwrap());
+ return Ok(self.push_ret_blockable_mon_update(monitor_update));
}
/// Public version of the below, checking relevant preconditions first.
update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
- self.pending_monitor_updates.push(monitor_update);
- (Some(self.pending_monitor_updates.last().unwrap()), htlcs_to_fail)
+ (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
} else {
(None, Vec::new())
}
/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
/// generating an appropriate error *after* the channel state has been updated based on the
/// revoke_and_ack message.
- pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, &ChannelMonitorUpdate), ChannelError>
+ pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
where L::Target: Logger,
{
if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
self.monitor_pending_failures.append(&mut revoked_htlcs);
self.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id()));
- self.pending_monitor_updates.push(monitor_update);
- return Ok((Vec::new(), self.pending_monitor_updates.last().unwrap()));
+ return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
}
match self.free_holding_cell_htlcs(logger) {
(Some(_), htlcs_to_fail) => {
- let mut additional_update = self.pending_monitor_updates.pop().unwrap();
+ let mut additional_update = self.pending_monitor_updates.pop().unwrap().update;
// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
// strictly increasing by one, so decrement it here.
self.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
- self.pending_monitor_updates.push(monitor_update);
- Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
+ Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
},
(None, htlcs_to_fail) => {
if require_commitment {
log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
- self.pending_monitor_updates.push(monitor_update);
- Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
+ Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
} else {
log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id()));
self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
- self.pending_monitor_updates.push(monitor_update);
- Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
+ Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
}
}
}
{
assert_eq!(self.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
self.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
- self.pending_monitor_updates.clear();
+ let mut found_blocked = false;
+ self.pending_monitor_updates.retain(|upd| {
+ if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); }
+ if upd.blocked { found_blocked = true; }
+ upd.blocked
+ });
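// Illustrative only (stand-in types): the retain above drops every update that was
// already released to the user, keeping only the still-blocked tail, and debug-asserts
// that no released update follows a blocked one.
#[cfg(test)]
mod retain_blocked_tail_sketch {
	#[derive(Debug, PartialEq)]
	struct StubPending { update_id: u64, blocked: bool }

	fn drop_completed(queue: &mut Vec<StubPending>) {
		let mut found_blocked = false;
		queue.retain(|upd| {
			if found_blocked { debug_assert!(upd.blocked); }
			if upd.blocked { found_blocked = true; }
			upd.blocked
		});
	}

	#[test]
	fn keeps_only_blocked() {
		let mut queue = vec![
			StubPending { update_id: 1, blocked: false },
			StubPending { update_id: 2, blocked: true },
		];
		drop_completed(&mut queue);
		assert_eq!(queue, vec![StubPending { update_id: 2, blocked: true }]);
	}
}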
// If we're past (or at) the FundingSent stage on an outbound channel, try to
// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
msg.next_local_commitment_number == 0 {
- return Err(ChannelError::Close("Peer sent a garbage channel_reestablish".to_owned()));
+ return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned()));
}
if msg.next_remote_commitment_number > 0 {
Some(_) => false,
None => {
assert!(send_shutdown);
- let shutdown_scriptpubkey = signer_provider.get_shutdown_scriptpubkey();
+ let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => scriptpubkey,
+ Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
+ };
if !shutdown_scriptpubkey.is_compatible(their_features) {
return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
}
}],
};
self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
- self.pending_monitor_updates.push(monitor_update);
- Some(self.pending_monitor_updates.last().unwrap())
+ if self.push_blockable_mon_update(monitor_update) {
+ self.pending_monitor_updates.last().map(|upd| &upd.update)
+ } else { None }
} else { None };
let shutdown = if send_shutdown {
Some(msgs::Shutdown {
(self.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
}
- pub fn get_next_monitor_update(&self) -> Option<&ChannelMonitorUpdate> {
- self.pending_monitor_updates.first()
+ pub fn get_latest_complete_monitor_update_id(&self) -> u64 {
+ if self.pending_monitor_updates.is_empty() { return self.get_latest_monitor_update_id(); }
+ self.pending_monitor_updates[0].update.update_id - 1
+ }
+
+ /// Unblocks the next blocked monitor update, if one exists, returning it along with a bool
+ /// indicating whether a further blocked monitor update remains after it.
+ pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> {
+ for i in 0..self.pending_monitor_updates.len() {
+ if self.pending_monitor_updates[i].blocked {
+ self.pending_monitor_updates[i].blocked = false;
+ return Some((&self.pending_monitor_updates[i].update,
+ self.pending_monitor_updates.len() > i + 1));
+ }
+ }
+ None
+ }
+
+ /// Pushes a new monitor update into our monitor update queue, returning whether it should be
+ /// immediately given to the user for persisting or held as blocked.
+ fn push_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) -> bool {
+ let release_monitor = self.pending_monitor_updates.iter().all(|upd| !upd.blocked);
+ self.pending_monitor_updates.push(PendingChannelMonitorUpdate {
+ update, blocked: !release_monitor
+ });
+ release_monitor
+ }
+
+ /// Pushes a new monitor update into our monitor update queue, returning a reference to it if
+ /// it should be immediately given to the user for persisting or `None` if it should be held as
+ /// blocked.
+ fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
+ -> Option<&ChannelMonitorUpdate> {
+ let release_monitor = self.push_blockable_mon_update(update);
+ if release_monitor { self.pending_monitor_updates.last().map(|upd| &upd.update) } else { None }
+ }
+
+ pub fn no_monitor_updates_pending(&self) -> bool {
+ self.pending_monitor_updates.is_empty()
+ }
+
+ pub fn complete_one_mon_update(&mut self, update_id: u64) {
+ self.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id);
}
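// Illustrative only: a standalone sketch, with stand-in types, of how the helpers above
// interact. An update pushed while a blocked update is queued is held as blocked; once
// the earlier update is unblocked and completes, the held one can be unblocked in turn.
#[cfg(test)]
mod monitor_update_lifecycle_sketch {
	struct StubUpdate { update_id: u64 }
	struct StubPending { update: StubUpdate, blocked: bool }
	struct StubChannel { pending: Vec<StubPending> }

	impl StubChannel {
		// Mirrors `push_blockable_mon_update`: release only if nothing is blocked.
		fn push(&mut self, update: StubUpdate) -> bool {
			let release = self.pending.iter().all(|p| !p.blocked);
			self.pending.push(StubPending { update, blocked: !release });
			release
		}
		// Mirrors `complete_one_mon_update`: drop the completed update from the queue.
		fn complete(&mut self, update_id: u64) {
			self.pending.retain(|p| p.update.update_id != update_id);
		}
		// Mirrors `unblock_next_blocked_monitor_update`, returning only the id here.
		fn unblock_next(&mut self) -> Option<u64> {
			for p in self.pending.iter_mut() {
				if p.blocked {
					p.blocked = false;
					return Some(p.update.update_id);
				}
			}
			None
		}
	}

	#[test]
	fn held_behind_blocked() {
		let mut chan = StubChannel { pending: vec![
			StubPending { update: StubUpdate { update_id: 1 }, blocked: true },
		] };
		assert!(!chan.push(StubUpdate { update_id: 2 })); // held as blocked
		assert_eq!(chan.unblock_next(), Some(1));
		chan.complete(1);
		assert_eq!(chan.unblock_next(), Some(2));
	}
}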
/// Returns true if funding_created was sent/received.
Some(_) => {
let monitor_update = self.build_commitment_no_status_check(logger);
self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
- self.pending_monitor_updates.push(monitor_update);
- Ok(Some(self.pending_monitor_updates.last().unwrap()))
+ Ok(self.push_ret_blockable_mon_update(monitor_update))
},
None => Ok(None)
}
/// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]), in which case no
/// [`ChannelMonitorUpdate`] will be returned.
pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
- target_feerate_sats_per_kw: Option<u32>)
+ target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
-> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
where SP::Target: SignerProvider {
for htlc in self.pending_outbound_htlcs.iter() {
return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
}
}
+ if self.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
+ return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
+ }
assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
if self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
let update_shutdown_script = match self.shutdown_scriptpubkey {
Some(_) => false,
None if !chan_closed => {
- let shutdown_scriptpubkey = signer_provider.get_shutdown_scriptpubkey();
+ // Use the override shutdown script if one was provided.
+ let shutdown_scriptpubkey = match override_shutdown_script {
+ Some(script) => script,
+ None => {
+ // Otherwise, use the shutdown scriptpubkey provided by the signer.
+ match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => scriptpubkey,
+ Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
+ }
+ },
+ };
if !shutdown_scriptpubkey.is_compatible(their_features) {
return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
}
}],
};
self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
- self.pending_monitor_updates.push(monitor_update);
- Some(self.pending_monitor_updates.last().unwrap())
+ if self.push_blockable_mon_update(monitor_update) {
+ self.pending_monitor_updates.last().map(|upd| &upd.update)
+ } else { None }
} else { None };
let shutdown = msgs::Shutdown {
channel_id: self.channel_id,
(28, holder_max_accepted_htlcs, option),
(29, self.temporary_channel_id, option),
(31, channel_pending_event_emitted, option),
+ (33, self.pending_monitor_updates, vec_type),
});
Ok(())
let mut temporary_channel_id: Option<[u8; 32]> = None;
let mut holder_max_accepted_htlcs: Option<u16> = None;
+ let mut pending_monitor_updates = Some(Vec::new());
+
read_tlv_fields!(reader, {
(0, announcement_sigs, option),
(1, minimum_depth, option),
(28, holder_max_accepted_htlcs, option),
(29, temporary_channel_id, option),
(31, channel_pending_event_emitted, option),
+ (33, pending_monitor_updates, vec_type),
});
let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
channel_type: channel_type.unwrap(),
channel_keys_id,
- pending_monitor_updates: Vec::new(),
+ pending_monitor_updates: pending_monitor_updates.unwrap(),
})
}
}
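// Illustrative only: TLV type 33 is odd, so serializations written before this field
// existed simply omit it and older readers ignore it. Because `pending_monitor_updates`
// is seeded with `Some(Vec::new())` before `read_tlv_fields!`, an absent field leaves
// that default in place, which is what makes the later `unwrap()` safe. A stand-in
// sketch of that defaulting pattern:
#[cfg(test)]
mod tlv_default_sketch {
	fn read_pending_updates(field_in_stream: Option<Vec<u64>>) -> Vec<u64> {
		let mut pending = Some(Vec::new()); // default, seeded before reading
		if let Some(read) = field_in_stream { pending = Some(read); }
		pending.unwrap() // safe: a default is always present
	}

	#[test]
	fn old_data_defaults_to_empty() {
		assert!(read_pending_updates(None).is_empty());
		assert_eq!(read_pending_updates(Some(vec![3])), vec![3]);
	}
}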
use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
- use crate::chain::keysinterface::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
+ use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
use crate::chain::transaction::OutPoint;
use crate::routing::router::Path;
use crate::util::config::UserConfig;
fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
- fn get_destination_script(&self) -> Script {
+ fn get_destination_script(&self) -> Result<Script, ()> {
let secp_ctx = Secp256k1::signing_only();
let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
- Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script()
+ Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
}
- fn get_shutdown_scriptpubkey(&self) -> ShutdownScript {
+ fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
let secp_ctx = Secp256k1::signing_only();
let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
- ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key))
+ Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
}
}
use bitcoin::hashes::hex::FromHex;
use bitcoin::hash_types::Txid;
use bitcoin::secp256k1::Message;
- use crate::chain::keysinterface::EcdsaChannelSigner;
+ use crate::sign::EcdsaChannelSigner;
use crate::ln::PaymentPreimage;
use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};