pub commitment_seed: [u8; 32],
}
+#[cfg(test)]
+/// Snapshot of a channel's balance and pending-HTLC totals, returned by
+/// get_value_stat so tests can assert on channel state without reaching
+/// into private fields.
+pub struct ChannelValueStat {
+ pub value_to_self_msat: u64,
+ pub channel_value_msat: u64,
+ pub channel_reserve_msat: u64, // reserve we must keep, as set by the remote peer
+ pub pending_outbound_htlcs_amount_msat: u64,
+ pub pending_inbound_htlcs_amount_msat: u64,
+ pub holding_cell_outbound_amount_msat: u64, // sum of AddHTLC updates waiting in the holding cell
+ pub their_max_htlc_value_in_flight_msat: u64, // outgoing
+}
+
impl ChannelKeys {
pub fn new_from_seed(seed: &[u8; 32]) -> Result<ChannelKeys, secp256k1::Error> {
let mut prk = [0; 32];
channel_update_count: u32,
feerate_per_kw: u64,
+ #[cfg(debug_assertions)]
+ /// Max to_local and to_remote outputs in a locally-generated commitment transaction
+ max_commitment_tx_output_local: ::std::sync::Mutex<(u64, u64)>,
+ #[cfg(debug_assertions)]
+ /// Max to_local and to_remote outputs in a remote-generated commitment transaction
+ max_commitment_tx_output_remote: ::std::sync::Mutex<(u64, u64)>,
+
#[cfg(test)]
// Used in ChannelManager's tests to send a revoked transaction
pub last_local_commitment_txn: Vec<Transaction>,
our_dust_limit_satoshis: u64,
their_max_htlc_value_in_flight_msat: u64,
//get_our_max_htlc_value_in_flight_msat(): u64,
+ /// minimum channel reserve for **self** to maintain - set by them.
their_channel_reserve_satoshis: u64,
//get_our_channel_reserve_satoshis(): u64,
their_htlc_minimum_msat: u64,
channel_value_satoshis * 1000 / 10 //TODO
}
+ /// Returns a minimum channel reserve value **they** need to maintain
+ ///
/// Guaranteed to return a value no larger than channel_value_satoshis
fn get_our_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
let (q, _) = channel_value_satoshis.overflowing_div(100);
next_remote_htlc_id: 0,
channel_update_count: 1,
+ #[cfg(debug_assertions)]
+ max_commitment_tx_output_local: ::std::sync::Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
+ #[cfg(debug_assertions)]
+ max_commitment_tx_output_remote: ::std::sync::Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
+
last_local_commitment_txn: Vec::new(),
last_sent_closing_fee: None,
next_remote_htlc_id: 0,
channel_update_count: 1,
+ #[cfg(debug_assertions)]
+ max_commitment_tx_output_local: ::std::sync::Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
+ #[cfg(debug_assertions)]
+ max_commitment_tx_output_remote: ::std::sync::Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
+
last_local_commitment_txn: Vec::new(),
last_sent_closing_fee: None,
}
+ let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
+ let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000 - self.value_to_self_msat - remote_htlc_total_msat) as i64 - value_to_self_msat_offset;
+
+ #[cfg(debug_assertions)]
+ {
+ // Make sure that the to_self/to_remote is always either past the appropriate
+ // channel_reserve *or* it is making progress towards it.
+ // TODO: This should happen after fee calculation, but we don't handle that correctly
+ // yet!
+ let mut max_commitment_tx_output = if generated_by_local {
+ self.max_commitment_tx_output_local.lock().unwrap()
+ } else {
+ self.max_commitment_tx_output_remote.lock().unwrap()
+ };
+ debug_assert!(max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.their_channel_reserve_satoshis as i64);
+ max_commitment_tx_output.0 = cmp::max(max_commitment_tx_output.0, value_to_self_msat as u64);
+ debug_assert!(max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= Channel::get_our_channel_reserve_satoshis(self.channel_value_satoshis) as i64);
+ max_commitment_tx_output.1 = cmp::max(max_commitment_tx_output.1, value_to_remote_msat as u64);
+ }
+
let total_fee: u64 = feerate_per_kw * (COMMITMENT_TX_BASE_WEIGHT + (txouts.len() as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
- let value_to_self: i64 = ((self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset) / 1000 - if self.channel_outbound { total_fee as i64 } else { 0 };
- let value_to_remote: i64 = (((self.channel_value_satoshis * 1000 - self.value_to_self_msat - remote_htlc_total_msat) as i64 - value_to_self_msat_offset) / 1000) - if self.channel_outbound { 0 } else { total_fee as i64 };
+ let (value_to_self, value_to_remote) = if self.channel_outbound {
+ (value_to_self_msat / 1000 - total_fee as i64, value_to_remote_msat / 1000)
+ } else {
+ (value_to_self_msat / 1000, value_to_remote_msat / 1000 - total_fee as i64)
+ };
let value_to_a = if local { value_to_self } else { value_to_remote };
let value_to_b = if local { value_to_remote } else { value_to_self };
Ok(self.channel_monitor.clone())
}
- pub fn funding_locked(&mut self, msg: &msgs::FundingLocked) -> Result<(), HandleError> {
+ pub fn funding_locked(&mut self, msg: &msgs::FundingLocked) -> Result<(), ChannelError> {
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(HandleError{err: "Peer sent funding_locked when we needed a channel_reestablish", action: Some(msgs::ErrorAction::SendErrorMessage{msg: msgs::ErrorMessage{data: "Peer sent funding_locked when we needed a channel_reestablish".to_string(), channel_id: msg.channel_id}})});
+ return Err(ChannelError::Close("Peer sent funding_locked when we needed a channel_reestablish"));
}
let non_shutdown_state = self.channel_state & (!BOTH_SIDES_SHUTDOWN_MASK);
if non_shutdown_state == ChannelState::FundingSent as u32 {
self.cur_local_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
self.cur_remote_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
if self.their_cur_commitment_point != Some(msg.next_per_commitment_point) {
- return Err(HandleError{err: "Peer sent a reconnect funding_locked with a different point", action: None});
+ return Err(ChannelError::Close("Peer sent a reconnect funding_locked with a different point"));
}
// They probably disconnected/reconnected and re-sent the funding_locked, which is required
return Ok(());
} else {
- return Err(HandleError{err: "Peer sent a funding_locked at a strange time", action: None});
+ return Err(ChannelError::Close("Peer sent a funding_locked at a strange time"));
}
self.their_prev_commitment_point = self.their_cur_commitment_point;
Ok(())
}
- /// Returns (inbound_htlc_count, outbound_htlc_count, htlc_outbound_value_msat, htlc_inbound_value_msat)
- /// If its for a remote update check, we need to be more lax about checking against messages we
- /// sent but they may not have received/processed before they sent this message. Further, for
- /// our own sends, we're more conservative and even consider things they've removed against
- /// totals, though there is little reason to outside of further avoiding any race condition
- /// issues.
- fn get_pending_htlc_stats(&self, for_remote_update_check: bool) -> (u32, u32, u64, u64) {
- //TODO: Can probably split this into inbound/outbound
+ /// Returns (inbound_htlc_count, htlc_inbound_value_msat)
+ fn get_inbound_pending_htlc_stats(&self) -> (u32, u64) {
let mut inbound_htlc_count: u32 = 0;
- let mut outbound_htlc_count: u32 = 0;
- let mut htlc_outbound_value_msat = 0;
let mut htlc_inbound_value_msat = 0;
for ref htlc in self.pending_inbound_htlcs.iter() {
match htlc.state {
inbound_htlc_count += 1;
htlc_inbound_value_msat += htlc.amount_msat;
}
+ (inbound_htlc_count, htlc_inbound_value_msat)
+ }
+
+ /// Returns (outbound_htlc_count, htlc_outbound_value_msat)
+ /// If it's for a remote update check, we need to be more lax about checking against messages we
+ /// sent but they may not have received/processed before they sent this message. Further, for
+ /// our own sends, we're more conservative and even consider things they've removed against
+ /// totals, though there is little reason to outside of further avoiding any race condition
+ /// issues.
+ fn get_outbound_pending_htlc_stats(&self, for_remote_update_check: bool) -> (u32, u64) {
+ let mut outbound_htlc_count: u32 = 0;
+ let mut htlc_outbound_value_msat = 0;
for ref htlc in self.pending_outbound_htlcs.iter() {
match htlc.state {
OutboundHTLCState::LocalAnnounced => { if for_remote_update_check { continue; } },
htlc_outbound_value_msat += htlc.amount_msat;
}
- (inbound_htlc_count, outbound_htlc_count, htlc_outbound_value_msat, htlc_inbound_value_msat)
+ (outbound_htlc_count, htlc_outbound_value_msat)
}
pub fn update_add_htlc(&mut self, msg: &msgs::UpdateAddHTLC, pending_forward_state: PendingHTLCStatus) -> Result<(), HandleError> {
return Err(HandleError{err: "Remote side tried to send less than our minimum HTLC value", action: None});
}
- let (inbound_htlc_count, _, htlc_outbound_value_msat, htlc_inbound_value_msat) = self.get_pending_htlc_stats(true);
+ let (inbound_htlc_count, htlc_inbound_value_msat) = self.get_inbound_pending_htlc_stats();
if inbound_htlc_count + 1 > OUR_MAX_HTLCS as u32 {
return Err(HandleError{err: "Remote tried to push more than our max accepted HTLCs", action: None});
}
// Check our_channel_reserve_satoshis (we're getting paid, so they have to at least meet
// the reserve_satoshis we told them to always have as direct payment so that they lose
// something if we punish them for broadcasting an old state).
- if htlc_inbound_value_msat + htlc_outbound_value_msat + msg.amount_msat + self.value_to_self_msat > (self.channel_value_satoshis - Channel::get_our_channel_reserve_satoshis(self.channel_value_satoshis)) * 1000 {
+ if htlc_inbound_value_msat + msg.amount_msat + self.value_to_self_msat > (self.channel_value_satoshis - Channel::get_our_channel_reserve_satoshis(self.channel_value_satoshis)) * 1000 {
return Err(HandleError{err: "Remote HTLC add would put them over their reserve value", action: None});
}
if self.next_remote_htlc_id != msg.htlc_id {
/// Removes an outbound HTLC which has been commitment_signed by the remote end
+ /// Returns the HTLC's source on success. Fails the whole channel
+ /// (ChannelError::Close) if the preimage hash doesn't match, the HTLC is in a
+ /// state where removal is invalid, or no HTLC with the given id exists.
#[inline]
- fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<[u8; 32]>, fail_reason: Option<HTLCFailReason>) -> Result<&HTLCSource, HandleError> {
+ fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<[u8; 32]>, fail_reason: Option<HTLCFailReason>) -> Result<&HTLCSource, ChannelError> {
for htlc in self.pending_outbound_htlcs.iter_mut() {
if htlc.htlc_id == htlc_id {
match check_preimage {
None => {},
Some(payment_hash) =>
if payment_hash != htlc.payment_hash {
- return Err(HandleError{err: "Remote tried to fulfill HTLC with an incorrect preimage", action: None});
+ return Err(ChannelError::Close("Remote tried to fulfill HTLC with an incorrect preimage"));
}
};
+ // Only an HTLC the remote has irrevocably committed to may be
+ // resolved; resolving one twice, or before commitment, is a
+ // protocol violation.
match htlc.state {
OutboundHTLCState::LocalAnnounced =>
- return Err(HandleError{err: "Remote tried to fulfill HTLC before it had been committed", action: None}),
+ return Err(ChannelError::Close("Remote tried to fulfill HTLC before it had been committed")),
OutboundHTLCState::Committed => {
htlc.state = OutboundHTLCState::RemoteRemoved;
htlc.fail_reason = fail_reason;
},
OutboundHTLCState::AwaitingRemoteRevokeToRemove | OutboundHTLCState::AwaitingRemovedRemoteRevoke | OutboundHTLCState::RemoteRemoved =>
- return Err(HandleError{err: "Remote tried to fulfill HTLC that they'd already fulfilled", action: None}),
+ return Err(ChannelError::Close("Remote tried to fulfill HTLC that they'd already fulfilled")),
}
return Ok(&htlc.source);
}
}
- Err(HandleError{err: "Remote tried to fulfill/fail an HTLC we couldn't find", action: None})
+ Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find"))
}
- pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<&HTLCSource, HandleError> {
+ pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<&HTLCSource, ChannelError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
- return Err(HandleError{err: "Got add HTLC message when channel was not in an operational state", action: None});
+ return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state"));
}
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(HandleError{err: "Peer sent update_fulfill_htlc when we needed a channel_reestablish", action: Some(msgs::ErrorAction::SendErrorMessage{msg: msgs::ErrorMessage{data: "Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_string(), channel_id: msg.channel_id}})});
+ return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish"));
}
let mut sha = Sha256::new();
self.mark_outbound_htlc_removed(msg.htlc_id, Some(payment_hash), None)
}
- pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<&HTLCSource, HandleError> {
+ pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<&HTLCSource, ChannelError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
- return Err(HandleError{err: "Got add HTLC message when channel was not in an operational state", action: None});
+ return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state"));
}
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(HandleError{err: "Peer sent update_fail_htlc when we needed a channel_reestablish", action: Some(msgs::ErrorAction::SendErrorMessage{msg: msgs::ErrorMessage{data: "Peer sent update_fail_htlc when we needed a channel_reestablish".to_string(), channel_id: msg.channel_id}})});
+ return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish"));
}
self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))
}
- pub fn update_fail_malformed_htlc<'a>(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<&HTLCSource, HandleError> {
+ pub fn update_fail_malformed_htlc<'a>(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<&HTLCSource, ChannelError> {
if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
- return Err(HandleError{err: "Got add HTLC message when channel was not in an operational state", action: None});
+ return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state"));
}
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(HandleError{err: "Peer sent update_fail_malformed_htlc when we needed a channel_reestablish", action: Some(msgs::ErrorAction::SendErrorMessage{msg: msgs::ErrorMessage{data: "Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_string(), channel_id: msg.channel_id}})});
+ return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish"));
}
self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))
/// May panic if some calls other than message-handling calls (which will all Err immediately)
/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
- pub fn channel_reestablish(&mut self, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, Option<ChannelMonitor>), HandleError> {
+ pub fn channel_reestablish(&mut self, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, Option<ChannelMonitor>), ChannelError> {
if self.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
- return Err(HandleError{err: "Peer sent a loose channel_reestablish not after reconnect", action: Some(msgs::ErrorAction::SendErrorMessage{msg: msgs::ErrorMessage{data: "Peer sent a loose channel_reestablish not after reconnect".to_string(), channel_id: msg.channel_id}})});
+ // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
+ // almost certainly indicates we are going to end up out-of-sync in some way, so we
+ // just close here instead of trying to recover.
+ return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect"));
}
if msg.next_local_commitment_number == 0 || msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
msg.next_remote_commitment_number == 0 || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER {
- return Err(HandleError{err: "Peer send garbage channel_reestablish", action: Some(msgs::ErrorAction::SendErrorMessage{msg: msgs::ErrorMessage{data: "Peer send garbage channel_reestablish".to_string(), channel_id: msg.channel_id}})});
+ return Err(ChannelError::Close("Peer send garbage channel_reestablish"));
}
// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
next_per_commitment_point,
});
} else {
- return Err(HandleError{err: "Peer attempted to reestablish channel with a very old local commitment transaction", action: Some(msgs::ErrorAction::SendErrorMessage{msg: msgs::ErrorMessage{data: "Peer attempted to reestablish channel with a very old remote commitment transaction".to_string(), channel_id: msg.channel_id}})});
+ return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction"));
}
if msg.next_local_commitment_number == INITIAL_COMMITMENT_NUMBER - self.cur_remote_commitment_transaction_number {
match self.free_holding_cell_htlcs() {
Err(e) => {
if let &Some(msgs::ErrorAction::DisconnectPeer{msg: Some(_)}) = &e.action {
+ return Err(ChannelError::Close(e.err));
} else if let &Some(msgs::ErrorAction::SendErrorMessage{msg: _}) = &e.action {
+ return Err(ChannelError::Close(e.err));
} else {
panic!("Got non-channel-failing result from free_holding_cell_htlcs");
}
- return Err(e);
},
Ok(Some((commitment_update, channel_monitor))) => return Ok((None, required_revoke, Some(commitment_update), Some(channel_monitor))),
Ok(None) => return Ok((None, required_revoke, None, None)),
commitment_signed: self.send_commitment_no_state_update().expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
}), None));
} else {
- return Err(HandleError{err: "Peer attempted to reestablish channel with a very old remote commitment transaction", action: Some(msgs::ErrorAction::SendErrorMessage{msg: msgs::ErrorMessage{data: "Peer attempted to reestablish channel with a very old remote commitment transaction".to_string(), channel_id: msg.channel_id}})});
+ return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction"));
}
}
&self.local_keys
}
+ #[cfg(test)]
+ /// Collects the channel's balance-related values into a ChannelValueStat
+ /// so tests can make assertions without accessing private fields.
+ pub fn get_value_stat(&self) -> ChannelValueStat {
+ // Only AddHTLC updates sitting in the holding cell contribute value;
+ // queued fulfills/fails are ignored here.
+ let mut holding_cell_outbound_amount_msat = 0;
+ for upd in self.holding_cell_htlc_updates.iter() {
+ if let &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } = upd {
+ holding_cell_outbound_amount_msat += amount_msat;
+ }
+ }
+ ChannelValueStat {
+ value_to_self_msat: self.value_to_self_msat,
+ channel_value_msat: self.channel_value_satoshis * 1000,
+ channel_reserve_msat: self.their_channel_reserve_satoshis * 1000,
+ pending_outbound_htlcs_amount_msat: self.pending_outbound_htlcs.iter().map(|h| h.amount_msat).sum::<u64>(),
+ pending_inbound_htlcs_amount_msat: self.pending_inbound_htlcs.iter().map(|h| h.amount_msat).sum::<u64>(),
+ holding_cell_outbound_amount_msat: holding_cell_outbound_amount_msat,
+ their_max_htlc_value_in_flight_msat: self.their_max_htlc_value_in_flight_msat,
+ }
+ }
+
/// Allowed in any state (including after shutdown)
pub fn get_channel_update_count(&self) -> u32 {
self.channel_update_count
/// closing).
/// Note that the "channel must be funded" requirement is stricter than BOLT 7 requires - see
/// https://github.com/lightningnetwork/lightning-rfc/issues/468
- pub fn get_channel_announcement(&self, our_node_id: PublicKey, chain_hash: Sha256dHash) -> Result<(msgs::UnsignedChannelAnnouncement, Signature), HandleError> {
+ pub fn get_channel_announcement(&self, our_node_id: PublicKey, chain_hash: Sha256dHash) -> Result<(msgs::UnsignedChannelAnnouncement, Signature), ChannelError> {
if !self.announce_publicly {
- return Err(HandleError{err: "Channel is not available for public announcements", action: Some(msgs::ErrorAction::IgnoreError)});
+ return Err(ChannelError::Ignore("Channel is not available for public announcements"));
}
if self.channel_state & (ChannelState::ChannelFunded as u32) == 0 {
- return Err(HandleError{err: "Cannot get a ChannelAnnouncement until the channel funding has been locked", action: Some(msgs::ErrorAction::IgnoreError)});
+ return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement until the channel funding has been locked"));
}
if (self.channel_state & (ChannelState::LocalShutdownSent as u32 | ChannelState::ShutdownComplete as u32)) != 0 {
- return Err(HandleError{err: "Cannot get a ChannelAnnouncement once the channel is closing", action: Some(msgs::ErrorAction::IgnoreError)});
+ return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement once the channel is closing"));
}
let were_node_one = our_node_id.serialize()[..] < self.their_node_id.serialize()[..];
return Err(HandleError{err: "Cannot send an HTLC while disconnected", action: Some(ErrorAction::IgnoreError)});
}
- let (_, outbound_htlc_count, htlc_outbound_value_msat, htlc_inbound_value_msat) = self.get_pending_htlc_stats(false);
+ let (outbound_htlc_count, htlc_outbound_value_msat) = self.get_outbound_pending_htlc_stats(false);
if outbound_htlc_count + 1 > self.their_max_accepted_htlcs as u32 {
return Err(HandleError{err: "Cannot push more than their max accepted HTLCs", action: None});
}
if htlc_outbound_value_msat + amount_msat > self.their_max_htlc_value_in_flight_msat {
return Err(HandleError{err: "Cannot send value that would put us over our max HTLC value in flight", action: None});
}
- // Check their_channel_reserve_satoshis:
- if htlc_inbound_value_msat + htlc_outbound_value_msat + amount_msat + (self.channel_value_satoshis * 1000 - self.value_to_self_msat) > (self.channel_value_satoshis - self.their_channel_reserve_satoshis) * 1000 {
+
+ let mut holding_cell_outbound_amount_msat = 0;
+ for holding_htlc in self.holding_cell_htlc_updates.iter() {
+ match holding_htlc {
+ &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } => {
+ holding_cell_outbound_amount_msat += *amount_msat;
+ }
+ _ => {}
+ }
+ }
+
+ // Check self.their_channel_reserve_satoshis (the amount we must keep as
+ // reserve for them to have something to claim if we misbehave)
+ if self.value_to_self_msat < self.their_channel_reserve_satoshis * 1000 + amount_msat + holding_cell_outbound_amount_msat + htlc_outbound_value_msat {
return Err(HandleError{err: "Cannot send value that would put us over our reserve value", action: None});
}
/// Begins the shutdown process, getting a message for the remote peer and returning all
/// holding cell HTLCs for payment failure.
- pub fn get_shutdown(&mut self) -> Result<(msgs::Shutdown, Vec<(HTLCSource, [u8; 32])>), HandleError> {
+ pub fn get_shutdown(&mut self) -> Result<(msgs::Shutdown, Vec<(HTLCSource, [u8; 32])>), APIError> {
for htlc in self.pending_outbound_htlcs.iter() {
if htlc.state == OutboundHTLCState::LocalAnnounced {
- return Err(HandleError{err: "Cannot begin shutdown with pending HTLCs, call send_commitment first", action: None});
+ return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first"});
}
}
if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
- return Err(HandleError{err: "Shutdown already in progress", action: None});
+ if (self.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
+ return Err(APIError::APIMisuseError{err: "Shutdown already in progress"});
+ }
+ else if (self.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
+ return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote"});
+ }
}
assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(HandleError{err: "Cannot begin shutdown while peer is disconnected, maybe force-close instead?", action: None});
+ return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected, maybe force-close instead?"});
}
let our_closing_script = self.get_closing_scriptpubkey();