pub counterparty: ChannelCounterparty,
/// The Channel's funding transaction output, if we've already negotiated the funding
/// transaction with our counterparty.
- ///
- /// Note that, if this has been set, `channel_id` for V1-established channels will be equivalent to
- /// `ChannelId::v1_from_funding_outpoint(funding_txo.unwrap())`.
pub funding_txo: Option<OutPoint>,
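// A minimal detached sketch (hypothetical `details: ChannelDetails` value,
// not part of this diff): `funding_txo` is only populated once the funding
// transaction has been negotiated, so consumers must handle the `None` case.
//
//     if let Some(outpoint) = details.funding_txo {
//         println!("channel funded at {}:{}", outpoint.txid, outpoint.index);
//     }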
/// The features which this channel operates with. See individual features for more info.
///
counterparty_node_id: $channel.context.get_counterparty_node_id(),
user_channel_id: $channel.context.get_user_id(),
funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+ channel_type: Some($channel.context.get_channel_type().clone()),
}, None));
$channel.context.set_channel_pending_event_emitted();
}
handle_new_monitor_update!($self, $update_res, $chan, _internal,
handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
};
- ($self: ident, $funding_txo: expr, $channel_id: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+ ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
.or_insert_with(Vec::new);
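// A detached sketch of the bookkeeping pattern above (illustrative types and
// values, not part of this diff): in-flight updates are grouped per funding
// outpoint, creating each channel's Vec lazily on first insertion.
//
//     use std::collections::HashMap;
//     let mut in_flight: HashMap<u32, Vec<&str>> = HashMap::new();
//     in_flight.entry(7).or_insert_with(Vec::new).push("update");
//     assert_eq!(in_flight[&7], vec!["update"]);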
// During startup, we push monitor updates as background events through to here in
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update_opt.take() {
- handle_new_monitor_update!(self, funding_txo_opt.unwrap(), *channel_id, monitor_update,
+ handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
}
} else {
}, onion_packet, None, &self.fee_estimator, &&logger);
match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
Some(monitor_update) => {
- match handle_new_monitor_update!(self, funding_txo, channel_id, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
+ match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
false => {
// Note that MonitorUpdateInProgress here indicates (per function
// docs) that we will resend the commitment update once monitor
hash_map::Entry::Occupied(mut chan_phase) => {
if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
updated_chan = true;
- handle_new_monitor_update!(self, funding_txo, channel_id, update.clone(),
+ handle_new_monitor_update!(self, funding_txo, update.clone(),
peer_state_lock, peer_state, per_peer_state, chan);
} else {
debug_assert!(false, "We shouldn't have an update for a non-funded channel");
peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
}
if !during_init {
- handle_new_monitor_update!(self, prev_hop.outpoint, prev_hop.channel_id, monitor_update, peer_state_lock,
+ handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
peer_state, per_peer_state, chan);
} else {
// If we're running during init we cannot update a monitor directly -
}
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
- forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, startup_replay: bool,
- next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint,
- next_channel_id: ChannelId,
+ forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
+ startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
+ next_channel_outpoint: OutPoint, next_channel_id: ChannelId,
) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
})
} else { None }
} else {
- let fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+ let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
if let Some(claimed_htlc_value) = htlc_claim_value_msat {
Some(claimed_htlc_value - forwarded_htlc_value)
} else { None }
} else { None };
+ debug_assert!(skimmed_fee_msat <= total_fee_earned_msat,
+ "skimmed_fee_msat must always be included in total_fee_earned_msat");
Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
event: events::Event::PaymentForwarded {
- fee_earned_msat,
+ total_fee_earned_msat,
claim_from_onchain_tx: from_onchain,
prev_channel_id: Some(prev_channel_id),
next_channel_id: Some(next_channel_id),
outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+ skimmed_fee_msat,
},
downstream_counterparty_and_funding_outpoint: chan_to_release,
})
}
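// A worked, standalone sketch of the fee arithmetic above (values are
// illustrative): the total fee is what we claimed minus what we forwarded,
// and any skimmed fee is a portion of that total. `Option`'s derived
// ordering treats `None` as less than any `Some`, so the `debug_assert!`
// above also holds on hops where nothing was skimmed.
let claimed_htlc_value_msat: u64 = 1_000_000;
let forwarded_htlc_value_msat: u64 = 999_000;
let total_fee_earned_msat = Some(claimed_htlc_value_msat - forwarded_htlc_value_msat); // Some(1_000)
let skimmed_fee_msat: Option<u64> = None; // no extra fee skimmed on this hop
assert!(skimmed_fee_msat <= total_fee_earned_msat); // None < Some(1_000)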
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update_opt {
- handle_new_monitor_update!(self, funding_txo_opt.unwrap(), chan.context.channel_id(), monitor_update,
+ handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
}
},
fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
let funding_txo;
- let (htlc_source, forwarded_htlc_value) = {
+ let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
- self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value),
- false, false, Some(*counterparty_node_id), funding_txo, msg.channel_id);
+ self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
+ Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
+ funding_txo, msg.channel_id
+ );
+
Ok(())
}
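// A hypothetical consumer-side sketch (the helper and its name are
// illustrative, not part of this diff): since `skimmed_fee_msat` is already
// counted inside `total_fee_earned_msat`, the plain routing fee on a
// forwarded payment is the difference of the two.
fn routing_fee_excluding_skim(event: &events::Event) -> Option<u64> {
	match event {
		events::Event::PaymentForwarded { total_fee_earned_msat, skimmed_fee_msat, .. } =>
			total_fee_earned_msat.map(|total| total.saturating_sub(skimmed_fee_msat.unwrap_or(0))),
		_ => None,
	}
}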
let funding_txo = chan.context.get_funding_txo();
let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry);
if let Some(monitor_update) = monitor_update_opt {
- handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update, peer_state_lock,
+ handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
peer_state, per_peer_state, chan);
}
Ok(())
if let Some(monitor_update) = monitor_update_opt {
let funding_txo = funding_txo_opt
.expect("Funding outpoint must have been set for RAA handling to succeed");
- handle_new_monitor_update!(self, funding_txo, chan.context.channel_id(), monitor_update,
+ handle_new_monitor_update!(self, funding_txo, monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
}
htlcs_to_fail
let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id));
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
- self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint, channel_id);
+ self.claim_funds_internal(htlc_update.source, preimage,
+ htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
+ false, counterparty_node_id, funding_outpoint, channel_id);
} else {
log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
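// A one-line detached illustration (hypothetical value) of the unit
// conversion above: the `ChannelMonitor` tracks HTLC values in satoshis,
// while `claim_funds_internal` expects millisatoshis.
assert_eq!(Some(5_000u64).map(|v| v * 1000), Some(5_000_000));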
if let Some(monitor_update) = monitor_opt {
has_monitor_update = true;
- handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update,
+ handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan);
continue 'peer_loop;
}
if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
channel_id);
- handle_new_monitor_update!(self, channel_funding_outpoint, channel_id, monitor_update,
+ handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
peer_state_lck, peer_state, per_peer_state, chan);
if further_update_exists {
// If there are more `ChannelMonitorUpdate`s to process, restart at the
(2, prev_short_channel_id, required),
(4, prev_htlc_id, required),
(6, prev_funding_outpoint, required),
- // Note that by the time we get past the required read for type 2 above, prev_funding_outpoint will be
+ // Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be
// filled in, so we can safely unwrap it here.
(7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
});
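// A hedged sketch of what `ChannelId::v1_from_funding_outpoint` computes for
// the default above (per BOLT 2; assumes the txid is already in its
// serialized byte order): the funding txid with its last two bytes XORed
// with the big-endian funding output index.
fn v1_channel_id_bytes(txid_bytes: [u8; 32], funding_output_index: u16) -> [u8; 32] {
	let mut id = txid_bytes;
	id[30] ^= (funding_output_index >> 8) as u8;
	id[31] ^= (funding_output_index & 0xff) as u8;
	id
}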
Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
} = action {
if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
- let channel_id = blocked_channel_id;
log_trace!(logger,
"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
- channel_id);
+ blocked_channel_id);
blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
- .entry(*channel_id)
+ .entry(*blocked_channel_id)
.or_insert_with(Vec::new).push(blocking_action.clone());
} else {
// If the channel we were blocking has closed, we don't need to
// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
// channel is closed we just assume that it probably came from an on-chain claim.
- channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
+ channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
downstream_closed, true, downstream_node_id, downstream_funding, downstream_channel_id);
}
}
let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
}
fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {