diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 57598c6f..756836f2 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1636,9 +1636,6 @@ pub struct ChannelDetails {
 	pub counterparty: ChannelCounterparty,
 	/// The Channel's funding transaction output, if we've negotiated the funding transaction with
 	/// our counterparty already.
-	///
-	/// Note that, if this has been set, `channel_id` for V1-established channels will be equivalent to
-	/// `ChannelId::v1_from_funding_outpoint(funding_txo.unwrap())`.
 	pub funding_txo: Option<OutPoint>,
 	/// The features which this channel operates with. See individual features for more info.
 	///
@@ -2155,6 +2152,7 @@ macro_rules! emit_channel_pending_event {
 				counterparty_node_id: $channel.context.get_counterparty_node_id(),
 				user_channel_id: $channel.context.get_user_id(),
 				funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+				channel_type: Some($channel.context.get_channel_type().clone()),
 			}, None));
 			$channel.context.set_channel_pending_event_emitted();
 		}
@@ -2296,7 +2294,7 @@ macro_rules! handle_new_monitor_update {
 		handle_new_monitor_update!($self, $update_res, $chan, _internal,
 			handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
 	};
-	($self: ident, $funding_txo: expr, $channel_id: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
+	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
 		let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
 			.or_insert_with(Vec::new);
 		// During startup, we push monitor updates as background events through to here in
@@ -2753,7 +2751,7 @@ where
 				// Update the monitor with the shutdown script if necessary.
 				if let Some(monitor_update) = monitor_update_opt.take() {
-					handle_new_monitor_update!(self, funding_txo_opt.unwrap(), *channel_id, monitor_update,
+					handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
 						peer_state_lock, peer_state, per_peer_state, chan);
 				}
 			} else {
@@ -3414,7 +3412,7 @@ where
 					}, onion_packet, None, &self.fee_estimator, &&logger);
 				match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
 					Some(monitor_update) => {
-						match handle_new_monitor_update!(self, funding_txo, channel_id, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
+						match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
 							false => {
 								// Note that MonitorUpdateInProgress here indicates (per function
 								// docs) that we will resend the commitment update once monitor
@@ -4770,7 +4768,7 @@ where
 			hash_map::Entry::Occupied(mut chan_phase) => {
 				if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
 					updated_chan = true;
-					handle_new_monitor_update!(self, funding_txo, channel_id, update.clone(),
+					handle_new_monitor_update!(self, funding_txo, update.clone(),
 						peer_state_lock, peer_state, per_peer_state, chan);
 				} else {
 					debug_assert!(false, "We shouldn't have an update for a non-funded channel");
@@ -5574,7 +5572,7 @@ where
 				peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
 			}
 			if !during_init {
-				handle_new_monitor_update!(self, prev_hop.outpoint, prev_hop.channel_id, monitor_update, peer_state_lock,
+				handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
 					peer_state, per_peer_state, chan);
 			} else {
 				// If we're running during init we cannot update a monitor directly -
@@ -5690,9 +5688,9 @@ where
 	}
 
 	fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
-		forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, startup_replay: bool,
-		next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint,
-		next_channel_id: ChannelId,
+		forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
+		startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
+		next_channel_outpoint: OutPoint, next_channel_id: ChannelId,
 	) {
 		match source {
 			HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
@@ -5788,18 +5786,21 @@ where
 					})
 				} else { None }
 			} else {
-				let fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+				let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
 					if let Some(claimed_htlc_value) = htlc_claim_value_msat {
 						Some(claimed_htlc_value - forwarded_htlc_value)
 					} else { None }
 				} else { None };
+				debug_assert!(skimmed_fee_msat <= total_fee_earned_msat,
+					"skimmed_fee_msat must always be included in total_fee_earned_msat");
 				Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
 					event: events::Event::PaymentForwarded {
-						fee_earned_msat,
+						total_fee_earned_msat,
 						claim_from_onchain_tx: from_onchain,
 						prev_channel_id: Some(prev_channel_id),
 						next_channel_id: Some(next_channel_id),
 						outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+						skimmed_fee_msat,
 					},
 					downstream_counterparty_and_funding_outpoint: chan_to_release,
 				})
@@ -6570,7 +6571,7 @@ where
 					}
 					// Update the monitor with the shutdown script if necessary.
 					if let Some(monitor_update) = monitor_update_opt {
-						handle_new_monitor_update!(self, funding_txo_opt.unwrap(), chan.context.channel_id(), monitor_update,
+						handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
 							peer_state_lock, peer_state, per_peer_state, chan);
 					}
 				},
@@ -6739,7 +6740,7 @@ where
 
 	fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
 		let funding_txo;
-		let (htlc_source, forwarded_htlc_value) = {
+		let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
 				.ok_or_else(|| {
@@ -6777,8 +6778,11 @@ where
 				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
 			}
 		};
-		self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value),
-			false, false, Some(*counterparty_node_id), funding_txo, msg.channel_id);
+		self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
+			Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
+			funding_txo, msg.channel_id
+		);
+
 		Ok(())
 	}
@@ -6852,7 +6856,7 @@ where
 				let funding_txo = chan.context.get_funding_txo();
 				let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry);
 				if let Some(monitor_update) = monitor_update_opt {
-					handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update, peer_state_lock,
+					handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
 						peer_state, per_peer_state, chan);
 				}
 				Ok(())
@@ -7031,7 +7035,7 @@ where
 					if let Some(monitor_update) = monitor_update_opt {
 						let funding_txo = funding_txo_opt
 							.expect("Funding outpoint must have been set for RAA handling to succeed");
-						handle_new_monitor_update!(self, funding_txo, chan.context.channel_id(), monitor_update,
+						handle_new_monitor_update!(self, funding_txo, monitor_update,
 							peer_state_lock, peer_state, per_peer_state, chan);
 					}
 					htlcs_to_fail
@@ -7276,7 +7280,9 @@ where
 					let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id));
 					if let Some(preimage) = htlc_update.payment_preimage {
 						log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
-						self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint, channel_id);
+						self.claim_funds_internal(htlc_update.source, preimage,
+							htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
+							false, counterparty_node_id, funding_outpoint, channel_id);
 					} else {
 						log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
 						let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
@@ -7372,7 +7378,7 @@ where
 
 								if let Some(monitor_update) = monitor_opt {
 									has_monitor_update = true;
-									handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update,
+									handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
 										peer_state_lock, peer_state, per_peer_state, chan);
 									continue 'peer_loop;
 								}
@@ -8141,7 +8147,7 @@ where
 						if let Some((monitor_update, further_update_exists)) =
 							chan.unblock_next_blocked_monitor_update() {
 							log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
 								channel_id);
-							handle_new_monitor_update!(self, channel_funding_outpoint, channel_id, monitor_update,
+							handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
 								peer_state_lck, peer_state, per_peer_state, chan);
 							if further_update_exists {
 								// If there are more `ChannelMonitorUpdate`s to process, restart at the
@@ -9816,7 +9822,7 @@ impl_writeable_tlv_based!(PendingAddHTLCInfo, {
 	(2, prev_short_channel_id, required),
 	(4, prev_htlc_id, required),
 	(6, prev_funding_outpoint, required),
-	// Note that by the time we get past the required read for type 2 above, prev_funding_outpoint will be
+	// Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be
 	// filled in, so we can safely unwrap it here.
 	(7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
 });
@@ -11079,12 +11085,11 @@ where
 							Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
 					} = action {
 						if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
-							let channel_id = blocked_channel_id;
 							log_trace!(logger,
 								"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
-								channel_id);
+								blocked_channel_id);
 							blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
-								.entry(*channel_id)
+								.entry(*blocked_channel_id)
 								.or_insert_with(Vec::new).push(blocking_action.clone());
 						} else {
 							// If the channel we were blocking has closed, we don't need to
@@ -11168,7 +11173,7 @@ where
 					// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
 					// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
 					// channel is closed we just assume that it probably came from an on-chain claim.
-					channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
+					channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
 						downstream_closed, true, downstream_node_id, downstream_funding,
 						downstream_channel_id);
 				}
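
Aside: the hunk at 2152 above starts populating a new channel_type field on Event::ChannelPending from $channel.context.get_channel_type().clone(). A minimal consumer-side sketch follows; the handler name and println! logging are illustrative assumptions, not part of this diff. The field is an Option because ChannelPending events serialized before this change deserialize without it:

use lightning::events::Event;

// Hypothetical event handler: inspect the negotiated channel type as soon as
// the funding transaction has been broadcast. `channel_type` is None only for
// events written by versions prior to this change.
fn on_channel_pending(event: Event) {
	if let Event::ChannelPending { channel_id, counterparty_node_id, channel_type, .. } = event {
		match channel_type {
			Some(features) => println!(
				"channel {} with peer {} now pending, type {:?}",
				channel_id, counterparty_node_id, features),
			None => println!("channel {} now pending, type unknown (event from an older version)", channel_id),
		}
	}
}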
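
Similarly, the PaymentForwarded hunks rename fee_earned_msat to total_fee_earned_msat and add skimmed_fee_msat, with the new debug_assert! documenting that any skimmed fee is a component of the total. A sketch of how an event consumer might recover the plain routing fee under that invariant; again the handler and logging are assumptions for illustration:

use lightning::events::Event;

// Hypothetical consumer of the renamed PaymentForwarded fields. Per the
// debug_assert! in the diff, skimmed_fee_msat is included in
// total_fee_earned_msat, so the routing fee alone is the difference.
fn on_payment_forwarded(event: Event) {
	if let Event::PaymentForwarded {
		total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx,
		prev_channel_id, next_channel_id, outbound_amount_forwarded_msat, ..
	} = event {
		let routing_fee_msat =
			total_fee_earned_msat.map(|total| total - skimmed_fee_msat.unwrap_or(0));
		println!(
			"forwarded {:?} msat {:?} -> {:?}: routing fee {:?}, skimmed {:?}, on-chain claim: {}",
			outbound_amount_forwarded_msat, prev_channel_id, next_channel_id,
			routing_fee_msat, skimmed_fee_msat, claim_from_onchain_tx);
	}
}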