nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
let events_3 = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events_3.len(), 1);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(nodes[0], 1);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
let events_5 = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events_5.len(), 1);
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa);
check_added_monitors!(nodes[1], 1);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(nodes[1], 0);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa);
check_added_monitors!(nodes[0], 1);
- let events = nodes[0].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[0], events);
+ expect_pending_htlcs_forwardable!(nodes[0]);
expect_payment_received!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000);
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
check_added_monitors!(nodes[1], 1);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
expect_payment_received!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000);
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
// Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
- let events = nodes[2].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[2], events);
+ expect_pending_htlcs_forwardable!(nodes[2]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 0);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
// Call forward_pending_htlcs and check that the new HTLC was simply added to the holding cell
// and not forwarded.
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 0);
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
let (outpoint, latest_update) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone();
nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
check_added_monitors!(nodes[2], 1);
assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
- let events = nodes[2].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[2], events);
+ expect_pending_htlcs_forwardable!(nodes[2]);
let events_6 = nodes[2].node.get_and_clear_pending_events();
assert_eq!(events_6.len(), 2);
};
if test_ignore_second_cs {
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
send_event = SendEvent::from_node(&nodes[1]);
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
- let events = nodes[0].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[0], events);
+ expect_pending_htlcs_forwardable!(nodes[0]);
let events_9 = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events_9.len(), 1);
// nodes[1] should be AwaitingRAA here!
check_added_monitors!(nodes[1], 0);
let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
// We send a third payment here, which is somewhat of a redundant test, but the
// Finally deliver the RAA to nodes[1] which results in a CS response to the last update
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
check_added_monitors!(nodes[1], 1);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
check_added_monitors!(nodes[1], 1);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
expect_payment_received!(nodes[1], payment_hash_3, payment_secret_3, 1000000);
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
check_added_monitors!(nodes[1], 1);
- let mut events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
check_added_monitors!(nodes[1], 1);
- let mut events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
nodes[1].node.channel_monitor_updated(&outpoint, latest_update);
check_added_monitors!(nodes[1], 0);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 1000000);
let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
check_added_monitors!(nodes[1], 1);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
expect_payment_sent!(nodes[0], payment_preimage_1, events);
// Get the payment forwards, note that they were batched into one commitment update.
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]);
nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]);
commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false);
- let events = nodes[0].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[0], events);
+ expect_pending_htlcs_forwardable!(nodes[0]);
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
- let events = nodes[2].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[2], events);
+ expect_pending_htlcs_forwardable!(nodes[2]);
check_added_monitors!(nodes[2], 1);
let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
*nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
check_added_monitors!(nodes[1], 1);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 1000000);
let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
check_added_monitors!(nodes[1], 1);
- let events = nodes[0].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[0], events);
+ expect_pending_htlcs_forwardable!(nodes[0]);
expect_payment_received!(nodes[0], payment_hash, payment_secret, 1_000_000);
claim_payment(&nodes[1], &[&nodes[0]], payment_preimage);
let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 100000);
check_added_monitors!(nodes[1], 1);
commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 100000);
claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
};
if second_fails {
assert!(nodes[2].node.fail_htlc_backwards(&payment_hash));
- let events = nodes[2].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[2], events);
+ expect_pending_htlcs_forwardable!(nodes[2]);
check_added_monitors!(nodes[2], 1);
get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
} else {
if second_fails {
reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
} else {
reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
}
if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
check_added_monitors!(nodes[1], 1);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable_ignore!(nodes[1], events); // We finally receive the second payment, but don't claim it
+ expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it
bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
},
},
},
- chan_id: Some(channel_id),
+ chan_id: None,
shutdown_finish: None,
}
}
});
}
if let Some(channel_id) = chan_id {
- $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id, err: ClosureReason::ProcessingError { err: err.err.clone() } });
+ $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id, reason: ClosureReason::ProcessingError { err: err.err.clone() } });
}
}
}
}
+ /// `peer_node_id` should be set when we receive a message from a peer, but not set when the user closes, which will be re-exposed as the `ChannelClosed`
+ /// reason.
fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
let mut chan = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let mut pending_events_lock = self.pending_events.lock().unwrap();
if peer_node_id.is_some() {
if let Some(peer_msg) = peer_msg {
- pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, err: ClosureReason::CounterpartyForceClosed { peer_msg: Some(peer_msg.to_string()) } });
- } else {
- pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, err: ClosureReason::CounterpartyForceClosed { peer_msg: None } });
+ pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() } });
}
} else {
- pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, err: ClosureReason::HolderForceClosed });
+ pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::HolderForceClosed });
}
chan.remove_entry().1
} else {
if let Some(short_id) = channel.get_short_channel_id() {
channel_state.short_to_id.remove(&short_id);
}
- // ChannelClosed event is generated by handle_errors for us.
+ // ChannelClosed event is generated by handle_error for us.
Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
},
ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
msg: update
});
}
- //TODO: split between CounterpartyInitiated/LocallyInitiated
- self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: msg.channel_id, err: ClosureReason::CooperativeClosure });
+ self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: msg.channel_id, reason: ClosureReason::CooperativeClosure });
}
Ok(())
}
msg: update
});
}
- self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), err: ClosureReason::CommitmentTxBroadcasted });
+ self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::CommitmentTxBroadcasted });
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: chan.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage {
msg: update
});
}
- self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: channel.channel_id(), err: ClosureReason::CommitmentTxBroadcasted });
+ self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: channel.channel_id(), reason: ClosureReason::CommitmentTxBroadcasted });
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: channel.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage { msg: e },
msg: update
});
}
- self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), err: ClosureReason::DisconnectedPeer });
+ self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer });
false
} else {
true
if let Some(short_id) = chan.get_short_channel_id() {
short_to_id.remove(&short_id);
}
- self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), err: ClosureReason::DisconnectedPeer });
+ self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer });
return false;
} else {
no_channels_remain = false;
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
#[test]
fn test_keysend_dup_payment_hash() {
- return true;
+ return;
// (1): Test that a keysend payment with a duplicate payment hash to an existing pending
// outbound regular payment fails as expected.
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
let events = $node.node.get_and_clear_pending_events();
assert_eq!(events.len(), $events);
match events[0] {
- Event::ChannelClosed { .. } => {}
+ Event::ChannelClosed { ref reason, .. } => {
+ //assert_eq!(reason, $reason);
+ },
_ => panic!("Unexpected event"),
}
}}
{
commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
if $fail_backwards {
- let events = $node_a.node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!($node_a, events);
+ expect_pending_htlcs_forwardable!($node_a);
check_added_monitors!($node_a, 1);
let channel_state = $node_a.node.channel_state.lock().unwrap();
}
macro_rules! expect_pending_htlcs_forwardable_ignore {
- ($node: expr, $events: expr) => {{
- assert_eq!($events.len(), 1);
- match $events[0] {
+ ($node: expr) => {{
+ let events = $node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
Event::PendingHTLCsForwardable { .. } => { },
_ => panic!("Unexpected event"),
};
}
macro_rules! expect_pending_htlcs_forwardable {
- ($node: expr, $events: expr) => {{
- expect_pending_htlcs_forwardable_ignore!($node, $events);
+ ($node: expr) => {{
+ expect_pending_htlcs_forwardable_ignore!($node);
$node.node.process_pending_htlc_forwards();
}}
}
+#[cfg(test)]
+macro_rules! expect_pending_htlcs_forwardable_from_events {
+ ($node: expr, $events: expr, $ignore: expr) => {{
+ assert_eq!($events.len(), 1);
+ match $events[0] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
+ if $ignore {
+ $node.node.process_pending_htlc_forwards();
+ }
+ }}
+}
+
#[cfg(any(test, feature = "unstable"))]
macro_rules! expect_payment_received {
($node: expr, $expected_payment_hash: expr, $expected_payment_secret: expr, $expected_recv_value: expr) => {
check_added_monitors!(node, 0);
commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
- let events = node.node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(node, events);
+ expect_pending_htlcs_forwardable!(node);
if idx == expected_path.len() - 1 {
let events_2 = node.node.get_and_clear_pending_events();
check_added_monitors!(nodes[0], 1);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- let events = nodes[0].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[0], events);
+ expect_pending_htlcs_forwardable!(nodes[0]);
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
// We have to forward pending HTLCs twice - once tries to forward the payment forward (and
// fails), the second will process the resulting failure and fail the HTLC backward.
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa);
check_added_monitors!(nodes[2], 1);
- let events = nodes[2].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[2], events);
+ expect_pending_htlcs_forwardable!(nodes[2]);
let events = nodes[2].node.get_and_clear_pending_events();
assert_eq!(events.len(), payments.len());
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(nodes[1], 1);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
let ref payment_event_11 = expect_forward!(nodes[1]);
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]);
commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
- let events = nodes[2].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[2], events);
+ expect_pending_htlcs_forwardable!(nodes[2]);
expect_payment_received!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);
// flush the htlcs in the holding cell
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]);
commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
let ref payment_event_3 = expect_forward!(nodes[1]);
assert_eq!(payment_event_3.msgs.len(), 2);
nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]);
commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
- let events = nodes[2].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[2], events);
+ expect_pending_htlcs_forwardable!(nodes[2]);
let events = nodes[2].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
expect_payment_received!(nodes[1], payment_hash_3, payment_secret_3, 100000);
// Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
check_added_monitors!(nodes[0], 1);
- let events = nodes[0].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[0], events);
+ expect_pending_htlcs_forwardable!(nodes[0]);
expect_payment_received!(nodes[0], payment_hash_4, payment_secret_4, 10000);
claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
check_added_monitors!(nodes[1], 1);
check_closed_event!(nodes[1], 1);
let mut events = nodes[0].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable_ignore!(nodes[0], events[0..1]);
+ expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
match events[1] {
Event::ChannelClosed { .. } => {}
_ => panic!("Unexpected event"),
check_spends!(commitment_tx[0], chan_2.3);
nodes[2].node.fail_htlc_backwards(&payment_hash);
check_added_monitors!(nodes[2], 0);
- let events = nodes[2].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[2], events);
+ expect_pending_htlcs_forwardable!(nodes[2]);
check_added_monitors!(nodes[2], 1);
let events = nodes[2].node.get_and_clear_pending_msg_events();
}
}
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
assert!(nodes[2].node.fail_htlc_backwards(&first_payment_hash));
- let events = nodes[2].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[2], events);
+ expect_pending_htlcs_forwardable!(nodes[2]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
// Drop the last RAA from 3 -> 2
assert!(nodes[2].node.fail_htlc_backwards(&second_payment_hash));
- let events = nodes[2].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[2], events);
+ expect_pending_htlcs_forwardable!(nodes[2]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
check_added_monitors!(nodes[2], 1);
assert!(nodes[2].node.fail_htlc_backwards(&third_payment_hash));
- let events = nodes[2].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[2], events);
+ expect_pending_htlcs_forwardable!(nodes[2]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events_2.len(), 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(nodes[1], 1);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
let events_5 = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events_5.len(), 1);
connect_block(&nodes[1], &block);
}
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
let payment_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 0);
} else {
let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
connect_blocks(&nodes[1], 1);
if forwarded_htlc {
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(fail_commit.len(), 1);
mine_transaction(&nodes[1], &htlc_timeout_tx);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(htlc_updates.update_add_htlcs.is_empty());
assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_5));
assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_6));
check_added_monitors!(nodes[4], 0);
- let events = nodes[4].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[4], events);
+ expect_pending_htlcs_forwardable!(nodes[4]);
check_added_monitors!(nodes[4], 1);
let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_2));
assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_4));
check_added_monitors!(nodes[5], 0);
- let events = nodes[5].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[5], events);
+ expect_pending_htlcs_forwardable!(nodes[5]);
check_added_monitors!(nodes[5], 1);
let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
- let events = nodes[3].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[3], events);
+ expect_pending_htlcs_forwardable!(nodes[3]);
check_added_monitors!(nodes[3], 1);
let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
}
connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
check_closed_broadcast!(nodes[2], true);
- expect_pending_htlcs_forwardable!(nodes[2], events[0..1]);
+ expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
} else {
assert_eq!(events.len(), 1);
match events[0] {
}
connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
check_closed_broadcast!(nodes[2], true);
- let events = nodes[2].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[2], events);
+ expect_pending_htlcs_forwardable!(nodes[2]);
}
} else {
mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
}
connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
check_closed_broadcast!(nodes[2], true);
- expect_pending_htlcs_forwardable!(nodes[2], events[0..1]);
+ expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
} else {
assert_eq!(events.len(), 1);
match events[0] {
}
connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
check_closed_broadcast!(nodes[2], true);
- let events = nodes[2].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[2], events);
+ expect_pending_htlcs_forwardable!(nodes[2]);
}
}
check_added_monitors!(nodes[2], 3);
let htlc_value = if use_dust { 50000 } else { 3000000 };
let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
assert!(nodes[1].node.fail_htlc_backwards(&our_payment_hash));
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
chan_stat = get_channel_value_stat!(nodes[1], chan_1_2.2);
assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
expect_payment_received!(nodes[1], our_payment_hash, our_payment_secret, 100000);
}
let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[1]);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events_2.len(), 1);
check_added_monitors!(nodes[1], 1);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events_4.len(), 1);
// Fail one HTLC to prune it in the will-be-latest-local commitment tx
assert!(nodes[1].node.fail_htlc_backwards(&payment_hash_2));
check_added_monitors!(nodes[1], 0);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
// Note that we first have to wait a random delay before processing the receipt of the HTLC,
// and then will wait a second random delay before failing the HTLC back:
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable!(nodes[1]);
// Node 3 is expecting payment of 100_000 but received 10_000,
// it should fail htlc like we didn't know the preimage.
let header_129 = BlockHeader { version: 0x20000000, prev_blockhash: header_11.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
let events = nodes[0].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable_ignore!(nodes[0], events[0..1]);
+ expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
match events[1] {
Event::ChannelClosed { .. } => {}
_ => panic!("Unexpected event"),
// Broadcast set of revoked txn on A
connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
- let events = nodes[0].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable_ignore!(nodes[0], events);
+ expect_pending_htlcs_forwardable_ignore!(nodes[0]);
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
mine_transaction(&nodes[0], &revoked_local_txn[0]);
}
// Note that after leaving the above scope we have no knowledge of any arguments or return
// values from previous calls.
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
}
// Note that after leaving the above scope we have no knowledge of any arguments or return
// values from previous calls.
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
// We have to forward pending HTLCs once to process the receipt of the HTLC and then
// again to process the pending backwards-failure of the HTLC
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
// We should fail the payment back
// additional block built on top of the current chain.
nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
&nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
check_added_monitors!(nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
expect_htlc_forward!(&nodes[2]);
expect_event!(&nodes[2], Event::PaymentReceived);
callback_node();
- let events = nodes[2].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[2], events);
+ expect_pending_htlcs_forwardable!(nodes[2]);
}
let update_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
txdata: vec![],
};
connect_block(&nodes[1], &block);
- let events = nodes[1].node.get_and_clear_pending_events();
- expect_pending_htlcs_forwardable!(nodes[1], events);
+ expect_pending_htlcs_forwardable!(nodes[1]);
}
check_added_monitors!(nodes[1], 1);
}
#[derive(Clone, Debug)]
-/// Some information provided on the closure source of the channel halting.
+/// The reason which the channel was closed. See individual variants more details.
pub enum ClosureReason {
- /// Closure generated from receiving a peer error message by ChannelManager::handle_error
+ /// Closure generated from receiving a peer error message.
CounterpartyForceClosed {
- /// The error is coming from the peer, there *might* be a human-readable msg
- peer_msg: Option<String>,
+ /// The error which the peer sent us.
+ ///
+ /// The string should be sanitized before it is used (e.g emitted to logs
+ /// or printed to stdout). Otherwise, a well crafted error message may trigger
+ /// a security in the terminal emulator or the logging subsystem.
+ peer_msg: String,
},
- /// Closure generated from ChannelManager::force_close_channel
+ /// Closure generated from [`ChannelManager::force_close_channel`], called by the user.
HolderForceClosed,
- /// Closure generated from receiving a peer's ClosingSigned message. Note the shutdown
- /// sequence might have been initially initiated by us.
+ /// The channel was closed after negotiating a cooperative close and we've now broadcasted
+ /// the cooperative close transaction. Note the shutdown may have been initiated by us.
+ //TODO: split between CounterpartyInitiated/LocallyInitiated
CooperativeClosure,
- /// Closure generated from receiving chain::Watch's CommitmentTxBroadcast event.
+ /// A commitment transaction was confirmed on chain, closing the channel. Most likely this
+ /// commitment transaction came from our counterparty, but it may also have come from
+ /// a copy of our own `ChannelMonitor`.
CommitmentTxBroadcasted,
/// Closure generated from processing an event, likely a HTLC forward/relay/reception.
ProcessingError {
err: String,
},
- /// Closure generated from ChannelManager::peer_disconnected.
+ /// The `PeerManager` informed us that we've disconnected from the peer and that it is
+ /// unlikely we'll be able to connect to the peer, most likely because we have incompatible
+ /// features and the peer, or we, require features which we, or the peer, do not support.
DisconnectedPeer,
}
impl_writeable_tlv_based_enum_upgradable!(ClosureReason,
- (0, CounterpartyForceClosed) => { (1, peer_msg, option) },
+ (0, CounterpartyForceClosed) => { (1, peer_msg, required) },
(2, HolderForceClosed) => {},
(6, CommitmentTxBroadcasted) => {},
(4, CooperativeClosure) => {},
},
/// Used to indicate that a channel with the given `channel_id` is in the process of closure.
ChannelClosed {
- /// The channel_id which has been barren from further off-chain updates but
- /// funding output might still be not resolved yet.
+ /// The channel_id of the channel which has been closed. Note that on-chain transactions
+ /// resolving the channel are likely still awaiting confirmation.
channel_id: [u8; 32],
- /// A machine-readable error message
- err: ClosureReason
+ /// The reason the channel was closed.
+ reason: ClosureReason
}
}
(2, claim_from_onchain_tx, required),
});
},
- &Event::ChannelClosed { ref channel_id, ref err } => {
- 8u8.write(writer)?;
+ &Event::ChannelClosed { ref channel_id, ref reason } => {
+ 9u8.write(writer)?;
channel_id.write(writer)?;
- err.write(writer)?;
+ reason.write(writer)?;
write_tlv_fields!(writer, {});
},
}
};
f()
},
- 8u8 => {
+ 9u8 => {
let channel_id = Readable::read(reader)?;
- let err = MaybeReadable::read(reader)?;
+ let reason = MaybeReadable::read(reader)?;
read_tlv_fields!(reader, {});
- if err.is_none() { return Ok(None); }
- Ok(Some(Event::ChannelClosed { channel_id, err: err.unwrap() }))
+ if reason.is_none() { return Ok(None); }
+ Ok(Some(Event::ChannelClosed { channel_id, reason: reason.unwrap() }))
},
// Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
x if x % 2 == 1 => Ok(None),