holding_cell_htlc_updates: Vec::new(),
next_local_htlc_id: 0,
next_remote_htlc_id: 0,
- channel_update_count: 0,
+ channel_update_count: 1,
last_local_commitment_txn: Vec::new(),
holding_cell_htlc_updates: Vec::new(),
next_local_htlc_id: 0,
next_remote_htlc_id: 0,
- channel_update_count: 0,
+ channel_update_count: 1,
last_local_commitment_txn: Vec::new(),
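Presumably the counter now starts at one rather than zero because it doubles as the `timestamp` carried in broadcast channel_update messages (see get_channel_update below), and gossip consumers only accept an update whose timestamp is strictly greater than the last one they saw. A minimal sketch of that ordering rule, with hypothetical names:

    fn update_is_newer(prev_count: u32, new_count: u32) -> bool {
        // BOLT 7: a channel_update replaces an earlier one for the same
        // channel only if its timestamp is strictly greater.
        new_count > prev_count
    }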
self.channel_state |= ChannelState::TheirFundingLocked as u32;
} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK);
+ self.channel_update_count += 1;
} else {
return Err(HandleError{err: "Peer sent a funding_locked at a strange time", msg: None});
}
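For context, `non_shutdown_state` above is presumably derived by masking off the shutdown bits, so the funding-handshake comparison is unaffected by an in-flight shutdown; a sketch assuming the mask constant used in this hunk:

    let non_shutdown_state = self.channel_state & !BOTH_SIDES_SHUTDOWN_MASK;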
return Err(HandleError{err: "Non-funding remote tried to update channel fee", msg: None});
}
Channel::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;
+ self.channel_update_count += 1;
self.feerate_per_kw = msg.feerate_per_kw as u64;
Ok(())
}
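The increment keeps the invariant that every externally visible channel change bumps the counter, so a regenerated channel_update always sorts as newer. A test-style sketch of that invariant (the method name `update_fee` is assumed from context):

    let before = chan.get_channel_update_count();
    chan.update_fee(&fee_estimator, &update_fee_msg).unwrap();
    assert!(chan.get_channel_update_count() > before);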
pub fn shutdown(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>, Vec<[u8; 32]>), HandleError> {
if self.channel_state < ChannelState::FundingSent as u32 {
self.channel_state = ChannelState::ShutdownComplete as u32;
+ self.channel_update_count += 1;
return Ok((None, None, Vec::new()));
}
for htlc in self.pending_htlcs.iter() {
// From here on out, we may not fail!
self.channel_state |= ChannelState::RemoteShutdownSent as u32;
+ self.channel_update_count += 1;
// We can't send our shutdown until we've committed all of our pending HTLCs, but the
// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
};
self.channel_state |= ChannelState::LocalShutdownSent as u32;
+ self.channel_update_count += 1;
if self.pending_htlcs.is_empty() && self.channel_outbound {
// There are no more HTLCs and we're the funder, so we start the closing_signed
// dance with an initial fee proposal!
if last_fee == msg.fee_satoshis {
self.sign_commitment_transaction(&mut closing_tx, &msg.signature);
self.channel_state = ChannelState::ShutdownComplete as u32;
+ self.channel_update_count += 1;
return Ok((None, Some(closing_tx)));
}
}
let our_sig = self.sign_commitment_transaction(&mut closing_tx, &msg.signature);
self.channel_state = ChannelState::ShutdownComplete as u32;
+ self.channel_update_count += 1;
Ok((Some(msgs::ClosingSigned {
channel_id: self.channel_id,
self.channel_value_satoshis
}
- pub fn get_channel_update_count(&mut self) -> u32 {
- self.channel_update_count += 1; //TODO: This should be base on updates, not updates *sent*
+ pub fn get_channel_update_count(&self) -> u32 {
self.channel_update_count
}
self.channel_state |= ChannelState::OurFundingLocked as u32;
} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) {
self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK);
+ self.channel_update_count += 1;
//TODO: Something about a state where we "lost confirmation"
} else if self.channel_state < ChannelState::ChannelFunded as u32 {
panic!("Started confirming a channel in a state pre-FundingSent?");
if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.get_funding_redeemscript().to_v0_p2wsh() ||
tx.output[txo_idx].value != self.channel_value_satoshis {
self.channel_state = ChannelState::ShutdownComplete as u32;
+ self.channel_update_count += 1;
} else {
self.funding_tx_confirmations = 1;
self.short_channel_id = Some(((height as u64) << (5*8)) |
} else {
self.channel_state |= ChannelState::LocalShutdownSent as u32;
}
+ self.channel_update_count += 1;
// We can't send our shutdown until we've committed all of our pending HTLCs, but the
// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
pub fn force_shutdown(&mut self) -> Vec<Transaction> {
assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
self.channel_state = ChannelState::ShutdownComplete as u32;
+ self.channel_update_count += 1;
let mut res = Vec::new();
mem::swap(&mut res, &mut self.last_local_commitment_txn);
res
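Callers are expected to broadcast whatever `force_shutdown` returns; the same pattern appears verbatim in `peer_disconnected` below:

    for tx in chan.force_shutdown() {
        self.tx_broadcaster.broadcast_transaction(&tx);
    }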
/// will be accepted on the given channel, and after additional timeout/the closing of all
/// pending HTLCs, the channel will be closed on chain.
pub fn close_channel(&self, channel_id: &Uint256) -> Result<msgs::Shutdown, HandleError> {
- let res = {
+ let (res, chan_option) = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = channel_state_lock.borrow_parts();
-
match channel_state.by_id.entry(channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
let res = chan_entry.get_mut().get_shutdown()?;
if let Some(short_id) = chan_entry.get().get_short_channel_id() {
channel_state.short_to_id.remove(&short_id);
}
- chan_entry.remove_entry();
- }
- res
+ (res, Some(chan_entry.remove_entry().1))
+ } else { (res, None) }
},
hash_map::Entry::Vacant(_) => return Err(HandleError{err: "No such channel", msg: None})
}
// Fail the HTLC backwards with unknown_next_peer (0x4000|10), since the channel it was to be forwarded over is being removed.
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
}
+ if let Some(chan) = chan_option {
+ if let Ok(update) = self.get_channel_update(&chan) {
+ let mut events = self.pending_events.lock().unwrap();
+ events.push(events::Event::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ }
Ok(res.0)
}
}
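A hedged usage sketch: once `close_channel` returns, the queued BroadcastChannelUpdate event carries the final update for the closed channel, which the caller should hand to its gossip layer (`get_and_clear_pending_events` is the EventsProvider method the tests below use):

    manager.close_channel(&channel_id)?;
    for event in manager.get_and_clear_pending_events() {
        if let events::Event::BroadcastChannelUpdate { msg } = event {
            router.handle_channel_update(&msg)?;
        }
    }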
/// only fails if the channel does not yet have an assigned short_id
- fn get_channel_update(&self, chan: &mut Channel) -> Result<msgs::ChannelUpdate, HandleError> {
+ fn get_channel_update(&self, chan: &Channel) -> Result<msgs::ChannelUpdate, HandleError> {
let short_channel_id = match chan.get_short_channel_id() {
None => return Err(HandleError{err: "Channel not yet established", msg: None}),
Some(id) => id,
})
}
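The elided body presumably assembles an UnsignedChannelUpdate whose `timestamp` field is the value returned by `get_channel_update_count`, then signs it; that is why every state change above must bump the counter, or gossip consumers would discard the refreshed update as stale.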
- /// Sends a payment along a given route, returning the UpdateAddHTLC message to give to the
- /// first hop in route. Value parameters are provided via the last hop in route, see
- /// documentation for RouteHop fields for more info.
+ /// Sends a payment along a given route.
+ /// Value parameters are provided via the last hop in the route; see the documentation for
+ /// RouteHop fields for more info.
/// See-also docs on Channel::send_htlc_and_commit.
- pub fn send_payment(&self, route: Route, payment_hash: [u8; 32]) -> Result<Option<(msgs::UpdateAddHTLC, msgs::CommitmentSigned)>, HandleError> {
+ /// May generate a SendHTLCs event on success, which should be relayed.
+ pub fn send_payment(&self, route: Route, payment_hash: [u8; 32]) -> Result<(), HandleError> {
if route.hops.len() < 1 || route.hops.len() > 20 {
return Err(HandleError{err: "Route didn't go anywhere/had bogus size", msg: None});
}
let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route)?;
let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, associated_data)?;
- let (update_add, commitment_signed, chan_monitor) = {
+ let (first_hop_node_id, (update_add, commitment_signed, chan_monitor)) = {
let mut channel_state = self.channel_state.lock().unwrap();
let id = match channel_state.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
None => return Err(HandleError{err: "No channel available with first hop!", msg: None}),
chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, onion_packet)?
};
+ let first_hop_node_id = route.hops.first().unwrap().pubkey;
+
if channel_state.claimable_htlcs.insert(payment_hash, PendingOutboundHTLC::OutboundRoute {
route,
session_priv,
}
match res {
- Some(msgs) => msgs,
- None => return Ok(None),
+ Some(msgs) => (first_hop_node_id, msgs),
+ None => return Ok(()),
}
};
if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
unimplemented!(); // maybe remove from claimable_htlcs?
}
- Ok(Some((update_add, commitment_signed)))
+
+ let mut events = self.pending_events.lock().unwrap();
+ events.push(events::Event::SendHTLCs {
+ node_id: first_hop_node_id,
+ msgs: vec![update_add],
+ commitment_msg: commitment_signed,
+ });
+ Ok(())
}
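A hedged caller sketch: `send_payment` no longer returns the HTLC messages directly; they surface as a SendHTLCs event which must be forwarded to the first hop's peer connection:

    node.send_payment(route, payment_hash)?;
    for event in node.get_and_clear_pending_events() {
        if let events::Event::SendHTLCs { node_id, msgs, commitment_msg } = event {
            // forward `msgs` and `commitment_msg` to the peer identified by `node_id`
        }
    }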
/// Call this upon creation of a funding transaction for the given channel.
impl ChainListener for ChannelManager {
fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) {
- let mut new_funding_locked_messages = Vec::new();
+ let mut new_events = Vec::new();
{
let mut channel_state = self.channel_state.lock().unwrap();
let mut short_to_ids_to_insert = Vec::new();
return true;
}
};
- new_funding_locked_messages.push(events::Event::SendFundingLocked {
+ new_events.push(events::Event::SendFundingLocked {
node_id: channel.get_their_node_id(),
msg: funding_locked,
announcement_sigs: announcement_sigs
short_to_ids_to_remove.push(short_id);
}
channel.force_shutdown();
+ if let Ok(update) = self.get_channel_update(&channel) {
+ new_events.push(events::Event::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
return false;
}
}
short_to_ids_to_remove.push(short_id);
}
channel.force_shutdown();
+ if let Ok(update) = self.get_channel_update(&channel) {
+ new_events.push(events::Event::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
return false;
}
true
}
}
let mut pending_events = self.pending_events.lock().unwrap();
- for funding_locked in new_funding_locked_messages.drain(..) {
- pending_events.push(funding_locked);
+ for event in new_events.drain(..) {
+ pending_events.push(event);
}
}
}
fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>), HandleError> {
- let res = {
+ let (res, chan_option) = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = channel_state_lock.borrow_parts();
if let Some(short_id) = chan_entry.get().get_short_channel_id() {
channel_state.short_to_id.remove(&short_id);
}
- chan_entry.remove_entry();
- }
- res
+ (res, Some(chan_entry.remove_entry().1))
+ } else { (res, None) }
},
hash_map::Entry::Vacant(_) => return Err(HandleError{err: "Failed to find corresponding channel", msg: None})
}
// Fail the HTLC backwards with unknown_next_peer (0x4000|10), since the channel it was to be forwarded over is being removed.
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() });
}
+ if let Some(chan) = chan_option {
+ if let Ok(update) = self.get_channel_update(&chan) {
+ let mut events = self.pending_events.lock().unwrap();
+ events.push(events::Event::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ }
Ok((res.0, res.1))
}
fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<Option<msgs::ClosingSigned>, HandleError> {
- let res = {
+ let (res, chan_option) = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = channel_state_lock.borrow_parts();
match channel_state.by_id.entry(msg.channel_id.clone()) {
if let Some(short_id) = chan_entry.get().get_short_channel_id() {
channel_state.short_to_id.remove(&short_id);
}
- chan_entry.remove_entry();
- }
- res
+ (res, Some(chan_entry.remove_entry().1))
+ } else { (res, None) }
},
hash_map::Entry::Vacant(_) => return Err(HandleError{err: "Failed to find corresponding channel", msg: None})
}
if let Some(broadcast_tx) = res.1 {
self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
}
+ if let Some(chan) = chan_option {
+ if let Ok(update) = self.get_channel_update(&chan) {
+ let mut events = self.pending_events.lock().unwrap();
+ events.push(events::Event::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ }
Ok(res.0)
}
}
fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = channel_state_lock.borrow_parts();
- let short_to_id = channel_state.short_to_id;
- if no_connection_possible {
- channel_state.by_id.retain(move |_, chan| {
- if chan.get_their_node_id() == *their_node_id {
- if let Some(short_id) = chan.get_short_channel_id() {
- short_to_id.remove(&short_id);
+ let mut new_events = Vec::new();
+ {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+ let short_to_id = channel_state.short_to_id;
+ if no_connection_possible {
+ channel_state.by_id.retain(|_, chan| {
+ if chan.get_their_node_id() == *their_node_id {
+ if let Some(short_id) = chan.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+ let txn_to_broadcast = chan.force_shutdown();
+ for tx in txn_to_broadcast {
+ self.tx_broadcaster.broadcast_transaction(&tx);
+ }
+ if let Ok(update) = self.get_channel_update(&chan) {
+ new_events.push(events::Event::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ false
+ } else {
+ true
}
- let txn_to_broadcast = chan.force_shutdown();
- for tx in txn_to_broadcast {
- self.tx_broadcaster.broadcast_transaction(&tx);
+ });
+ } else {
+ for chan in channel_state.by_id {
+ if chan.1.get_their_node_id() == *their_node_id {
+ //TODO: mark channel disabled (and maybe announce such after a timeout). Also
+ //fail and wipe any uncommitted outbound HTLCs as those are considered after
+ //reconnect.
}
- false
- } else {
- true
- }
- });
- } else {
- for chan in channel_state.by_id {
- if chan.1.get_their_node_id() == *their_node_id {
- //TODO: mark channel disabled (and maybe announce such after a timeout). Also
- //fail and wipe any uncommitted outbound HTLCs as those are considered after
- //reconnect.
}
}
}
+ if !new_events.is_empty() {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ for event in new_events.drain(..) {
+ pending_events.push(event);
+ }
+ }
}
}
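The restructuring follows a buffer-then-publish pattern: events are staged in `new_events` while `channel_state` is locked and only moved into `pending_events` afterwards, so the two mutexes are never held at the same time. Reduced to its core:

    let mut staged = Vec::new();
    {
        let mut state = self.channel_state.lock().unwrap();
        // ... mutate `state`, pushing any resulting events into `staged` ...
    }
    self.pending_events.lock().unwrap().extend(staged);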
(chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4)
}
- fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &Uint256, funding_tx: Transaction, close_inbound_first: bool) {
+ fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &Uint256, funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate) {
let (node_a, broadcaster_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster) } else { (&outbound_node.node, &outbound_node.tx_broadcaster) };
let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) };
let (tx_a, tx_b);
let mut funding_tx_map = HashMap::new();
funding_tx_map.insert(funding_tx.txid(), funding_tx);
tx_a.verify(&funding_tx_map).unwrap();
+
+ let events_1 = node_a.get_and_clear_pending_events();
+ assert_eq!(events_1.len(), 1);
+ let as_update = match events_1[0] {
+ Event::BroadcastChannelUpdate { ref msg } => {
+ msg.clone()
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ let events_2 = node_b.get_and_clear_pending_events();
+ assert_eq!(events_2.len(), 1);
+ let bs_update = match events_2[0] {
+ Event::BroadcastChannelUpdate { ref msg } => {
+ msg.clone()
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ (as_update, bs_update)
}
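With `close_channel` now returning both updates, tests can propagate them so later pathfinding ignores the closed channel, mirroring `get_announce_close_broadcast_events` below (variable names illustrative):

    let (as_update, bs_update) = close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, false);
    for node in &nodes {
        node.router.handle_channel_update(&as_update).unwrap();
        node.router.handle_channel_update(&bs_update).unwrap();
    }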
struct SendEvent {
};
let mut payment_event = {
- let msgs = origin_node.node.send_payment(route, our_payment_hash).unwrap().unwrap();
+ origin_node.node.send_payment(route, our_payment_hash).unwrap();
{
let mut added_monitors = origin_node.chan_monitor.added_monitors.lock().unwrap();
assert_eq!(added_monitors.len(), 1);
added_monitors.clear();
}
- SendEvent {
- node_id: expected_route[0].node.get_our_node_id(),
- msgs: vec!(msgs.0),
- commitment_msg: msgs.1,
- }
+
+ let mut events = origin_node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.remove(0))
};
let mut prev_node = origin_node;
assert_eq!(added_monitors.len(), 1);
added_monitors.clear();
}
- for event in events_2.drain(..) {
- payment_event = SendEvent::from_event(event);
- }
+ payment_event = SendEvent::from_event(events_2.remove(0));
assert_eq!(payment_event.msgs.len(), 1);
}
res
}
+ fn get_announce_close_broadcast_events(nodes: &Vec<Node>, a: usize, b: usize) {
+ let events_1 = nodes[a].node.get_and_clear_pending_events();
+ assert_eq!(events_1.len(), 1);
+ let as_update = match events_1[0] {
+ Event::BroadcastChannelUpdate { ref msg } => {
+ msg.clone()
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ let events_2 = nodes[b].node.get_and_clear_pending_events();
+ assert_eq!(events_2.len(), 1);
+ let bs_update = match events_2[0] {
+ Event::BroadcastChannelUpdate { ref msg } => {
+ msg.clone()
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ for node in nodes {
+ node.router.handle_channel_update(&as_update).unwrap();
+ node.router.handle_channel_update(&bs_update).unwrap();
+ }
+ }
+
#[test]
fn channel_monitor_network_test() {
// Simple test which builds a network of ChannelManagers, connects them to each other, and
nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0]; 1], &[4; 1]);
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
}
+ get_announce_close_broadcast_events(&nodes, 0, 1);
assert_eq!(nodes[0].node.list_channels().len(), 0);
assert_eq!(nodes[1].node.list_channels().len(), 1);
nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0]; 1], &[4; 1]);
assert_eq!(nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
}
+ get_announce_close_broadcast_events(&nodes, 1, 2);
assert_eq!(nodes[1].node.list_channels().len(), 0);
assert_eq!(nodes[2].node.list_channels().len(), 1);
check_preimage_claim(&nodes[3], &node_txn);
}
+ get_announce_close_broadcast_events(&nodes, 2, 3);
assert_eq!(nodes[2].node.list_channels().len(), 0);
assert_eq!(nodes[3].node.list_channels().len(), 1);
check_preimage_claim(&nodes[4], &node_txn);
}
+ get_announce_close_broadcast_events(&nodes, 3, 4);
assert_eq!(nodes[3].node.list_channels().len(), 0);
assert_eq!(nodes[4].node.list_channels().len(), 0);
- // TODO: Need to reenable this when we fix local route tracking
// Create some new channels:
- /*let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
+ let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
// A pending HTLC which will be revoked:
let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
}
get_announce_close_broadcast_events(&nodes, 0, 1);
assert_eq!(nodes[0].node.list_channels().len(), 0);
- assert_eq!(nodes[1].node.list_channels().len(), 0);*/
+ assert_eq!(nodes[1].node.list_channels().len(), 0);
// Check that we processed all pending events
for node in nodes {