use ln::msgs;
/// Stores the info we will need to send when we want to forward an HTLC onwards
+ #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
pub struct PendingForwardHTLCInfo {
pub(super) onion_packet: Option<msgs::OnionPacket>,
pub(super) payment_hash: [u8; 32],
pub(super) outgoing_cltv_value: u32,
}
+ /// A message used to fail an HTLC back to the previous hop: either a normal
+ /// update_fail_htlc with an encrypted reason, or an update_fail_malformed_htlc
+ /// (used when the onion ephemeral pubkey is invalid and no shared secret can
+ /// be derived — see decode_update_add_htlc_onion).
+ #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+ pub enum HTLCFailureMsg {
+ /// Fail with an opaque, encrypted failure reason relayed back to the sender.
+ Relay(msgs::UpdateFailHTLC),
+ /// Fail reporting the onion itself as malformed (carries sha256_of_onion).
+ Malformed(msgs::UpdateFailMalformedHTLC),
+ }
+
+ /// Stores whether we can't forward an HTLC or relevant forwarding info
+ #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+ pub enum PendingHTLCStatus {
+ /// The onion decoded cleanly; carries the info needed to forward (or, when
+ /// onion_packet is None, accept) the HTLC.
+ Forward(PendingForwardHTLCInfo),
+ /// The HTLC cannot be accepted/forwarded; carries the failure message to
+ /// send back to the previous hop.
+ Fail(HTLCFailureMsg),
+ }
+
#[cfg(feature = "fuzztarget")]
- impl PendingForwardHTLCInfo {
+ impl PendingHTLCStatus {
pub fn dummy() -> Self {
- Self {
+ PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
onion_packet: None,
payment_hash: [0; 32],
short_channel_id: 0,
prev_short_channel_id: 0,
amt_to_forward: 0,
outgoing_cltv_value: 0,
- }
+ })
}
}
}
}
+/// Internal error type returned by the internal_* message handlers: wraps the
+/// HandleError destined for the peer together with whether the channel must be
+/// force-closed as a consequence of the error.
+struct MsgHandleErrInternal {
+ err: msgs::HandleError,
+ needs_channel_force_close: bool,
+}
+impl MsgHandleErrInternal {
+ /// Builds an error that sends the given static message to the peer without
+ /// force-closing the channel.
+ #[inline]
+ fn send_err_msg_no_close(err: &'static str, channel_id: [u8; 32]) -> Self {
+ Self {
+ err: HandleError {
+ err,
+ action: Some(msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage {
+ channel_id,
+ data: err.to_string()
+ },
+ }),
+ },
+ needs_channel_force_close: false,
+ }
+ }
+ /// Builds an error that sends the given static message to the peer AND
+ /// flags the channel for force-closure (only difference from
+ /// send_err_msg_no_close is the needs_channel_force_close flag).
+ #[inline]
+ fn send_err_msg_close_chan(err: &'static str, channel_id: [u8; 32]) -> Self {
+ Self {
+ err: HandleError {
+ err,
+ action: Some(msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage {
+ channel_id,
+ data: err.to_string()
+ },
+ }),
+ },
+ needs_channel_force_close: true,
+ }
+ }
+ /// Wraps an existing HandleError, setting needs_channel_force_close.
+ /// NOTE(review): despite the "maybe" in the name, the flag is set
+ /// unconditionally here — presumably the handler decides based on channel
+ /// state; confirm against callers.
+ #[inline]
+ fn from_maybe_close(err: msgs::HandleError) -> Self {
+ Self { err, needs_channel_force_close: true }
+ }
+ /// Wraps an existing HandleError without requesting a force-close.
+ #[inline]
+ fn from_no_close(err: msgs::HandleError) -> Self {
+ Self { err, needs_channel_force_close: false }
+ }
+}
+
/// We hold back HTLCs we intend to relay for a random interval in the range (this, 5*this). This
/// provides some limited amount of privacy. Ideally this would range from somewhere like 1 second
/// to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly. We could
announce_channels_publicly: bool,
fee_proportional_millionths: u32,
latest_block_height: AtomicUsize,
- secp_ctx: Secp256k1,
+ secp_ctx: Secp256k1<secp256k1::All>,
channel_state: Mutex<ChannelHolder>,
our_network_key: SecretKey,
const CLTV_EXPIRY_DELTA: u16 = 6 * 24 * 2; //TODO?
macro_rules! secp_call {
- ( $res : expr ) => {
+ ( $res: expr, $err: expr ) => {
match $res {
Ok(key) => key,
- //TODO: Make the err a parameter!
- Err(_) => return Err(HandleError{err: "Key error", action: None})
+ Err(_) => return Err($err),
}
};
}
//TODO: We need to handle monitoring of pending offered HTLCs which just hit the chain and
//may be claimed, resulting in us claiming the inbound HTLCs (and back-failing after
//timeouts are hit and our claims confirm).
+ //TODO: In any case, we need to make sure we remove any pending htlc tracking (via
+ //fail_backwards or claim_funds) eventually for all HTLCs that were in the channel
}
/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
// can only fail if an intermediary hop has an invalid public key or session_priv is invalid
#[inline]
- fn construct_onion_keys_callback<FType: FnMut(SharedSecret, [u8; 32], PublicKey, &RouteHop)> (secp_ctx: &Secp256k1, route: &Route, session_priv: &SecretKey, mut callback: FType) -> Result<(), HandleError> {
+ fn construct_onion_keys_callback<T: secp256k1::Signing, FType: FnMut(SharedSecret, [u8; 32], PublicKey, &RouteHop)> (secp_ctx: &Secp256k1<T>, route: &Route, session_priv: &SecretKey, mut callback: FType) -> Result<(), secp256k1::Error> {
let mut blinded_priv = session_priv.clone();
- let mut blinded_pub = secp_call!(PublicKey::from_secret_key(secp_ctx, &blinded_priv));
- let mut first_iteration = true;
+ let mut blinded_pub = PublicKey::from_secret_key(secp_ctx, &blinded_priv);
for hop in route.hops.iter() {
let shared_secret = SharedSecret::new(secp_ctx, &hop.pubkey, &blinded_priv);
let mut blinding_factor = [0u8; 32];
sha.result(&mut blinding_factor);
- if first_iteration {
- blinded_pub = secp_call!(PublicKey::from_secret_key(secp_ctx, &blinded_priv));
- first_iteration = false;
- }
let ephemeral_pubkey = blinded_pub;
- secp_call!(blinded_priv.mul_assign(secp_ctx, &secp_call!(SecretKey::from_slice(secp_ctx, &blinding_factor))));
- blinded_pub = secp_call!(PublicKey::from_secret_key(secp_ctx, &blinded_priv));
+ blinded_priv.mul_assign(secp_ctx, &SecretKey::from_slice(secp_ctx, &blinding_factor)?)?;
+ blinded_pub = PublicKey::from_secret_key(secp_ctx, &blinded_priv);
callback(shared_secret, blinding_factor, ephemeral_pubkey, hop);
}
}
// can only fail if an intermediary hop has an invalid public key or session_priv is invalid
- fn construct_onion_keys(secp_ctx: &Secp256k1, route: &Route, session_priv: &SecretKey) -> Result<Vec<OnionKeys>, HandleError> {
+ fn construct_onion_keys<T: secp256k1::Signing>(secp_ctx: &Secp256k1<T>, route: &Route, session_priv: &SecretKey) -> Result<Vec<OnionKeys>, secp256k1::Error> {
let mut res = Vec::with_capacity(route.hops.len());
Self::construct_onion_keys_callback(secp_ctx, route, session_priv, |shared_secret, _blinding_factor, ephemeral_pubkey, _| {
Ok(msgs::OnionPacket{
version: 0,
- public_key: onion_keys.first().unwrap().ephemeral_pubkey,
+ public_key: Ok(onion_keys.first().unwrap().ephemeral_pubkey),
hop_data: packet_data,
hmac: hmac_res,
})
ChannelManager::encrypt_failure_packet(shared_secret, &failure_packet.encode()[..])
}
+ /// Decodes and validates the onion of an incoming update_add_htlc, returning
+ /// either the forwarding info for the next hop (or for ourselves, if we are
+ /// the final recipient) or the failure message to send back to the previous
+ /// hop, plus the derived shared secret (None only if the ephemeral pubkey was
+ /// invalid) and the channel_state lock, which is taken at some point during
+ /// processing and handed back to the caller still held.
+ fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, Option<SharedSecret>, MutexGuard<ChannelHolder>) {
+ // SHA256 of the received onion hop_data, reported back in
+ // update_fail_malformed_htlc and in some error data payloads.
+ macro_rules! get_onion_hash {
+ () => {
+ {
+ let mut sha = Sha256::new();
+ sha.input(&msg.onion_routing_packet.hop_data);
+ let mut onion_hash = [0; 32];
+ sha.result(&mut onion_hash);
+ onion_hash
+ }
+ }
+ }
+
+ // With an invalid ephemeral pubkey we cannot derive the shared secret at
+ // all, so the only option is to respond with update_fail_malformed_htlc.
+ if let Err(_) = msg.onion_routing_packet.public_key {
+ log_info!(self, "Failed to accept/forward incoming HTLC with invalid ephemeral pubkey");
+ return (PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ sha256_of_onion: get_onion_hash!(),
+ failure_code: 0x8000 | 0x4000 | 6,
+ })), None, self.channel_state.lock().unwrap());
+ }
+
+ let shared_secret = SharedSecret::new(&self.secp_ctx, &msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key);
+ let (rho, mu) = ChannelManager::gen_rho_mu_from_shared_secret(&shared_secret);
+
+ let mut channel_state = None;
+ // Builds an encrypted (Relay) failure with the given code/data and
+ // returns, taking the channel_state lock first if not already held.
+ macro_rules! return_err {
+ ($msg: expr, $err_code: expr, $data: expr) => {
+ {
+ log_info!(self, "Failed to accept/forward incoming HTLC: {}", $msg);
+ if channel_state.is_none() {
+ channel_state = Some(self.channel_state.lock().unwrap());
+ }
+ return (PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ reason: ChannelManager::build_first_hop_failure_packet(&shared_secret, $err_code, $data),
+ })), Some(shared_secret), channel_state.unwrap());
+ }
+ }
+ }
+
+ if msg.onion_routing_packet.version != 0 {
+ //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
+ //sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
+ //the hash doesn't really serve any purpose - in the case of hashing all data, the
+ //receiving node would have to brute force to figure out which version was put in the
+ //packet by the node that sent us the message, in the case of hashing the hop_data, the
+ //node knows the HMAC matched, so they already know what is there...
+ return_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4, &get_onion_hash!());
+ }
+
+ // Verify the onion HMAC (over hop_data then payment_hash) before decrypting.
+ let mut hmac = Hmac::new(Sha256::new(), &mu);
+ hmac.input(&msg.onion_routing_packet.hop_data);
+ hmac.input(&msg.payment_hash);
+ if hmac.result() != MacResult::new(&msg.onion_routing_packet.hmac) {
+ return_err!("HMAC Check failed", 0x8000 | 0x4000 | 5, &get_onion_hash!());
+ }
+
+ // Decrypt and parse our 65-byte hop data from the front of the onion.
+ let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
+ let next_hop_data = {
+ let mut decoded = [0; 65];
+ chacha.process(&msg.onion_routing_packet.hop_data[0..65], &mut decoded);
+ match msgs::OnionHopData::decode(&decoded[..]) {
+ Err(err) => {
+ let error_code = match err {
+ msgs::DecodeError::UnknownRealmByte => 0x4000 | 1,
+ _ => 0x2000 | 2, // Should never happen
+ };
+ return_err!("Unable to decode our hop data", error_code, &[0;0]);
+ },
+ Ok(msg) => msg
+ }
+ };
+
+ //TODO: Check that msg.cltv_expiry is within acceptable bounds!
+
+ // An all-zero HMAC in our hop data indicates we are the final recipient.
+ let pending_forward_info = if next_hop_data.hmac == [0; 32] {
+ // OUR PAYMENT!
+ if next_hop_data.data.amt_to_forward != msg.amount_msat {
+ return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
+ }
+ if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry {
+ return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
+ }
+
+ // Note that we could obviously respond immediately with an update_fulfill_htlc
+ // message, however that would leak that we are the recipient of this payment, so
+ // instead we stay symmetric with the forwarding case, only responding (after a
+ // delay) once they've sent us a commitment_signed!
+
+ PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
+ onion_packet: None,
+ payment_hash: msg.payment_hash.clone(),
+ short_channel_id: 0,
+ prev_short_channel_id: 0,
+ amt_to_forward: next_hop_data.data.amt_to_forward,
+ outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
+ })
+ } else {
+ // Not for us: shift the onion left by one 65-byte hop, zero-pad the
+ // tail, and re-blind the ephemeral pubkey for the next hop.
+ let mut new_packet_data = [0; 20*65];
+ chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]);
+ chacha.process(&ChannelManager::ZERO[0..65], &mut new_packet_data[19*65..]);
+
+ let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
+
+ // Blinding factor is SHA256(ephemeral_pubkey || shared_secret).
+ let blinding_factor = {
+ let mut sha = Sha256::new();
+ sha.input(&new_pubkey.serialize()[..]);
+ sha.input(&shared_secret[..]);
+ let mut res = [0u8; 32];
+ sha.result(&mut res);
+ match SecretKey::from_slice(&self.secp_ctx, &res) {
+ Err(_) => {
+ return_err!("Blinding factor is an invalid private key", 0x8000 | 0x4000 | 6, &get_onion_hash!());
+ },
+ Ok(key) => key
+ }
+ };
+
+ if let Err(_) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor) {
+ return_err!("New blinding factor is an invalid private key", 0x8000 | 0x4000 | 6, &get_onion_hash!());
+ }
+
+ let outgoing_packet = msgs::OnionPacket {
+ version: 0,
+ public_key: Ok(new_pubkey),
+ hop_data: new_packet_data,
+ hmac: next_hop_data.hmac.clone(),
+ };
+
+ PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
+ onion_packet: Some(outgoing_packet),
+ payment_hash: msg.payment_hash.clone(),
+ short_channel_id: next_hop_data.data.short_channel_id,
+ prev_short_channel_id: 0,
+ amt_to_forward: next_hop_data.data.amt_to_forward,
+ outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
+ })
+ };
+
+ // For the forwarding case, sanity-check the outgoing channel's liveness,
+ // our fee, and the CLTV delta, failing back with a channel_update if the
+ // upstream values don't satisfy our policy.
+ channel_state = Some(self.channel_state.lock().unwrap());
+ if let &PendingHTLCStatus::Forward(PendingForwardHTLCInfo { ref onion_packet, ref short_channel_id, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
+ if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject them in the body here
+ let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
+ let forwarding_id = match id_option {
+ None => {
+ return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]);
+ },
+ Some(id) => id.clone(),
+ };
+ if let Some((err, code, chan_update)) = {
+ let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
+ if !chan.is_live() {
+ Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, self.get_channel_update(chan).unwrap()))
+ } else {
+ let fee = amt_to_forward.checked_mul(self.fee_proportional_millionths as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) });
+ if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward {
+ Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, self.get_channel_update(chan).unwrap()))
+ } else {
+ if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 {
+ Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, self.get_channel_update(chan).unwrap()))
+ } else {
+ None
+ }
+ }
+ }
+ } {
+ return_err!(err, code, &chan_update.encode_with_len()[..]);
+ }
+ }
+ }
+
+ (pending_forward_info, Some(shared_secret), channel_state.unwrap())
+ }
+
/// only fails if the channel does not yet have an assigned short_id
fn get_channel_update(&self, chan: &Channel) -> Result<msgs::ChannelUpdate, HandleError> {
let short_channel_id = match chan.get_short_channel_id() {
Some(id) => id,
};
- let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).unwrap().serialize()[..] < chan.get_their_node_id().serialize()[..];
+ let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_their_node_id().serialize()[..];
let unsigned = msgs::UnsignedChannelUpdate {
chain_hash: self.genesis_hash,
htlc_minimum_msat: chan.get_our_htlc_minimum_msat(),
fee_base_msat: chan.get_our_fee_base_msat(&*self.fee_estimator),
fee_proportional_millionths: self.fee_proportional_millionths,
+ excess_data: Vec::new(),
};
let msg_hash = Sha256dHash::from_data(&unsigned.encode()[..]);
- let sig = self.secp_ctx.sign(&Message::from_slice(&msg_hash[..]).unwrap(), &self.our_network_key).unwrap(); //TODO Can we unwrap here?
+ let sig = self.secp_ctx.sign(&Message::from_slice(&msg_hash[..]).unwrap(), &self.our_network_key); //TODO Can we unwrap here?
Ok(msgs::ChannelUpdate {
signature: sig,
}
}
- let session_priv = secp_call!(SecretKey::from_slice(&self.secp_ctx, &{
+ let session_priv = SecretKey::from_slice(&self.secp_ctx, &{
let mut session_key = [0; 32];
rng::fill_bytes(&mut session_key);
session_key
- }));
+ }).expect("RNG is bad!");
let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
- let onion_keys = ChannelManager::construct_onion_keys(&self.secp_ctx, &route, &session_priv)?;
+ //TODO: This should return something other than HandleError, that's really intended for
+ //p2p-returns only.
+ let onion_keys = secp_call!(ChannelManager::construct_onion_keys(&self.secp_ctx, &route, &session_priv),
+ HandleError{err: "Pubkey along hop was maliciously selected", action: Some(msgs::ErrorAction::IgnoreError)});
let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height)?;
let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash)?;
}
let mut events = self.pending_events.lock().unwrap();
- events.push(events::Event::SendHTLCs {
+ events.push(events::Event::UpdateHTLCs {
node_id: first_hop_node_id,
- msgs: vec![update_add],
- commitment_msg: commitment_signed,
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: vec![update_add],
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ commitment_signed,
+ },
});
Ok(())
}
}
}
- fn get_announcement_sigs(&self, chan: &Channel) -> Result<Option<msgs::AnnouncementSignatures>, HandleError> {
- if !chan.is_usable() || !chan.should_announce() { return Ok(None) }
+ fn get_announcement_sigs(&self, chan: &Channel) -> Option<msgs::AnnouncementSignatures> {
+ if !chan.should_announce() { return None }
- let (announcement, our_bitcoin_sig) = chan.get_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone())?;
+ let (announcement, our_bitcoin_sig) = match chan.get_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone()) {
+ Ok(res) => res,
+ Err(_) => return None, // Only in case of state precondition violations eg channel is closing
+ };
let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
- let our_node_sig = secp_call!(self.secp_ctx.sign(&msghash, &self.our_network_key));
+ let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
- Ok(Some(msgs::AnnouncementSignatures {
+ Some(msgs::AnnouncementSignatures {
channel_id: chan.channel_id(),
short_channel_id: chan.get_short_channel_id().unwrap(),
node_signature: our_node_sig,
bitcoin_signature: our_bitcoin_sig,
- }))
+ })
}
/// Processes HTLCs which are pending waiting on random forward delay.
if !add_htlc_msgs.is_empty() {
let (commitment_msg, monitor) = match forward_chan.send_commitment() {
Ok(res) => res,
- Err(_e) => {
+ Err(e) => {
+ if let &Some(msgs::ErrorAction::DisconnectPeer{msg: Some(ref _err_msg)}) = &e.action {
+ } else if let &Some(msgs::ErrorAction::SendErrorMessage{msg: ref _err_msg}) = &e.action {
+ } else {
+ panic!("Stated return value requirements in send_commitment() were not met");
+ }
//TODO: Handle...this is bad!
continue;
},
};
- new_events.push((Some(monitor), events::Event::SendHTLCs {
+ new_events.push((Some(monitor), events::Event::UpdateHTLCs {
node_id: forward_chan.get_their_node_id(),
- msgs: add_htlc_msgs,
- commitment_msg: commitment_msg,
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: add_htlc_msgs,
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ commitment_signed: commitment_msg,
+ },
}));
}
} else {
}
let mut pending_events = self.pending_events.lock().unwrap();
- //TODO: replace by HandleError ? UpdateFailHTLC in handle_update_add_htlc need also to build a CommitmentSigned
- pending_events.push(events::Event::SendFailHTLC {
+ pending_events.push(events::Event::UpdateHTLCs {
node_id,
- msg: msg,
- commitment_msg: commitment_msg,
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: vec![msg],
+ update_fail_malformed_htlcs: Vec::new(),
+ commitment_signed: commitment_msg,
+ },
});
},
None => {},
if let Some((msg, commitment_msg)) = fulfill_msgs.0 {
let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::SendFulfillHTLC {
+ pending_events.push(events::Event::UpdateHTLCs {
node_id: node_id,
- msg,
- commitment_msg,
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: vec![msg],
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ commitment_signed: commitment_msg,
+ }
});
}
true
/// Gets the node_id held by this ChannelManager
pub fn get_our_node_id(&self) -> PublicKey {
- PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).unwrap()
+ PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key)
}
/// Used to restore channels to normal operation after a
pub fn test_restore_channel_monitor(&self) {
unimplemented!();
}
-}
-
-impl events::EventsProvider for ChannelManager {
- fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
- let mut pending_events = self.pending_events.lock().unwrap();
- let mut ret = Vec::new();
- mem::swap(&mut ret, &mut *pending_events);
- ret
- }
-}
-
-impl ChainListener for ChannelManager {
- fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) {
- let mut new_events = Vec::new();
- let mut failed_channels = Vec::new();
- {
- let mut channel_lock = self.channel_state.lock().unwrap();
- let channel_state = channel_lock.borrow_parts();
- let short_to_id = channel_state.short_to_id;
- channel_state.by_id.retain(|_, channel| {
- let chan_res = channel.block_connected(header, height, txn_matched, indexes_of_txn_matched);
- if let Ok(Some(funding_locked)) = chan_res {
- let announcement_sigs = match self.get_announcement_sigs(channel) {
- Ok(res) => res,
- Err(e) => {
- log_error!(self, "Got error handling message: {}!", e.err);
- //TODO: push e on events and blow up the channel (it has bad keys)
- return true;
- }
- };
- new_events.push(events::Event::SendFundingLocked {
- node_id: channel.get_their_node_id(),
- msg: funding_locked,
- announcement_sigs: announcement_sigs
- });
- short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
- } else if let Err(e) = chan_res {
- new_events.push(events::Event::HandleError {
- node_id: channel.get_their_node_id(),
- action: e.action,
- });
- if channel.is_shutdown() {
- return false;
- }
- }
- if let Some(funding_txo) = channel.get_funding_txo() {
- for tx in txn_matched {
- for inp in tx.input.iter() {
- if inp.prev_hash == funding_txo.txid && inp.prev_index == funding_txo.index as u32 {
- if let Some(short_id) = channel.get_short_channel_id() {
- short_to_id.remove(&short_id);
- }
- // It looks like our counterparty went on-chain. We go ahead and
- // broadcast our latest local state as well here, just in case its
- // some kind of SPV attack, though we expect these to be dropped.
- failed_channels.push(channel.force_shutdown());
- if let Ok(update) = self.get_channel_update(&channel) {
- new_events.push(events::Event::BroadcastChannelUpdate {
- msg: update
- });
- }
- return false;
- }
- }
- }
- }
- if channel.is_funding_initiated() && channel.channel_monitor().would_broadcast_at_height(height) {
- if let Some(short_id) = channel.get_short_channel_id() {
- short_to_id.remove(&short_id);
- }
- failed_channels.push(channel.force_shutdown());
- // If would_broadcast_at_height() is true, the channel_monitor will broadcast
- // the latest local tx for us, so we should skip that here (it doesn't really
- // hurt anything, but does make tests a bit simpler).
- failed_channels.last_mut().unwrap().0 = Vec::new();
- if let Ok(update) = self.get_channel_update(&channel) {
- new_events.push(events::Event::BroadcastChannelUpdate {
- msg: update
- });
- }
- return false;
- }
- true
- });
- }
- for failure in failed_channels.drain(..) {
- self.finish_force_close_channel(failure);
- }
- let mut pending_events = self.pending_events.lock().unwrap();
- for funding_locked in new_events.drain(..) {
- pending_events.push(funding_locked);
- }
- self.latest_block_height.store(height as usize, Ordering::Release);
- }
- /// We force-close the channel without letting our counterparty participate in the shutdown
- fn block_disconnected(&self, header: &BlockHeader) {
- let mut new_events = Vec::new();
- let mut failed_channels = Vec::new();
- {
- let mut channel_lock = self.channel_state.lock().unwrap();
- let channel_state = channel_lock.borrow_parts();
- let short_to_id = channel_state.short_to_id;
- channel_state.by_id.retain(|_, v| {
- if v.block_disconnected(header) {
- if let Some(short_id) = v.get_short_channel_id() {
- short_to_id.remove(&short_id);
- }
- failed_channels.push(v.force_shutdown());
- if let Ok(update) = self.get_channel_update(&v) {
- new_events.push(events::Event::BroadcastChannelUpdate {
- msg: update
- });
- }
- false
- } else {
- true
- }
- });
- }
- for failure in failed_channels.drain(..) {
- self.finish_force_close_channel(failure);
- }
- if !new_events.is_empty() {
- let mut pending_events = self.pending_events.lock().unwrap();
- for funding_locked in new_events.drain(..) {
- pending_events.push(funding_locked);
- }
- }
- self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
- }
-}
-
-impl ChannelMessageHandler for ChannelManager {
- //TODO: Handle errors and close channel (or so)
- fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<msgs::AcceptChannel, HandleError> {
+ fn internal_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<msgs::AcceptChannel, MsgHandleErrInternal> {
if msg.chain_hash != self.genesis_hash {
- return Err(HandleError{err: "Unknown genesis block hash", action: None});
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash", msg.temporary_channel_id.clone()));
}
let mut channel_state = self.channel_state.lock().unwrap();
if channel_state.by_id.contains_key(&msg.temporary_channel_id) {
- return Err(HandleError{err: "temporary_channel_id collision!", action: None});
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!", msg.temporary_channel_id.clone()));
}
let chan_keys = if cfg!(feature = "fuzztarget") {
}
};
- let channel = Channel::new_from_req(&*self.fee_estimator, chan_keys, their_node_id.clone(), msg, 0, false, self.announce_channels_publicly, Arc::clone(&self.logger))?;
- let accept_msg = channel.get_accept_channel()?;
+ let channel = Channel::new_from_req(&*self.fee_estimator, chan_keys, their_node_id.clone(), msg, 0, false, self.announce_channels_publicly, Arc::clone(&self.logger)).map_err(|e| MsgHandleErrInternal::from_no_close(e))?;
+ let accept_msg = channel.get_accept_channel();
channel_state.by_id.insert(channel.channel_id(), channel);
Ok(accept_msg)
}
- fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
+ fn internal_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
let (value, output_script, user_id) = {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.get_mut(&msg.temporary_channel_id) {
Some(chan) => {
if chan.get_their_node_id() != *their_node_id {
- return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None})
+ //TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
}
- chan.accept_channel(&msg)?;
+ chan.accept_channel(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
(chan.get_value_satoshis(), chan.get_funding_redeemscript().to_v0_p2wsh(), chan.get_user_id())
},
- None => return Err(HandleError{err: "Failed to find corresponding channel", action: None})
+ //TODO: same as above
+ None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
}
};
let mut pending_events = self.pending_events.lock().unwrap();
Ok(())
}
- fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<msgs::FundingSigned, HandleError> {
+ fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<msgs::FundingSigned, MsgHandleErrInternal> {
let (chan, funding_msg, monitor_update) = {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
hash_map::Entry::Occupied(mut chan) => {
if chan.get().get_their_node_id() != *their_node_id {
- return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None})
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
}
match chan.get_mut().funding_created(msg) {
Ok((funding_msg, monitor_update)) => {
(chan.remove(), funding_msg, monitor_update)
},
Err(e) => {
- //TODO: Possibly remove the channel depending on e.action
- return Err(e);
+ return Err(e).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))
}
}
},
- hash_map::Entry::Vacant(_) => return Err(HandleError{err: "Failed to find corresponding channel", action: None})
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
}
}; // Release channel lock for install_watch_outpoint call,
// note that this means if the remote end is misbehaving and sends a message for the same
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.entry(funding_msg.channel_id) {
hash_map::Entry::Occupied(_) => {
- return Err(HandleError {
- err: "Duplicate channel_id!",
- action: Some(msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id: funding_msg.channel_id, data: "Already had channel with the new channel_id".to_owned() } })
- });
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id", funding_msg.channel_id))
},
hash_map::Entry::Vacant(e) => {
e.insert(chan);
Ok(funding_msg)
}
- fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), HandleError> {
+ fn internal_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
let (funding_txo, user_id, monitor) = {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
if chan.get_their_node_id() != *their_node_id {
- return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None})
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
- let chan_monitor = chan.funding_signed(&msg)?;
+ let chan_monitor = chan.funding_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
(chan.get_funding_txo().unwrap(), chan.get_user_id(), chan_monitor)
},
- None => return Err(HandleError{err: "Failed to find corresponding channel", action: None})
+ None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
}
};
if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
Ok(())
}
- fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<Option<msgs::AnnouncementSignatures>, HandleError> {
+ fn internal_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<Option<msgs::AnnouncementSignatures>, MsgHandleErrInternal> {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
if chan.get_their_node_id() != *their_node_id {
- return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None})
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
- chan.funding_locked(&msg)?;
- return Ok(self.get_announcement_sigs(chan)?);
+ chan.funding_locked(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+ return Ok(self.get_announcement_sigs(chan));
},
- None => return Err(HandleError{err: "Failed to find corresponding channel", action: None})
+ None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
};
}
- fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>), HandleError> {
+ fn internal_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>), MsgHandleErrInternal> {
let (res, chan_option) = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = channel_state_lock.borrow_parts();
match channel_state.by_id.entry(msg.channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
if chan_entry.get().get_their_node_id() != *their_node_id {
- return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None})
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
- let res = chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg)?;
+ let res = chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
if chan_entry.get().is_shutdown() {
if let Some(short_id) = chan_entry.get().get_short_channel_id() {
channel_state.short_to_id.remove(&short_id);
(res, Some(chan_entry.remove_entry().1))
} else { (res, None) }
},
- hash_map::Entry::Vacant(_) => return Err(HandleError{err: "Failed to find corresponding channel", action: None})
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
}
};
for payment_hash in res.2 {
Ok((res.0, res.1))
}
- fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<Option<msgs::ClosingSigned>, HandleError> {
+ fn internal_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<Option<msgs::ClosingSigned>, MsgHandleErrInternal> {
let (res, chan_option) = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = channel_state_lock.borrow_parts();
match channel_state.by_id.entry(msg.channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
if chan_entry.get().get_their_node_id() != *their_node_id {
- return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None})
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
- let res = chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg)?;
+ let res = chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
if res.1.is_some() {
// We're done with this channel, we've got a signed closing transaction and
// will send the closing_signed back to the remote peer upon return. This
(res, Some(chan_entry.remove_entry().1))
} else { (res, None) }
},
- hash_map::Entry::Vacant(_) => return Err(HandleError{err: "Failed to find corresponding channel", action: None})
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
}
};
if let Some(broadcast_tx) = res.1 {
if let Some(chan) = chan_option {
if let Ok(update) = self.get_channel_update(&chan) {
let mut events = self.pending_events.lock().unwrap();
- events.push(events::Event::BroadcastChannelUpdate {
- msg: update
- });
- }
- }
- Ok(res.0)
- }
-
- fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), msgs::HandleError> {
- //TODO: BOLT 4 points out a specific attack where a peer may re-send an onion packet and
- //determine the state of the payment based on our response/if we forward anything/the time
- //we take to respond. We should take care to avoid allowing such an attack.
- //
- //TODO: There exists a further attack where a node may garble the onion data, forward it to
- //us repeatedly garbled in different ways, and compare our error messages, which are
- //encrypted with the same key. Its not immediately obvious how to usefully exploit that,
- //but we should prevent it anyway.
-
- let shared_secret = SharedSecret::new(&self.secp_ctx, &msg.onion_routing_packet.public_key, &self.our_network_key);
- let (rho, mu) = ChannelManager::gen_rho_mu_from_shared_secret(&shared_secret);
-
- macro_rules! get_onion_hash {
- () => {
- {
- let mut sha = Sha256::new();
- sha.input(&msg.onion_routing_packet.hop_data);
- let mut onion_hash = [0; 32];
- sha.result(&mut onion_hash);
- onion_hash
- }
- }
- }
-
- macro_rules! return_err {
- ($msg: expr, $err_code: expr, $data: expr) => {
- return Err(msgs::HandleError {
- err: $msg,
- action: Some(msgs::ErrorAction::UpdateFailHTLC {
- msg: msgs::UpdateFailHTLC {
- channel_id: msg.channel_id,
- htlc_id: msg.htlc_id,
- reason: ChannelManager::build_first_hop_failure_packet(&shared_secret, $err_code, $data),
- }
- }),
- });
- }
- }
-
- if msg.onion_routing_packet.version != 0 {
- //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
- //sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
- //the hash doesn't really serve any purpuse - in the case of hashing all data, the
- //receiving node would have to brute force to figure out which version was put in the
- //packet by the node that send us the message, in the case of hashing the hop_data, the
- //node knows the HMAC matched, so they already know what is there...
- return_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4, &get_onion_hash!());
- }
-
- let mut hmac = Hmac::new(Sha256::new(), &mu);
- hmac.input(&msg.onion_routing_packet.hop_data);
- hmac.input(&msg.payment_hash);
- if hmac.result() != MacResult::new(&msg.onion_routing_packet.hmac) {
- return_err!("HMAC Check failed", 0x8000 | 0x4000 | 5, &get_onion_hash!());
- }
-
- let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
- let next_hop_data = {
- let mut decoded = [0; 65];
- chacha.process(&msg.onion_routing_packet.hop_data[0..65], &mut decoded);
- match msgs::OnionHopData::decode(&decoded[..]) {
- Err(err) => {
- let error_code = match err {
- msgs::DecodeError::UnknownRealmByte => 0x4000 | 1,
- _ => 0x2000 | 2, // Should never happen
- };
- return_err!("Unable to decode our hop data", error_code, &[0;0]);
- },
- Ok(msg) => msg
- }
- };
-
- //TODO: Check that msg.cltv_expiry is within acceptable bounds!
-
- let mut pending_forward_info = if next_hop_data.hmac == [0; 32] {
- // OUR PAYMENT!
- if next_hop_data.data.amt_to_forward != msg.amount_msat {
- return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
- }
- if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry {
- return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
- }
-
- // Note that we could obviously respond immediately with an update_fulfill_htlc
- // message, however that would leak that we are the recipient of this payment, so
- // instead we stay symmetric with the forwarding case, only responding (after a
- // delay) once they've send us a commitment_signed!
-
- PendingForwardHTLCInfo {
- onion_packet: None,
- payment_hash: msg.payment_hash.clone(),
- short_channel_id: 0,
- prev_short_channel_id: 0,
- amt_to_forward: next_hop_data.data.amt_to_forward,
- outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
- }
- } else {
- let mut new_packet_data = [0; 20*65];
- chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]);
- chacha.process(&ChannelManager::ZERO[0..65], &mut new_packet_data[19*65..]);
-
- let mut new_pubkey = msg.onion_routing_packet.public_key.clone();
-
- let blinding_factor = {
- let mut sha = Sha256::new();
- sha.input(&new_pubkey.serialize()[..]);
- sha.input(&shared_secret[..]);
- let mut res = [0u8; 32];
- sha.result(&mut res);
- match SecretKey::from_slice(&self.secp_ctx, &res) {
- Err(_) => {
- // Return temporary node failure as its technically our issue, not the
- // channel's issue.
- return_err!("Blinding factor is an invalid private key", 0x2000 | 2, &[0;0]);
- },
- Ok(key) => key
- }
- };
-
- match new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor) {
- Err(_) => {
- // Return temporary node failure as its technically our issue, not the
- // channel's issue.
- return_err!("New blinding factor is an invalid private key", 0x2000 | 2, &[0;0]);
- },
- Ok(_) => {}
- };
-
- let outgoing_packet = msgs::OnionPacket {
- version: 0,
- public_key: new_pubkey,
- hop_data: new_packet_data,
- hmac: next_hop_data.hmac.clone(),
- };
-
- PendingForwardHTLCInfo {
- onion_packet: Some(outgoing_packet),
- payment_hash: msg.payment_hash.clone(),
- short_channel_id: next_hop_data.data.short_channel_id,
- prev_short_channel_id: 0,
- amt_to_forward: next_hop_data.data.amt_to_forward,
- outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
- }
- };
-
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = channel_state_lock.borrow_parts();
-
- if pending_forward_info.onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject them in the body here
- let forwarding_id = match channel_state.short_to_id.get(&pending_forward_info.short_channel_id) {
- None => {
- return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]);
- },
- Some(id) => id.clone(),
- };
- let chan = channel_state.by_id.get_mut(&forwarding_id).unwrap();
- let fee = chan.get_our_fee_base_msat(&*self.fee_estimator) + (pending_forward_info.amt_to_forward * self.fee_proportional_millionths as u64 / 1000000) as u32;
- if msg.amount_msat < fee as u64 || (msg.amount_msat - fee as u64) < pending_forward_info.amt_to_forward {
- log_debug!(self, "HTLC {} incorrect amount: in {} out {} fee required {}", msg.htlc_id, msg.amount_msat, pending_forward_info.amt_to_forward, fee);
- //TODO: send back "channel_update" with new fee parameters in onion failure packet
- return_err!("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, &[0;0]);
- }
- if (msg.cltv_expiry as u64) < pending_forward_info.outgoing_cltv_value as u64 + CLTV_EXPIRY_DELTA as u64 {
- log_debug!(self, "HTLC {} incorrect CLTV: in {} out {} delta required {}", msg.htlc_id, msg.cltv_expiry, pending_forward_info.outgoing_cltv_value, CLTV_EXPIRY_DELTA);
- return_err!("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, &[0;0]);
- }
-
- if !chan.is_live() {
- let chan_update = self.get_channel_update(chan).unwrap();
- return_err!("Forwarding channel is not in a ready state.", 0x1000 | 7, &chan_update.encode_with_len()[..]);
+ events.push(events::Event::BroadcastChannelUpdate {
+ msg: update
+ });
}
}
+ Ok(res.0)
+ }
+
+ fn internal_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
+ //TODO: BOLT 4 points out a specific attack where a peer may re-send an onion packet and
+ //determine the state of the payment based on our response/if we forward anything/the time
+ //we take to respond. We should take care to avoid allowing such an attack.
+ //
+ //TODO: There exists a further attack where a node may garble the onion data, forward it to
+ //us repeatedly garbled in different ways, and compare our error messages, which are
+ //encrypted with the same key. Its not immediately obvious how to usefully exploit that,
+ //but we should prevent it anyway.
+
+ let (mut pending_forward_info, shared_secret, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
+ let channel_state = channel_state_lock.borrow_parts();
let claimable_htlcs_entry = channel_state.claimable_htlcs.entry(msg.payment_hash.clone());
// destination. That's OK since those nodes are probably busted or trying to do network
// mapping through repeated loops. In either case, we want them to stop talking to us, so
// we send permanent_node_failure.
- if let &hash_map::Entry::Occupied(ref e) = &claimable_htlcs_entry {
- let mut acceptable_cycle = false;
- if let &PendingOutboundHTLC::OutboundRoute { .. } = e.get() {
- acceptable_cycle = pending_forward_info.short_channel_id == 0;
- }
- if !acceptable_cycle {
- return_err!("Payment looped through us twice", 0x4000 | 0x2000 | 2, &[0;0]);
+ let mut will_forward = false;
+ if let PendingHTLCStatus::Forward(PendingForwardHTLCInfo { short_channel_id, .. }) = pending_forward_info {
+ if let &hash_map::Entry::Occupied(ref e) = &claimable_htlcs_entry {
+ let mut acceptable_cycle = false;
+ if let &PendingOutboundHTLC::OutboundRoute { .. } = e.get() {
+ acceptable_cycle = short_channel_id == 0;
+ }
+ if !acceptable_cycle {
+ log_info!(self, "Failed to accept incoming HTLC: Payment looped through us twice");
+ pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ reason: ChannelManager::build_first_hop_failure_packet(&shared_secret.unwrap(), 0x4000 | 0x2000 | 2, &[0;0]),
+ }));
+ } else {
+ will_forward = true;
+ }
+ } else {
+ will_forward = true;
}
}
let (source_short_channel_id, res) = match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
if chan.get_their_node_id() != *their_node_id {
- return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None})
+ //TODO: here MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
if !chan.is_usable() {
- return Err(HandleError{err: "Channel not yet available for receiving HTLCs", action: None});
+ return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Channel not yet available for receiving HTLCs", action: Some(msgs::ErrorAction::IgnoreError)}));
}
let short_channel_id = chan.get_short_channel_id().unwrap();
- pending_forward_info.prev_short_channel_id = short_channel_id;
- (short_channel_id, chan.update_add_htlc(&msg, pending_forward_info)?)
+ if let PendingHTLCStatus::Forward(ref mut forward_info) = pending_forward_info {
+ forward_info.prev_short_channel_id = short_channel_id;
+ }
+ (short_channel_id, chan.update_add_htlc(&msg, pending_forward_info).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?)
},
- None => return Err(HandleError{err: "Failed to find corresponding channel", action: None}),
+ None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
};
- match claimable_htlcs_entry {
- hash_map::Entry::Occupied(mut e) => {
- let outbound_route = e.get_mut();
- let (route, session_priv) = match outbound_route {
- &mut PendingOutboundHTLC::OutboundRoute { ref route, ref session_priv } => {
- (route.clone(), session_priv.clone())
- },
- _ => unreachable!(),
- };
- *outbound_route = PendingOutboundHTLC::CycledRoute {
- source_short_channel_id,
- incoming_packet_shared_secret: shared_secret,
- route,
- session_priv,
- };
- },
- hash_map::Entry::Vacant(e) => {
- e.insert(PendingOutboundHTLC::IntermediaryHopData {
- source_short_channel_id,
- incoming_packet_shared_secret: shared_secret,
- });
+ if will_forward {
+ match claimable_htlcs_entry {
+ hash_map::Entry::Occupied(mut e) => {
+ let outbound_route = e.get_mut();
+ let (route, session_priv) = match outbound_route {
+ &mut PendingOutboundHTLC::OutboundRoute { ref route, ref session_priv } => {
+ (route.clone(), session_priv.clone())
+ },
+ _ => unreachable!(),
+ };
+ *outbound_route = PendingOutboundHTLC::CycledRoute {
+ source_short_channel_id,
+ incoming_packet_shared_secret: shared_secret.unwrap(),
+ route,
+ session_priv,
+ };
+ },
+ hash_map::Entry::Vacant(e) => {
+ e.insert(PendingOutboundHTLC::IntermediaryHopData {
+ source_short_channel_id,
+ incoming_packet_shared_secret: shared_secret.unwrap(),
+ });
+ }
}
}
Ok(res)
}
- fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
+ fn internal_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
// Claim funds first, cause we don't really care if the channel we received the message on
// is broken, we may have enough info to get our own money!
match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
if chan.get_their_node_id() != *their_node_id {
- return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None})
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
- chan.update_fulfill_htlc(&msg)
+ chan.update_fulfill_htlc(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))
},
- None => return Err(HandleError{err: "Failed to find corresponding channel", action: None})
+ None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
}
}
- fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<Option<msgs::HTLCFailChannelUpdate>, HandleError> {
+ fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<Option<msgs::HTLCFailChannelUpdate>, MsgHandleErrInternal> {
let mut channel_state = self.channel_state.lock().unwrap();
let payment_hash = match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
if chan.get_their_node_id() != *their_node_id {
- return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None})
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
- chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() })
+ chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() }).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))
},
- None => return Err(HandleError{err: "Failed to find corresponding channel", action: None})
+ None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
}?;
if let Some(pending_htlc) = channel_state.claimable_htlcs.get(&payment_hash) {
}
}
- fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> {
+ fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
if chan.get_their_node_id() != *their_node_id {
- return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None})
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
- chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() })
+ chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))
},
- None => return Err(HandleError{err: "Failed to find corresponding channel", action: None})
+ None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
}
}
- fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>), HandleError> {
+ fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>), MsgHandleErrInternal> {
let (revoke_and_ack, commitment_signed, chan_monitor) = {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
if chan.get_their_node_id() != *their_node_id {
- return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None})
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
- chan.commitment_signed(&msg)?
+ chan.commitment_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?
},
- None => return Err(HandleError{err: "Failed to find corresponding channel", action: None})
+ None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
}
};
if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
Ok((revoke_and_ack, commitment_signed))
}
- fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<Option<msgs::CommitmentUpdate>, HandleError> {
+ fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<Option<msgs::CommitmentUpdate>, MsgHandleErrInternal> {
let (res, mut pending_forwards, mut pending_failures, chan_monitor) = {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
if chan.get_their_node_id() != *their_node_id {
- return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None})
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
- chan.revoke_and_ack(&msg)?
+ chan.revoke_and_ack(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?
},
- None => return Err(HandleError{err: "Failed to find corresponding channel", action: None})
+ None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
}
};
if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
Ok(res)
}
- fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> {
+ fn internal_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
if chan.get_their_node_id() != *their_node_id {
- return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None})
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
- chan.update_fee(&*self.fee_estimator, &msg)
+ chan.update_fee(&*self.fee_estimator, &msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))
},
- None => return Err(HandleError{err: "Failed to find corresponding channel", action: None})
+ None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
}
}
- fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> {
+ fn internal_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
let (chan_announcement, chan_update) = {
let mut channel_state = self.channel_state.lock().unwrap();
match channel_state.by_id.get_mut(&msg.channel_id) {
Some(chan) => {
if chan.get_their_node_id() != *their_node_id {
- return Err(HandleError{err: "Got a message for a channel from the wrong node!", action: None})
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
}
if !chan.is_usable() {
- return Err(HandleError{err: "Got an announcement_signatures before we were ready for it", action: None });
+ return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Got an announcement_signatures before we were ready for it", action: Some(msgs::ErrorAction::IgnoreError)}));
}
let our_node_id = self.get_our_node_id();
- let (announcement, our_bitcoin_sig) = chan.get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone())?;
+ let (announcement, our_bitcoin_sig) = chan.get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone())
+ .map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
let were_node_one = announcement.node_id_1 == our_node_id;
let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
- secp_call!(self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }));
- secp_call!(self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }));
+ let bad_sig_action = MsgHandleErrInternal::send_err_msg_close_chan("Bad announcement_signatures node_signature", msg.channel_id);
+ secp_call!(self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }), bad_sig_action);
+ secp_call!(self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }), bad_sig_action);
- let our_node_sig = secp_call!(self.secp_ctx.sign(&msghash, &self.our_network_key));
+ let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
(msgs::ChannelAnnouncement {
node_signature_1: if were_node_one { our_node_sig } else { msg.node_signature },
contents: announcement,
}, self.get_channel_update(chan).unwrap()) // can only fail if we're not in a ready state
},
- None => return Err(HandleError{err: "Failed to find corresponding channel", action: None})
+ None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
}
};
let mut pending_events = self.pending_events.lock().unwrap();
Ok(())
}
+
+}
+
+impl events::EventsProvider for ChannelManager {
+ /// Drains the pending-event queue, returning ownership of all queued events
+ /// and leaving the internal queue empty.
+ fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ let mut ret = Vec::new();
+ // Swap the queue out from under the lock so the caller gets the events
+ // without cloning and the lock is held only briefly.
+ mem::swap(&mut ret, &mut *pending_events);
+ ret
+ }
+}
+
+impl ChainListener for ChannelManager {
+ /// Processes a newly-connected block: advances each channel's view of the chain,
+ /// queues FundingLocked/announcement events for channels that just confirmed,
+ /// and force-closes channels whose funding output was spent on-chain or whose
+ /// monitor says we must broadcast at this height.
+ fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) {
+ let mut new_events = Vec::new();
+ // Channels to finish force-closing *after* the channel_state lock is released.
+ let mut failed_channels = Vec::new();
+ {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_lock.borrow_parts();
+ let short_to_id = channel_state.short_to_id;
+ // retain() lets us drop channels from the map as we walk them; returning
+ // false below removes the channel.
+ channel_state.by_id.retain(|_, channel| {
+ let chan_res = channel.block_connected(header, height, txn_matched, indexes_of_txn_matched);
+ if let Ok(Some(funding_locked)) = chan_res {
+ // Funding just reached sufficient depth: tell the peer and register
+ // the now-known short_channel_id.
+ let announcement_sigs = self.get_announcement_sigs(channel);
+ new_events.push(events::Event::SendFundingLocked {
+ node_id: channel.get_their_node_id(),
+ msg: funding_locked,
+ announcement_sigs: announcement_sigs
+ });
+ short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
+ } else if let Err(e) = chan_res {
+ new_events.push(events::Event::HandleError {
+ node_id: channel.get_their_node_id(),
+ action: e.action,
+ });
+ if channel.is_shutdown() {
+ return false;
+ }
+ }
+ if let Some(funding_txo) = channel.get_funding_txo() {
+ // Check whether any confirmed transaction spent our funding output,
+ // i.e. a (possibly revoked) commitment transaction hit the chain.
+ for tx in txn_matched {
+ for inp in tx.input.iter() {
+ if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
+ if let Some(short_id) = channel.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+ // It looks like our counterparty went on-chain. We go ahead and
+ // broadcast our latest local state as well here, just in case its
+ // some kind of SPV attack, though we expect these to be dropped.
+ failed_channels.push(channel.force_shutdown());
+ if let Ok(update) = self.get_channel_update(&channel) {
+ new_events.push(events::Event::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ return false;
+ }
+ }
+ }
+ }
+ if channel.is_funding_initiated() && channel.channel_monitor().would_broadcast_at_height(height) {
+ if let Some(short_id) = channel.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+ failed_channels.push(channel.force_shutdown());
+ // If would_broadcast_at_height() is true, the channel_monitor will broadcast
+ // the latest local tx for us, so we should skip that here (it doesn't really
+ // hurt anything, but does make tests a bit simpler).
+ failed_channels.last_mut().unwrap().0 = Vec::new();
+ if let Ok(update) = self.get_channel_update(&channel) {
+ new_events.push(events::Event::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ return false;
+ }
+ true
+ });
+ }
+ // Now that the lock is dropped, finish tearing down the failed channels.
+ for failure in failed_channels.drain(..) {
+ self.finish_force_close_channel(failure);
+ }
+ let mut pending_events = self.pending_events.lock().unwrap();
+ for funding_locked in new_events.drain(..) {
+ pending_events.push(funding_locked);
+ }
+ self.latest_block_height.store(height as usize, Ordering::Release);
+ }
+
+ /// We force-close the channel without letting our counterparty participate in the shutdown
+ fn block_disconnected(&self, header: &BlockHeader) {
+ let mut new_events = Vec::new();
+ let mut failed_channels = Vec::new();
+ {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_lock.borrow_parts();
+ let short_to_id = channel_state.short_to_id;
+ channel_state.by_id.retain(|_, v| {
+ // Channel::block_disconnected returning true means the channel can no
+ // longer continue (e.g. its funding confirmation was reorged out), so
+ // we force-shutdown and drop it from the map.
+ if v.block_disconnected(header) {
+ if let Some(short_id) = v.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+ failed_channels.push(v.force_shutdown());
+ if let Ok(update) = self.get_channel_update(&v) {
+ new_events.push(events::Event::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ false
+ } else {
+ true
+ }
+ });
+ }
+ // Finish force-closes outside the channel_state lock.
+ for failure in failed_channels.drain(..) {
+ self.finish_force_close_channel(failure);
+ }
+ if !new_events.is_empty() {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ for funding_locked in new_events.drain(..) {
+ pending_events.push(funding_locked);
+ }
+ }
+ self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
+ }
+}
+
+/// Converts a Result<_, MsgHandleErrInternal> (from an internal_* handler) into the
+/// public Result<_, msgs::HandleError> expected by ChannelMessageHandler, performing
+/// any required channel force-close (or peer disconnect, for an all-zeros channel_id)
+/// as a side effect before propagating the error.
+macro_rules! handle_error {
+ ($self: ident, $internal: expr, $their_node_id: expr) => {
+ match $internal {
+ Ok(msg) => Ok(msg),
+ Err(MsgHandleErrInternal { err, needs_channel_force_close }) => {
+ if needs_channel_force_close {
+ match &err.action {
+ &Some(msgs::ErrorAction::DisconnectPeer { msg: Some(ref msg) }) => {
+ // An all-zeros channel_id refers to the peer as a whole rather
+ // than a specific channel, so disconnect instead of closing one.
+ if msg.channel_id == [0; 32] {
+ $self.peer_disconnected(&$their_node_id, true);
+ } else {
+ $self.force_close_channel(&msg.channel_id);
+ }
+ },
+ &Some(msgs::ErrorAction::DisconnectPeer { msg: None }) => {},
+ &Some(msgs::ErrorAction::IgnoreError) => {},
+ &Some(msgs::ErrorAction::SendErrorMessage { ref msg }) => {
+ if msg.channel_id == [0; 32] {
+ $self.peer_disconnected(&$their_node_id, true);
+ } else {
+ $self.force_close_channel(&msg.channel_id);
+ }
+ },
+ &None => {},
+ }
+ }
+ // Return only the public HandleError; needs_channel_force_close has
+ // already been acted upon above.
+ Err(err)
+ },
+ }
+ }
+}
+
+/// Thin public wrappers: each trait method delegates to the matching internal_*
+/// handler (which returns MsgHandleErrInternal) and uses handle_error! to perform
+/// any required force-close/disconnect and map the error back to msgs::HandleError.
+impl ChannelMessageHandler for ChannelManager {
+ //TODO: Handle errors and close channel (or so)
+ fn handle_open_channel(&self, their_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<msgs::AcceptChannel, HandleError> {
+ handle_error!(self, self.internal_open_channel(their_node_id, msg), their_node_id)
+ }
+
+ fn handle_accept_channel(&self, their_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
+ handle_error!(self, self.internal_accept_channel(their_node_id, msg), their_node_id)
+ }
+
+ fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<msgs::FundingSigned, HandleError> {
+ handle_error!(self, self.internal_funding_created(their_node_id, msg), their_node_id)
+ }
+
+ fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), HandleError> {
+ handle_error!(self, self.internal_funding_signed(their_node_id, msg), their_node_id)
+ }
+
+ fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<Option<msgs::AnnouncementSignatures>, HandleError> {
+ handle_error!(self, self.internal_funding_locked(their_node_id, msg), their_node_id)
+ }
+
+ fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>), HandleError> {
+ handle_error!(self, self.internal_shutdown(their_node_id, msg), their_node_id)
+ }
+
+ fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<Option<msgs::ClosingSigned>, HandleError> {
+ handle_error!(self, self.internal_closing_signed(their_node_id, msg), their_node_id)
+ }
+
+ // Use the unqualified HandleError in the return type for consistency with the
+ // other methods in this impl (it is the same type as msgs::HandleError).
+ fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), HandleError> {
+ handle_error!(self, self.internal_update_add_htlc(their_node_id, msg), their_node_id)
+ }
+
+ fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
+ handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), their_node_id)
+ }
+
+ fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<Option<msgs::HTLCFailChannelUpdate>, HandleError> {
+ handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), their_node_id)
+ }
+
+ fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> {
+ handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg), their_node_id)
+ }
+
+ fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>), HandleError> {
+ handle_error!(self, self.internal_commitment_signed(their_node_id, msg), their_node_id)
+ }
+
+ fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<Option<msgs::CommitmentUpdate>, HandleError> {
+ handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg), their_node_id)
+ }
+
+ fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> {
+ handle_error!(self, self.internal_update_fee(their_node_id, msg), their_node_id)
+ }
+
+ fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> {
+ handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), their_node_id)
+ }
+
fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) {
let mut new_events = Vec::new();
let mut failed_channels = Vec::new();
}
}
}
+
+ fn handle_error(&self, their_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
+ if msg.channel_id == [0; 32] {
+ for chan in self.list_channels() {
+ if chan.remote_network_id == *their_node_id {
+ self.force_close_channel(&chan.channel_id);
+ }
+ }
+ } else {
+ self.force_close_channel(&msg.channel_id);
+ }
+ }
}
#[cfg(test)]
use bitcoin::util::hash::Sha256dHash;
use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::transaction::{Transaction, TxOut};
+ use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;
use bitcoin::network::serialize::serialize;
use bitcoin::network::serialize::BitcoinHash;
use hex;
- use secp256k1::Secp256k1;
+ use secp256k1::{Secp256k1, Message};
use secp256k1::key::{PublicKey,SecretKey};
use crypto::sha2::Sha256;
}
struct Node {
+ // feeest and node_id fields were removed: the fee estimator and node secret
+ // key are passed straight to ChannelManager::new in create_network rather
+ // than being stored on the test Node.
- feeest: Arc<test_utils::TestFeeEstimator>,
chain_monitor: Arc<chaininterface::ChainWatchInterfaceUtil>,
tx_broadcaster: Arc<test_utils::TestBroadcaster>,
chan_monitor: Arc<test_utils::TestChannelMonitor>,
- node_id: SecretKey,
node: Arc<ChannelManager>,
router: Router,
}
impl SendEvent {
fn from_event(event: Event) -> SendEvent {
match event {
- Event::SendHTLCs { node_id, msgs, commitment_msg } => {
- SendEvent { node_id: node_id, msgs: msgs, commitment_msg: commitment_msg }
+ Event::UpdateHTLCs { node_id, updates: msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, commitment_signed } } => {
+ assert!(update_fulfill_htlcs.is_empty());
+ assert!(update_fail_htlcs.is_empty());
+ assert!(update_fail_malformed_htlcs.is_empty());
+ SendEvent { node_id: node_id, msgs: update_add_htlcs, commitment_msg: commitment_signed }
},
_ => panic!("Unexpected event type!"),
}
let events = node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
- Event::SendFulfillHTLC { ref node_id, ref msg, ref commitment_msg } => {
+ Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert_eq!(update_fulfill_htlcs.len(), 1);
+ assert!(update_fail_htlcs.is_empty());
+ assert!(update_fail_malformed_htlcs.is_empty());
expected_next_node = node_id.clone();
- next_msgs = Some((msg.clone(), commitment_msg.clone()));
+ next_msgs = Some((update_fulfill_htlcs[0].clone(), commitment_signed.clone()));
},
_ => panic!("Unexpected event"),
};
let events = node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
- Event::SendFailHTLC { ref node_id, ref msg, ref commitment_msg } => {
+ Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert!(update_fulfill_htlcs.is_empty());
+ assert_eq!(update_fail_htlcs.len(), 1);
+ assert!(update_fail_malformed_htlcs.is_empty());
expected_next_node = node_id.clone();
- next_msgs = Some((msg.clone(), commitment_msg.clone()));
+ next_msgs = Some((update_fail_htlcs[0].clone(), commitment_signed.clone()));
},
_ => panic!("Unexpected event"),
};
for _ in 0..node_count {
let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
- let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Arc::clone(&logger)));
+ let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone()));
let node_id = {
SecretKey::from_slice(&secp_ctx, &key_slice).unwrap()
};
let node = ChannelManager::new(node_id.clone(), 0, true, Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger)).unwrap();
- let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id).unwrap(), Arc::clone(&logger));
- nodes.push(Node { feeest, chain_monitor, tx_broadcaster, chan_monitor, node_id, node, router });
+ let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &node_id), chain_monitor.clone(), Arc::clone(&logger));
+ nodes.push(Node { chain_monitor, tx_broadcaster, chan_monitor, node, router });
}
nodes
res.push(explicit_tx.clone());
} else {
for tx in node_txn.iter() {
- if tx.input.len() == 1 && tx.input[0].prev_hash == chan.3.txid() {
+ if tx.input.len() == 1 && tx.input[0].previous_output.txid == chan.3.txid() {
let mut funding_tx_map = HashMap::new();
funding_tx_map.insert(chan.3.txid(), chan.3.clone());
tx.verify(&funding_tx_map).unwrap();
if has_htlc_tx != HTLCType::NONE {
for tx in node_txn.iter() {
- if tx.input.len() == 1 && tx.input[0].prev_hash == res[0].txid() {
+ if tx.input.len() == 1 && tx.input[0].previous_output.txid == res[0].txid() {
let mut funding_tx_map = HashMap::new();
funding_tx_map.insert(res[0].txid(), res[0].clone());
tx.verify(&funding_tx_map).unwrap();
let mut found_prev = false;
for tx in prev_txn {
- if node_txn[0].input[0].prev_hash == tx.txid() {
+ if node_txn[0].input[0].previous_output.txid == tx.txid() {
let mut funding_tx_map = HashMap::new();
funding_tx_map.insert(tx.txid(), tx.clone());
node_txn[0].verify(&funding_tx_map).unwrap();
let events = $node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
match events[0] {
- Event::SendFulfillHTLC { ref node_id, .. } => {
+ Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert!(update_fail_htlcs.is_empty());
assert_eq!(*node_id, $prev_node.node.get_our_node_id());
},
_ => panic!("Unexpected event"),
nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
{
let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
- assert_eq!(node_txn.len(), 2);
+ assert_eq!(node_txn.len(), 3);
+ assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected
assert_eq!(node_txn[0].input.len(), 1);
let mut funding_tx_map = HashMap::new();
assert_eq!(channel_state.by_id.len(), 0);
assert_eq!(channel_state.short_to_id.len(), 0);
}
+
+ #[test]
+ fn test_invalid_channel_announcement() {
+ // Test BOLT 7 channel_announcement requirements: hand-build custom
+ // channel_announcement messages for a real channel and check the router
+ // rejects those whose chain_hash does not match the configured network.
+ let secp_ctx = Secp256k1::new();
+ let nodes = create_network(2);
+
+ let chan_announcement = create_chan_between_nodes(&nodes[0], &nodes[1]);
+
+ let a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
+ let b_channel_lock = nodes[1].node.channel_state.lock().unwrap();
+ let as_chan = a_channel_lock.by_id.get(&chan_announcement.3).unwrap();
+ let bs_chan = b_channel_lock.by_id.get(&chan_announcement.3).unwrap();
+
+ // Drop the router's existing knowledge of the channel so our hand-built
+ // announcement is processed fresh (presumably ChannelClosed removes it
+ // from the router — confirm against Router::handle_htlc_fail_channel_update).
+ let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap() } );
+
+ let as_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &as_chan.get_local_keys().funding_key);
+ let bs_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &bs_chan.get_local_keys().funding_key);
+
+ let as_network_key = nodes[0].node.get_our_node_id();
+ let bs_network_key = nodes[1].node.get_our_node_id();
+
+ // BOLT 7 orders the two nodes by their bitcoin (funding) key serialization.
+ let were_node_one = as_bitcoin_key.serialize()[..] < bs_bitcoin_key.serialize()[..];
+
+ let mut chan_announcement;
+
+ // Builds an unsigned announcement for the real channel, defaulting to the
+ // Testnet genesis chain_hash; individual cases override chain_hash below.
+ macro_rules! dummy_unsigned_msg {
+ () => {
+ msgs::UnsignedChannelAnnouncement {
+ features: msgs::GlobalFeatures::new(),
+ chain_hash: genesis_block(Network::Testnet).header.bitcoin_hash(),
+ short_channel_id: as_chan.get_short_channel_id().unwrap(),
+ node_id_1: if were_node_one { as_network_key } else { bs_network_key },
+ node_id_2: if were_node_one { bs_network_key } else { as_network_key },
+ bitcoin_key_1: if were_node_one { as_bitcoin_key } else { bs_bitcoin_key },
+ bitcoin_key_2: if were_node_one { bs_bitcoin_key } else { as_bitcoin_key },
+ excess_data: Vec::new(),
+ };
+ }
+ }
+
+ // Signs the unsigned message with both nodes' funding and network keys and
+ // assembles the full ChannelAnnouncement into `chan_announcement`.
+ macro_rules! sign_msg {
+ ($unsigned_msg: expr) => {
+ let msghash = Message::from_slice(&Sha256dHash::from_data(&$unsigned_msg.encode()[..])[..]).unwrap();
+ let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_local_keys().funding_key);
+ let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_local_keys().funding_key);
+ let as_node_sig = secp_ctx.sign(&msghash, &nodes[0].node.our_network_key);
+ let bs_node_sig = secp_ctx.sign(&msghash, &nodes[1].node.our_network_key);
+ chan_announcement = msgs::ChannelAnnouncement {
+ node_signature_1 : if were_node_one { as_node_sig } else { bs_node_sig},
+ node_signature_2 : if were_node_one { bs_node_sig } else { as_node_sig},
+ bitcoin_signature_1: if were_node_one { as_bitcoin_sig } else { bs_bitcoin_sig },
+ bitcoin_signature_2 : if were_node_one { bs_bitcoin_sig } else { as_bitcoin_sig },
+ contents: $unsigned_msg
+ }
+ }
+ }
+
+ // Case 1: correctly signed announcement on the configured chain — accepted.
+ let unsigned_msg = dummy_unsigned_msg!();
+ sign_msg!(unsigned_msg);
+ assert_eq!(nodes[0].router.handle_channel_announcement(&chan_announcement).unwrap(), true);
+ let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap() } );
+
+ // Case 2: mainnet genesis chain_hash while nodes run Testnet — rejected.
+ // Configured with Network::Testnet
+ let mut unsigned_msg = dummy_unsigned_msg!();
+ unsigned_msg.chain_hash = genesis_block(Network::Bitcoin).header.bitcoin_hash();
+ sign_msg!(unsigned_msg);
+ assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
+
+ // Case 3: arbitrary garbage chain_hash — rejected.
+ let mut unsigned_msg = dummy_unsigned_msg!();
+ unsigned_msg.chain_hash = Sha256dHash::from_data(&[1,2,3,4,5,6,7,8,9]);
+ sign_msg!(unsigned_msg);
+ assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
+ }
}