+ /// Handles an inbound `update_add_htlc` message from the peer `their_node_id`.
+ ///
+ /// Decodes the onion packet first (which also returns the channel-state lock,
+ /// presumably so the decoded forwarding decision and the channel lookup below
+ /// happen under one lock acquisition -- TODO confirm against
+ /// `decode_update_add_htlc_onion`), then validates that the channel exists and
+ /// belongs to the sending peer, downgrades the HTLC to a backwards failure if
+ /// the channel is no longer usable, and finally hands the HTLC to the channel.
+ fn internal_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
+ //TODO: BOLT 4 points out a specific attack where a peer may re-send an onion packet and
+ //determine the state of the payment based on our response/if we forward anything/the time
+ //we take to respond. We should take care to avoid allowing such an attack.
+ //
+ //TODO: There exists a further attack where a node may garble the onion data, forward it to
+ //us repeatedly garbled in different ways, and compare our error messages, which are
+ //encrypted with the same key. Its not immediately obvious how to usefully exploit that,
+ //but we should prevent it anyway.
+
+ // Onion decode happens before the channel lookup; the returned lock guards
+ // everything below. `pending_forward_info` is mutable because the unusable-
+ // channel case below rewrites a Forward decision into a Fail decision.
+ let (mut pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
+ let channel_state = channel_state_lock.borrow_parts();
+
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ // Reject messages that reference a channel belonging to a different peer.
+ if chan.get().get_their_node_id() != *their_node_id {
+ //TODO: here MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
+ if !chan.get().is_usable() {
+ // If the update_add is completely bogus, the call will Err and we will close,
+ // but if we've sent a shutdown and they haven't acknowledged it yet, we just
+ // want to reject the new HTLC and fail it backwards instead of forwarding.
+ if let PendingHTLCStatus::Forward(PendingForwardHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info {
+ let chan_update = self.get_channel_update(chan.get());
+ // Replace the forward decision with an UpdateFailHTLC relayed back to
+ // the sender; the failure packet is encrypted with the shared secret
+ // recovered from the onion above.
+ pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ reason: if let Ok(update) = chan_update {
+ // TODO: Note that |20 is defined as "channel FROM the processing
+ // node has been disabled" (emphasis mine), which seems to imply
+ // that we can't return |20 for an inbound channel being disabled.
+ // This probably needs a spec update but should definitely be
+ // allowed.
+ // Failure code 0x1000|20 carries the flags plus the full
+ // length-prefixed channel_update as its data payload.
+ onion_utils::build_first_hop_failure_packet(&incoming_shared_secret, 0x1000|20, &{
+ let mut res = Vec::with_capacity(8 + 128);
+ res.extend_from_slice(&byte_utils::be16_to_array(update.contents.flags));
+ res.extend_from_slice(&update.encode_with_len()[..]);
+ res
+ }[..])
+ } else {
+ // This can only happen if the channel isn't in the fully-funded
+ // state yet, implying our counterparty is trying to route payments
+ // over the channel back to themselves (cause no one else should
+ // know the short_id is a lightning channel yet). We should have no
+ // problem just calling this unknown_next_peer
+ onion_utils::build_first_hop_failure_packet(&incoming_shared_secret, 0x4000|10, &[])
+ },
+ }));
+ }
+ }
+ // try_chan_entry! force-closes and returns early on a channel error,
+ // so statements after it only run on success.
+ try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info), channel_state, chan);
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ Ok(())
+ }
+
+ /// Handles an inbound `update_fulfill_htlc` message: validates the sending
+ /// peer, records the fulfill on the channel (recovering the HTLC's source),
+ /// then claims the funds backwards along that source.
+ ///
+ /// Note the channel lock (`channel_lock`) is intentionally still held when
+ /// passed into `claim_funds_internal`, so the claim happens under the same
+ /// lock acquisition as the channel update.
+ fn internal_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let htlc_source = {
+ let channel_state = channel_lock.borrow_parts();
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ // Reject messages that reference a channel belonging to a different peer.
+ if chan.get().get_their_node_id() != *their_node_id {
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
+ // On success this evaluates to the HTLC's source; on channel error
+ // try_chan_entry! force-closes and returns early.
+ try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan)
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ };
+ self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone());
+ Ok(())
+ }
+
+ /// Handles an inbound `update_fail_htlc` message: validates the sending peer
+ /// and records the failure on the channel, wrapping the peer-provided opaque
+ /// failure `reason` as an `ErrorPacket` to be relayed backwards later.
+ fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_lock.borrow_parts();
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ // Reject messages that reference a channel belonging to a different peer.
+ if chan.get().get_their_node_id() != *their_node_id {
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
+ // try_chan_entry! force-closes and returns early on a channel error.
+ try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() }), channel_state, chan);
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ Ok(())
+ }
+
+ /// Handles an inbound `update_fail_malformed_htlc` message: validates the
+ /// sending peer, requires the BADONION bit (0x8000) to be set in the failure
+ /// code (force-closing via `try_chan_entry!` if it is not, per BOLT 2), then
+ /// records the failure on the channel with an empty data payload.
+ fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_lock.borrow_parts();
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ // Reject messages that reference a channel belonging to a different peer.
+ if chan.get().get_their_node_id() != *their_node_id {
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
+ if (msg.failure_code & 0x8000) == 0 {
+ // Routing the Err through try_chan_entry! reuses its force-close-
+ // and-return-early machinery rather than erroring ad hoc.
+ try_chan_entry!(self, Err(ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set")), channel_state, chan);
+ }
+ try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan);
+ Ok(())
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ }
+
+ /// Handles an inbound `commitment_signed` message: validates the sending peer,
+ /// applies the signature to the channel, persists the updated channel monitor,
+ /// and queues the resulting outbound messages (our revoke_and_ack, optionally a
+ /// counter-signed commitment update, and optionally a closing_signed).
+ fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ // Reject messages that reference a channel belonging to a different peer.
+ if chan.get().get_their_node_id() != *their_node_id {
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
+ let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) =
+ try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &*self.fee_estimator), channel_state, chan);
+ // The monitor update must be persisted before we hand any resulting
+ // messages to the peer; on persistence failure return_monitor_err!
+ // handles the error (returning early) rather than proceeding.
+ if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, commitment_signed.is_some());
+ //TODO: Rebroadcast closing_signed if present on monitor update restoration
+ }
+ // Always send our revoke_and_ack in response.
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+ node_id: their_node_id.clone(),
+ msg: revoke_and_ack,
+ });
+ // If the channel also produced a commitment_signed of our own, send it
+ // as a CommitmentUpdate containing only that signature (no HTLC or fee
+ // updates accompany it here).
+ if let Some(msg) = commitment_signed {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: their_node_id.clone(),
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed: msg,
+ },
+ });
+ }
+ // A closing_signed is produced when shutdown negotiation can progress.
+ if let Some(msg) = closing_signed {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+ node_id: their_node_id.clone(),
+ msg,
+ });
+ }
+ Ok(())
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ }
+
+ /// Queues freshly-accepted HTLC forwards for later processing.
+ ///
+ /// Each element of `per_source_pending_forwards` pairs the inbound (previous
+ /// hop) short channel id with the HTLCs received over it; every HTLC is moved
+ /// into `channel_state.forward_htlcs`, keyed by its *outgoing* short channel
+ /// id. If the forward map was empty, a `PendingHTLCsForwardable` event is
+ /// generated with a randomized delay of 1x-5x
+ /// MIN_HTLC_RELAY_HOLDING_CELL_MILLIS -- presumably to batch forwards and
+ /// obscure timing, per the BOLT 4 probing concerns noted elsewhere in this
+ /// file (TODO confirm intent).
+ #[inline]
+ fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingForwardHTLCInfo, u64)>)]) {
+ for &mut (prev_short_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
+ let mut forward_event = None;
+ if !pending_forwards.is_empty() {
+ let mut channel_state = self.channel_state.lock().unwrap();
+ // Only schedule a new forwarding event when none is outstanding
+ // (i.e. the forward map was empty before this batch).
+ if channel_state.forward_htlcs.is_empty() {
+ forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
+ channel_state.next_forward = forward_event.unwrap();
+ }
+ for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
+ // Group forwards by the outgoing channel they will be sent over.
+ match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
+ hash_map::Entry::Occupied(mut entry) => {
+ entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info });
+ },
+ hash_map::Entry::Vacant(entry) => {
+ entry.insert(vec!(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info }));
+ }
+ }
+ }
+ }
+ // channel_state lock is released above before taking the
+ // pending_events lock here, keeping the two locks un-nested.
+ match forward_event {
+ Some(time) => {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push(events::Event::PendingHTLCsForwardable {
+ time_forwardable: time
+ });
+ }
+ None => {},
+ }
+ }
+ }
+
+ fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
+ let (pending_forwards, mut pending_failures, short_channel_id) = {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_their_node_id() != *their_node_id {
+ //TODO: here and below MsgHandleErrInternal, #153 case