Merge pull request #1011 from TheBlueMatt/2021-07-new-closing-fee
author    Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
          Tue, 17 Aug 2021 17:53:21 +0000 (17:53 +0000)
committer GitHub <noreply@github.com>
          Tue, 17 Aug 2021 17:53:21 +0000 (17:53 +0000)
Clean up existing and add range-based closing_signed negotiation

lightning/src/ln/channelmanager.rs
lightning/src/util/ser_macros.rs

index 38e2172ed409c0eb684c5b0b1290820c47c23c49,cff314679ab01e3cb5a01791c574a73c617213b6..ac2297f86aeab74651b7f2f86ba2f903870be565
@@@ -276,6 -276,10 +276,10 @@@ impl MsgHandleErrInternal 
        fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
                Self {
                        err: match err {
+                               ChannelError::Warn(msg) => LightningError {
+                                       err: msg,
+                                       action: msgs::ErrorAction::IgnoreError,
+                               },
                                ChannelError::Ignore(msg) => LightningError {
                                        err: msg,
                                        action: msgs::ErrorAction::IgnoreError,
@@@ -819,6 -823,11 +823,11 @@@ macro_rules! handle_error 
  macro_rules! convert_chan_err {
        ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => {
                match $err {
+                       ChannelError::Warn(msg) => {
+                               //TODO: Once warning messages are merged, we should send a `warning` message to our
+                               //peer here.
+                               (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
+                       },
                        ChannelError::Ignore(msg) => {
                                (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
                        },
@@@ -1275,12 -1284,7 +1284,7 @@@ impl<Signer: Sign, M: Deref, T: Deref, 
                self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
        }
  
-       /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
-       /// will be accepted on the given channel, and after additional timeout/the closing of all
-       /// pending HTLCs, the channel will be closed on chain.
-       ///
-       /// May generate a SendShutdown message event on success, which should be relayed.
-       pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+       fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
  
                let counterparty_node_id;
                                                Some(peer_state) => {
                                                        let peer_state = peer_state.lock().unwrap();
                                                        let their_features = &peer_state.latest_features;
-                                                       chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features)?
+                                                       chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
                                                },
                                                None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
                                        };
                Ok(())
        }
  
+       /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
+       /// will be accepted on the given channel, and after additional timeout/the closing of all
+       /// pending HTLCs, the channel will be closed on chain.
+       ///
+       ///  * If we are the channel initiator, we will pay a closing-transaction fee between our
+       ///    [`Background`] fee estimate and [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]
+       ///    plus our [`Normal`] fee estimate.
+       ///  * If our counterparty is the channel initiator, we will require a channel closing
+       ///    transaction feerate of at least our [`Background`] feerate or the feerate which
+       ///    would appear on a force-closure transaction, whichever is lower. We will allow our
+       ///    counterparty to pay as much fee as they'd like, however.
+       ///
+       /// May generate a SendShutdown message event on success, which should be relayed.
+       ///
+       /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
+       /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
+       /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+       pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+               self.close_channel_internal(channel_id, None)
+       }
+       /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
+       /// will be accepted on the given channel, and after additional timeout/the closing of all
+       /// pending HTLCs, the channel will be closed on chain.
+       ///
+       /// `target_feerate_sats_per_1000_weight` has a different meaning depending on whether we
+       /// initiated the channel being closed:
+       ///  * If we are the channel initiator, we will pay at least this feerate on the closing
+       ///    transaction. The upper bound is set by
+       ///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee
+       ///    estimate (or `target_feerate_sats_per_1000_weight`, if it is greater).
+       ///  * If our counterparty is the channel initiator, we will refuse to accept a closing
+       ///    transaction feerate below `target_feerate_sats_per_1000_weight` (or the feerate which
+       ///    will appear on a force-closure transaction, whichever is lower).
+       ///
+       /// May generate a SendShutdown message event on success, which should be relayed.
+       ///
+       /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
+       /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
+       /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+       pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
+               self.close_channel_internal(channel_id, Some(target_feerate_sats_per_1000_weight))
+       }
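
A usage sketch of the new API (hedged: `channel_manager` and `channel_id` are assumed to be in scope, and the feerate value is only illustrative):

```rust
// Begin a cooperative close using the default feerate bounds described above.
channel_manager.close_channel(&channel_id)?;

// Alternatively, require at least 253 sat per 1000 weight (~1 sat/vbyte) on the
// closing transaction instead of the default lower bound.
channel_manager.close_channel_with_target_feerate(&channel_id, 253)?;
```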
        #[inline]
        fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
                let (monitor_update_option, mut failed_htlcs) = shutdown_res;
  
                let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
                let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&msg.onion_routing_packet.hop_data[..]) };
 -              let (next_hop_data, next_hop_hmac) = {
 -                      match msgs::OnionHopData::read(&mut chacha_stream) {
 +              let (next_hop_data, next_hop_hmac): (msgs::OnionHopData, _) = {
 +                      match <msgs::OnionHopData as Readable>::read(&mut chacha_stream) {
                                Err(err) => {
                                        let error_code = match err {
                                                msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
                                                                        // close channel and then send error message to peer.
                                                                        let counterparty_node_id = chan.get().get_counterparty_node_id();
                                                                        let err: Result<(), _>  = match e {
-                                                                               ChannelError::Ignore(_) => {
+                                                                               ChannelError::Ignore(_) | ChannelError::Warn(_) => {
                                                                                        panic!("Stated return value requirements in send_commitment() were not met");
-                                                                               },
+                                                                               }
                                                                                ChannelError::Close(msg) => {
                                                                                        log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
                                                                                        let (channel_id, mut channel) = chan.remove_entry();
                                let pending_msg_events = &mut channel_state.pending_msg_events;
                                let short_to_id = &mut channel_state.short_to_id;
                                channel_state.by_id.retain(|chan_id, chan| {
+                                       let counterparty_node_id = chan.get_counterparty_node_id();
+                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+                                       if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
+                                       if err.is_err() {
+                                               handle_errors.push((err, counterparty_node_id));
+                                       }
+                                       if !retain_channel { return false; }
+                                       if let Err(e) = chan.timer_check_closing_negotiation_progress() {
+                                               let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
+                                               handle_errors.push((Err(err), chan.get_counterparty_node_id()));
+                                               if needs_close { return false; }
+                                       }
                                        match chan.channel_update_status() {
                                                ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
                                                ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
                                                _ => {},
                                        }
  
-                                       let counterparty_node_id = chan.get_counterparty_node_id();
-                                       let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
-                                       if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
-                                       if err.is_err() {
-                                               handle_errors.push((err, counterparty_node_id));
-                                       }
-                                       retain_channel
+                                       true
                                });
                        }
  
                        for (err, counterparty_node_id) in handle_errors.drain(..) {
                                let _ = handle_error!(self, err, counterparty_node_id);
                        }
                        should_persist
                });
        }
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
  
-                                       let (shutdown, closing_signed, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &self.keys_manager, &their_features, &msg), channel_state, chan_entry);
+                                       if !chan_entry.get().received_shutdown() {
+                                               log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+                                                       log_bytes!(msg.channel_id),
+                                                       if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
+                                       }
+                                       let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry);
                                        dropped_htlcs = htlcs;
  
                                        // Update the monitor with the shutdown script if necessary.
                                                        msg,
                                                });
                                        }
-                                       if let Some(msg) = closing_signed {
-                                               // TODO: Do not send this if the monitor update failed.
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-                                                       node_id: *counterparty_node_id,
-                                                       msg,
-                                               });
-                                       }
  
                                        break Ok(());
                                },
                                if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                }
-                               let (revoke_and_ack, commitment_signed, closing_signed, monitor_update) =
-                                       match chan.get_mut().commitment_signed(&msg, &self.fee_estimator, &self.logger) {
+                               let (revoke_and_ack, commitment_signed, monitor_update) =
+                                       match chan.get_mut().commitment_signed(&msg, &self.logger) {
                                                Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan),
                                                Err((Some(update), e)) => {
                                                        assert!(chan.get().is_awaiting_monitor_update());
                                        };
                                if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                        return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
-                                       //TODO: Rebroadcast closing_signed if present on monitor update restoration
                                }
                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
                                        node_id: counterparty_node_id.clone(),
                                                },
                                        });
                                }
-                               if let Some(msg) = closing_signed {
-                                       channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-                                               node_id: counterparty_node_id.clone(),
-                                               msg,
-                                       });
-                               }
                                Ok(())
                        },
                        hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                                                break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
                                        let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
-                                       let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update, htlcs_to_fail_in) =
-                                               break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan);
+                                       let (commitment_update, pending_forwards, pending_failures, monitor_update, htlcs_to_fail_in) =
+                                               break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan);
                                        htlcs_to_fail = htlcs_to_fail_in;
                                        if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                if was_frozen_for_monitor {
-                                                       assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
+                                                       assert!(commitment_update.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
                                                        break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
                                                } else {
                                                        if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures) {
                                                        updates,
                                                });
                                        }
-                                       if let Some(msg) = closing_signed {
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
-                                                       node_id: counterparty_node_id.clone(),
-                                                       msg,
-                                               });
-                                       }
                                        break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"), chan.get().get_funding_txo().unwrap()))
                                },
                                hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        });
                }
  
-               let has_update = has_monitor_update || !failed_htlcs.is_empty();
+               let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
                for (failures, channel_id) in failed_htlcs.drain(..) {
                        self.fail_holding_cell_htlcs(failures, channel_id);
                }
                has_update
        }
  
+       /// Check whether any channels have finished removing all pending updates after a shutdown
+       /// exchange and can now send a closing_signed.
+       /// Returns whether any closing_signed messages were generated.
+       fn maybe_generate_initial_closing_signed(&self) -> bool {
+               let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
+               let mut has_update = false;
+               {
+                       let mut channel_state_lock = self.channel_state.lock().unwrap();
+                       let channel_state = &mut *channel_state_lock;
+                       let by_id = &mut channel_state.by_id;
+                       let short_to_id = &mut channel_state.short_to_id;
+                       let pending_msg_events = &mut channel_state.pending_msg_events;
+                       by_id.retain(|channel_id, chan| {
+                               match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
+                                       Ok((msg_opt, tx_opt)) => {
+                                               if let Some(msg) = msg_opt {
+                                                       has_update = true;
+                                                       pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+                                                               node_id: chan.get_counterparty_node_id(), msg,
+                                                       });
+                                               }
+                                               if let Some(tx) = tx_opt {
+                                                       // We're done with this channel. We got a closing_signed and sent back
+                                                       // a closing_signed with a closing transaction to broadcast.
+                                                       if let Some(short_id) = chan.get_short_channel_id() {
+                                                               short_to_id.remove(&short_id);
+                                                       }
+                                                       if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+                                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                                       msg: update
+                                                               });
+                                                       }
+                                                       log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
+                                                       self.tx_broadcaster.broadcast_transaction(&tx);
+                                                       false
+                                               } else { true }
+                                       },
+                                       Err(e) => {
+                                               has_update = true;
+                                               let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+                                               handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+                                               !close_channel
+                                       }
+                               }
+                       });
+               }
+               for (counterparty_node_id, err) in handle_errors.drain(..) {
+                       let _ = handle_error!(self, err, counterparty_node_id);
+               }
+               has_update
+       }
        /// Handle a list of channel failures during a block_connected or block_disconnected call,
        /// pushing the channel monitor update (if any) to the background events queue and removing the
        /// Channel object.
@@@ -4096,6 -4194,9 +4194,9 @@@ impl<Signer: Sign, M: Deref, T: Deref, 
                        if self.check_free_holding_cells() {
                                result = NotifyOption::DoPersist;
                        }
+                       if self.maybe_generate_initial_closing_signed() {
+                               result = NotifyOption::DoPersist;
+                       }
  
                        let mut pending_events = Vec::new();
                        let mut channel_state = self.channel_state.lock().unwrap();
index af1bebe680b3db9324be291c257f293f8d6f90a8,0ad9710a794b11f6e545a4a9e3ac627dc634f654..5178732c7457e93745d39319fa2f2ecee99a80dd
@@@ -115,9 -115,6 +115,9 @@@ macro_rules! check_tlv_order 
        ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, vec_type) => {{
                // no-op
        }};
 +      ($last_seen_type: expr, $typ: expr, $type: expr, $field: ident, ignorable) => {{
 +              // no-op
 +      }};
  }
  
  macro_rules! check_missing_tlv {
        ($last_seen_type: expr, $type: expr, $field: ident, option) => {{
                // no-op
        }};
 +      ($last_seen_type: expr, $type: expr, $field: ident, ignorable) => {{
 +              // no-op
 +      }};
  }
  
  macro_rules! decode_tlv {
        ($reader: expr, $field: ident, option) => {{
                $field = Some(ser::Readable::read(&mut $reader)?);
        }};
 +      ($reader: expr, $field: ident, ignorable) => {{
 +              $field = ser::MaybeReadable::read(&mut $reader)?;
 +      }};
  }
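
A hypothetical read-side sketch of the new `ignorable` field type (the type and variable names are illustrative; these macros are crate-internal). Because the field is read via `MaybeReadable`, a serialized value this reader does not understand, such as a newer odd enum variant, leaves the field as `None` instead of failing the whole read:

```rust
// `ExampleMaybeReadableEvent` is a hypothetical type implementing MaybeReadable.
let mut maybe_event: Option<ExampleMaybeReadableEvent> = None;
decode_tlv_stream!(&mut reader, {
	(0, maybe_event, ignorable),
});
```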
  
  macro_rules! decode_tlv_stream {
        ($stream: expr, {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}) => { {
                use ln::msgs::DecodeError;
                let mut last_seen_type: Option<u64> = None;
+               let mut stream_ref = $stream;
                'tlv_read: loop {
                        use util::ser;
  
                                // determine whether we should break or return ShortRead if we get an
                                // UnexpectedEof. This should in every case be largely cosmetic, but it's nice to
                                // pass the TLV test vectors exactly, which require this distinction.
-                               let mut tracking_reader = ser::ReadTrackingReader::new($stream);
+                               let mut tracking_reader = ser::ReadTrackingReader::new(&mut stream_ref);
                                match ser::Readable::read(&mut tracking_reader) {
                                        Err(DecodeError::ShortRead) => {
                                                if !tracking_reader.have_read {
                        last_seen_type = Some(typ.0);
  
                        // Finally, read the length and value itself:
-                       let length: ser::BigSize = ser::Readable::read($stream)?;
-                       let mut s = ser::FixedLengthReader::new($stream, length.0);
+                       let length: ser::BigSize = ser::Readable::read(&mut stream_ref)?;
+                       let mut s = ser::FixedLengthReader::new(&mut stream_ref, length.0);
                        match typ.0 {
                                $($type => {
                                        decode_tlv!(s, $field, $fieldty);
@@@ -380,7 -372,7 +381,7 @@@ macro_rules! read_ver_prefix 
  /// Reads a suffix added by write_tlv_fields.
  macro_rules! read_tlv_fields {
        ($stream: expr, {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}) => { {
 -              let tlv_len = ::util::ser::BigSize::read($stream)?;
 +              let tlv_len: ::util::ser::BigSize = ::util::ser::Readable::read($stream)?;
                let mut rd = ::util::ser::FixedLengthReader::new($stream, tlv_len.0);
                decode_tlv_stream!(&mut rd, {$(($type, $field, $fieldty)),*});
                rd.eat_remaining().map_err(|_| ::ln::msgs::DecodeError::ShortRead)?;
@@@ -414,7 -406,7 +415,7 @@@ macro_rules! init_tlv_field_var 
        };
        ($field: ident, option) => {
                let mut $field = None;
 -      }
 +      };
  }
  
  /// Implements Readable/Writeable for a struct storing it as a set of TLVs
@@@ -467,7 -459,17 +468,7 @@@ macro_rules! impl_writeable_tlv_based 
        }
  }
  
 -/// Implement Readable and Writeable for an enum, with struct variants stored as TLVs and tuple
 -/// variants stored directly.
 -/// The format is, for example
 -/// impl_writeable_tlv_based_enum!(EnumName,
 -///   (0, StructVariantA) => {(0, required_variant_field, required), (1, optional_variant_field, option)},
 -///   (1, StructVariantB) => {(0, variant_field_a, required), (1, variant_field_b, required), (2, variant_vec_field, vec_type)};
 -///   (2, TupleVariantA), (3, TupleVariantB),
 -/// );
 -/// The type is written as a single byte, followed by any variant data.
 -/// Attempts to read an unknown type byte result in DecodeError::UnknownRequiredFeature.
 -macro_rules! impl_writeable_tlv_based_enum {
 +macro_rules! _impl_writeable_tlv_based_enum_common {
        ($st: ident, $(($variant_id: expr, $variant_name: ident) =>
                {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}
        ),* $(,)*;
                                Ok(())
                        }
                }
 +      }
 +}
 +
 +/// Implement MaybeReadable and Writeable for an enum, with struct variants stored as TLVs and
 +/// tuple variants stored directly.
 +///
 +/// This is largely identical to `impl_writeable_tlv_based_enum`, except that reading an unknown,
 +/// odd variant id returns `Ok(None)` instead of `Err(UnknownRequiredFeature)`. It should generally
 +/// be preferred over plain `Readable` whenever `MaybeReadable` is practical, as it provides an
 +/// upgrade path: new variants can be added later and are simply ignored by existing clients.
 +macro_rules! impl_writeable_tlv_based_enum_upgradable {
 +      ($st: ident, $(($variant_id: expr, $variant_name: ident) =>
 +              {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}
 +      ),* $(,)*) => {
 +              _impl_writeable_tlv_based_enum_common!($st,
 +                      $(($variant_id, $variant_name) => {$(($type, $field, $fieldty)),*}),*; );
 +
 +              impl ::util::ser::MaybeReadable for $st {
 +                      fn read<R: $crate::io::Read>(reader: &mut R) -> Result<Option<Self>, ::ln::msgs::DecodeError> {
 +                              let id: u8 = ::util::ser::Readable::read(reader)?;
 +                              match id {
 +                                      $($variant_id => {
 +                                              // Because read_tlv_fields creates a labeled loop, we cannot call it twice
 +                                              // in the same function body. Instead, we define a closure and call it.
 +                                              let f = || {
 +                                                      $(
 +                                                              init_tlv_field_var!($field, $fieldty);
 +                                                      )*
 +                                                      read_tlv_fields!(reader, {
 +                                                              $(($type, $field, $fieldty)),*
 +                                                      });
 +                                                      Ok(Some($st::$variant_name {
 +                                                              $(
 +                                                                      $field: init_tlv_based_struct_field!($field, $fieldty)
 +                                                              ),*
 +                                                      }))
 +                                              };
 +                                              f()
 +                                      }),*
 +                                      _ if id % 2 == 1 => Ok(None),
 +                                      _ => Err(DecodeError::UnknownRequiredFeature),
 +                              }
 +                      }
 +              }
 +
 +      }
 +}
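
A hypothetical usage sketch (the enum and its fields are not part of this patch): variant ids and field definitions follow the same TLV conventions as `impl_writeable_tlv_based_enum!`, but a reader that does not recognize a newer odd variant id gets `Ok(None)` and can skip the value rather than erroring:

```rust
// ExampleClosingEvent is illustrative only; variant 1 is odd, so older readers
// that predate it will simply ignore it when deserializing.
impl_writeable_tlv_based_enum_upgradable!(ExampleClosingEvent,
	(0, NegotiationStarted) => {
		(0, our_min_fee_satoshis, required),
		(2, our_max_fee_satoshis, required),
	},
	(1, NegotiationComplete) => {
		(0, closing_fee_satoshis, required),
		(2, counterparty_initiated, option),
	},
);
```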
 +
 +/// Implement Readable and Writeable for an enum, with struct variants stored as TLVs and tuple
 +/// variants stored directly.
 +/// The format is, for example
 +/// impl_writeable_tlv_based_enum!(EnumName,
 +///   (0, StructVariantA) => {(0, required_variant_field, required), (1, optional_variant_field, option)},
 +///   (1, StructVariantB) => {(0, variant_field_a, required), (1, variant_field_b, required), (2, variant_vec_field, vec_type)};
 +///   (2, TupleVariantA), (3, TupleVariantB),
 +/// );
 +/// The type is written as a single byte, followed by any variant data.
 +/// Attempts to read an unknown type byte result in DecodeError::UnknownRequiredFeature.
 +macro_rules! impl_writeable_tlv_based_enum {
 +      ($st: ident, $(($variant_id: expr, $variant_name: ident) =>
 +              {$(($type: expr, $field: ident, $fieldty: tt)),* $(,)*}
 +      ),* $(,)*;
 +      $(($tuple_variant_id: expr, $tuple_variant_name: ident)),*  $(,)*) => {
 +              _impl_writeable_tlv_based_enum_common!($st,
 +                      $(($variant_id, $variant_name) => {$(($type, $field, $fieldty)),*}),*;
 +                      $(($tuple_variant_id, $tuple_variant_name)),*);
  
                impl ::util::ser::Readable for $st {
                        fn read<R: $crate::io::Read>(reader: &mut R) -> Result<Self, ::ln::msgs::DecodeError> {