use secp256k1::key::{PublicKey,SecretKey};
use secp256k1::Secp256k1;
+use std::cmp::Ordering;
+use std::collections::HashSet;
use std::sync::{Arc,Mutex};
use std::io::Cursor;
	pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
}
impl TestChannelMonitor {
-	pub fn new(chain_monitor: Arc<chaininterface::ChainWatchInterface>, broadcaster: Arc<chaininterface::BroadcasterInterface>, logger: Arc<Logger>) -> Self {
+	pub fn new(chain_monitor: Arc<chaininterface::ChainWatchInterface>, broadcaster: Arc<chaininterface::BroadcasterInterface>, logger: Arc<Logger>, feeest: Arc<chaininterface::FeeEstimator>) -> Self {
		Self {
-			simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger),
+			simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger, feeest),
			update_ret: Mutex::new(Ok(())),
		}
	}
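
For context, the fee estimator threaded through above is typically a fixed-rate stub in a fuzz harness. A minimal sketch of such an estimator follows; the FuzzEstimator name, the constant 253 sat/kW, and the exact trait signature are assumptions about the surrounding harness and rust-lightning revision, not part of this patch.

// Hypothetical fixed-rate fee estimator a harness might hand in as fee_est.
pub struct FuzzEstimator {}
impl chaininterface::FeeEstimator for FuzzEstimator {
	// Assumed method signature for this era of the API; returning a constant
	// keeps fuzz runs deterministic.
	fn get_est_sat_per_1000_weight(&self, _target: chaininterface::ConfirmationTarget) -> u64 {
		253
	}
}
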
	($node_id: expr) => { {
		let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string()));
		let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin, Arc::clone(&logger)));
-		let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone()));
+		let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));
		let keys_manager = Arc::new(KeyProvider { node_id: $node_id });
		let mut config = UserConfig::new();
		config.channel_options.fee_proportional_millionths = 0;
		config.channel_options.announced_channel = true;
-		config.channel_limits.min_dust_limit_satoshis = 0;
+		config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
		(ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), watch.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config).unwrap(),
		monitor)
	} }
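
The TestChannelMonitor returned alongside the ChannelManager is what exposes the update_ret field from the first hunk, letting the harness inject monitor-update failures. A rough usage sketch, assuming a make_node-style invocation of this macro and the TemporaryFailure error variant (both assumptions about the surrounding harness):

// Hypothetical driver code: build a node, then force monitor updates to fail temporarily.
let (node_a, monitor_a) = make_node!(0);
*monitor_a.update_ret.lock().unwrap() = Err(channelmonitor::ChannelMonitorUpdateErr::TemporaryFailure);
// ... exercise node_a while updates fail ...
*monitor_a.update_ret.lock().unwrap() = Ok(());
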
macro_rules! process_events {
	($node: expr, $fail: expr) => { {
-		for event in nodes[$node].get_and_clear_pending_events() {
+		// In case we get 256 payments we may have a hash collision, resulting in the
+		// second claim/fail call not finding the duplicate-hash HTLC, so we have to
+		// deduplicate the calls here.
+		let mut claim_set = HashSet::new();
+		let mut events = nodes[$node].get_and_clear_pending_events();
+		// Sort events so that PendingHTLCsForwardable get processed last. This avoids a
+		// case where we first process a PendingHTLCsForwardable, then claim/fail on a
+		// PaymentReceived, claiming/failing two HTLCs, but leaving a just-generated
+		// PaymentReceived event for the second HTLC in our pending_events (and breaking
+		// our claim_set deduplication).
+		events.sort_by(|a, b| {
+			if let events::Event::PaymentReceived { .. } = a {
+				if let events::Event::PendingHTLCsForwardable { .. } = b {
+					Ordering::Less
+				} else { Ordering::Equal }
+			} else if let events::Event::PendingHTLCsForwardable { .. } = a {
+				if let events::Event::PaymentReceived { .. } = b {
+					Ordering::Greater
+				} else { Ordering::Equal }
+			} else { Ordering::Equal }
+		});
+		for event in events.drain(..) {
			match event {
				events::Event::PaymentReceived { payment_hash, .. } => {
-					if $fail {
-						assert!(nodes[$node].fail_htlc_backwards(&payment_hash, 0));
-					} else {
-						assert!(nodes[$node].claim_funds(PaymentPreimage(payment_hash.0)));
+					if claim_set.insert(payment_hash.0) {
+						if $fail {
+							assert!(nodes[$node].fail_htlc_backwards(&payment_hash));
+						} else {
+							assert!(nodes[$node].claim_funds(PaymentPreimage(payment_hash.0)));
+						}
					}
				},
				events::Event::PaymentSent { .. } => {},