+ // Create some initial channels
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+ let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
+
+ // Rebalance the network a bit by relaying one payment through all the channels...
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
+
+ // Send some more payments
+ send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
+ send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
+ send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);
+
+ // Test failure packets
+ let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
+ fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);
+
+ // Add a new channel that skips 3
+ let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3);
+
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
+ send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
+ send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+ send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+ send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+ send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+ send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+
+ // Do some rebalance loop payments, simultaneously
+ let mut hops = Vec::with_capacity(3);
+ hops.push(RouteHop {
+ pubkey: nodes[2].node.get_our_node_id(),
+ short_channel_id: chan_2.0.contents.short_channel_id,
+ fee_msat: 0,
+ cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32
+ });
+ hops.push(RouteHop {
+ pubkey: nodes[3].node.get_our_node_id(),
+ short_channel_id: chan_3.0.contents.short_channel_id,
+ fee_msat: 0,
+ cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32
+ });
+ hops.push(RouteHop {
+ pubkey: nodes[1].node.get_our_node_id(),
+ short_channel_id: chan_4.0.contents.short_channel_id,
+ fee_msat: 1000000,
+ cltv_expiry_delta: TEST_FINAL_CLTV,
+ });
+ hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
+ hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
+ let payment_preimage_1 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
+
+ let mut hops = Vec::with_capacity(3);
+ hops.push(RouteHop {
+ pubkey: nodes[3].node.get_our_node_id(),
+ short_channel_id: chan_4.0.contents.short_channel_id,
+ fee_msat: 0,
+ cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32
+ });
+ hops.push(RouteHop {
+ pubkey: nodes[2].node.get_our_node_id(),
+ short_channel_id: chan_3.0.contents.short_channel_id,
+ fee_msat: 0,
+ cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32
+ });
+ hops.push(RouteHop {
+ pubkey: nodes[1].node.get_our_node_id(),
+ short_channel_id: chan_2.0.contents.short_channel_id,
+ fee_msat: 1000000,
+ cltv_expiry_delta: TEST_FINAL_CLTV,
+ });
+ hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
+ hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
+ let payment_hash_2 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
+
+ // Claim the rebalances...
+ fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
+ claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
+
+ // Add a duplicate new channel from 2 to 4
+ let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3);
+
+ // Send some payments across both channels
+ let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
+ let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
+ let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
+
+ route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
+
+ //TODO: Test that routes work again here as we've been notified that the channel is full
+
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3);
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4);
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5);
+
+ // Close down the channels...
+ close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
+ close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
+ close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
+ close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
+ close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
+
+ // Check that we processed all pending events
+ for node in nodes {
+ assert_eq!(node.node.get_and_clear_pending_events().len(), 0);
+ assert_eq!(node.chan_monitor.added_monitors.lock().unwrap().len(), 0);
+ }
+ }
+
+ /// Which HTLC-resolving transaction `test_txn_broadcast` should expect alongside the
+ /// commitment transaction: none, an HTLC-timeout tx, or an HTLC-success tx.
+ #[derive(PartialEq)]
+ enum HTLCType { NONE, TIMEOUT, SUCCESS }
+ /// Verifies the set of transactions `node` has broadcast following a channel closure.
+ ///
+ /// `chan` is the tuple produced at channel creation: the two `ChannelUpdate`s, the
+ /// channel id, and the funding `Transaction`. If `commitment_tx` is `Some` it is taken
+ /// as the commitment transaction; otherwise the commitment tx is located among the
+ /// broadcasts as the single-input tx spending the funding txid, and script-verified
+ /// against the funding output. If `has_htlc_tx` is not `NONE`, a second tx spending the
+ /// commitment tx must be present: a nonzero locktime for `TIMEOUT`, zero for `SUCCESS`.
+ ///
+ /// Clears the node's broadcast list and returns `[commitment_tx]` or
+ /// `[commitment_tx, htlc_tx]`.
+ fn test_txn_broadcast(node: &Node, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction> {
+ let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
+ // Expect at least one broadcast per tx we must discover ourselves: the commitment tx
+ // (unless handed in explicitly) plus the HTLC tx when one is required.
+ assert!(node_txn.len() >= if commitment_tx.is_some() { 0 } else { 1 } + if has_htlc_tx == HTLCType::NONE { 0 } else { 1 });
+
+ let mut res = Vec::with_capacity(2);
+
+ if let Some(explicit_tx) = commitment_tx {
+ res.push(explicit_tx.clone());
+ } else {
+ // Locate the broadcast commitment tx by its spend of the funding txid and verify
+ // its scripts against the funding transaction.
+ for tx in node_txn.iter() {
+ if tx.input.len() == 1 && tx.input[0].previous_output.txid == chan.3.txid() {
+ let mut funding_tx_map = HashMap::new();
+ funding_tx_map.insert(chan.3.txid(), chan.3.clone());
+ tx.verify(&funding_tx_map).unwrap();
+ res.push(tx.clone());
+ }
+ }
+ }
+ // Exactly one commitment transaction must have been collected.
+ assert_eq!(res.len(), 1);
+
+ if has_htlc_tx != HTLCType::NONE {
+ // Find the tx that spends the commitment tx and check its locktime matches the
+ // requested HTLC resolution type.
+ for tx in node_txn.iter() {
+ if tx.input.len() == 1 && tx.input[0].previous_output.txid == res[0].txid() {
+ let mut funding_tx_map = HashMap::new();
+ funding_tx_map.insert(res[0].txid(), res[0].clone());
+ tx.verify(&funding_tx_map).unwrap();
+ if has_htlc_tx == HTLCType::TIMEOUT {
+ // HTLC-timeout transactions carry a nonzero locktime (the HTLC expiry).
+ assert!(tx.lock_time != 0);
+ } else {
+ // HTLC-success transactions are not locktimed.
+ assert!(tx.lock_time == 0);
+ }
+ res.push(tx.clone());
+ break;
+ }
+ }
+ assert_eq!(res.len(), 2);
+ }
+ // Drain the broadcast list so subsequent checks start from a clean slate.
+ node_txn.clear();
+ res
+ }
+
+ /// Checks that the node's first broadcast transaction claims an HTLC output of one of
+ /// the commitment transactions in `prev_txn` (presumably via the payment preimage in
+ /// its witness, per the function name — the code only checks witness element size).
+ /// Returns, and clears, the node's full broadcast list.
+ fn check_preimage_claim(node: &Node, prev_txn: &Vec<Transaction>) -> Vec<Transaction> {
+ let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
+
+ assert!(node_txn.len() >= 1);
+ assert_eq!(node_txn[0].input.len(), 1);
+ let mut found_prev = false;
+
+ // The claim tx must spend exactly one of the supplied commitment transactions;
+ // verify its scripts against whichever one it spends.
+ for tx in prev_txn {
+ if node_txn[0].input[0].previous_output.txid == tx.txid() {
+ let mut funding_tx_map = HashMap::new();
+ funding_tx_map.insert(tx.txid(), tx.clone());
+ node_txn[0].verify(&funding_tx_map).unwrap();
+
+ assert!(node_txn[0].input[0].witness[2].len() > 106); // must spend an htlc output
+ assert_eq!(tx.input.len(), 1); // must spend a commitment tx
+
+ found_prev = true;
+ break;
+ }
+ }
+ assert!(found_prev);
+
+ // Hand the broadcast list back to the caller, leaving the node's list empty.
+ let mut res = Vec::new();
+ mem::swap(&mut *node_txn, &mut res);
+ res
+ }
+
+ fn get_announce_close_broadcast_events(nodes: &Vec<Node>, a: usize, b: usize) {
+ let events_1 = nodes[a].node.get_and_clear_pending_events();
+ assert_eq!(events_1.len(), 1);
+ let as_update = match events_1[0] {
+ Event::BroadcastChannelUpdate { ref msg } => {
+ msg.clone()
+ },
+ _ => panic!("Unexpected event"),