+
#[test]
fn test_timer_tick_occurred() {
	// Spin up two peer managers, connect them, and confirm peer_mgrs[0] now tracks one peer.
	let peer_cfgs = create_peermgr_cfgs(2);
	let peer_mgrs = create_network(2, &peer_cfgs);
	establish_connection(&peer_mgrs[0], &peer_mgrs[1]);
	let connected_count = |idx: usize| peer_mgrs[idx].peers.lock().unwrap().peers.len();
	assert_eq!(connected_count(0), 1);

	// First tick: the peer is flagged as awaiting a pong but stays connected.
	peer_mgrs[0].timer_tick_occured();
	assert_eq!(connected_count(0), 1);

	// Second tick while the pong is still outstanding: the peer gets disconnected.
	peer_mgrs[0].timer_tick_occured();
	assert_eq!(connected_count(0), 0);
}
+
#[test]
fn test_do_attempt_write_data() {
	// Build two peers whose custom TestRoutingMessageHandlers both request a full
	// routing sync, then connect them.
	let cfgs = create_peermgr_cfgs(2);
	for cfg in cfgs.iter() {
		cfg.routing_handler.request_full_sync.store(true, Ordering::Release);
	}
	let peers = create_network(2, &cfgs);

	// establish_connection drives do_attempt_write_data between the peers. That
	// function used to spin forever whenever the routing handlers produced more
	// channel messages than fit in a peer's outbound buffer — exactly the
	// condition these test handlers create — so this test guards that regression.
	let (mut fd_a, mut fd_b) = establish_connection(&peers[0], &peers[1]);

	// Deliver each peer the bytes its counterpart queued. Note the ordering
	// matters: fd_b's outbound data is only drained after peers[1] has processed
	// fd_a's bytes (which may enqueue further writes on fd_b).
	let data_from_a = fd_a.outbound_data.lock().unwrap().split_off(0);
	peers[1].read_event(&mut fd_b, &data_from_a).unwrap();
	let data_from_b = fd_b.outbound_data.lock().unwrap().split_off(0);
	peers[0].read_event(&mut fd_a, &data_from_b).unwrap();

	// Both sides should have received the full complement of channel updates
	// and channel announcements.
	for cfg in cfgs.iter() {
		assert_eq!(cfg.routing_handler.chan_upds_recvd.load(Ordering::Acquire), 100);
		assert_eq!(cfg.routing_handler.chan_anns_recvd.load(Ordering::Acquire), 50);
	}
}
+
#[test]
fn limit_initial_routing_sync_requests() {
	// Performs one handshake in which only `syncing_peer` (0 or 1) requests
	// initial_routing_sync, then checks that exactly that peer's flag appears in
	// the features its counterpart recorded for it.
	fn run_case(syncing_peer: usize) {
		let cfgs = create_peermgr_cfgs(2);
		cfgs[syncing_peer].routing_handler.request_full_sync.store(true, Ordering::Release);
		let peers = create_network(2, &cfgs);
		let (fd_0_to_1, fd_1_to_0) = establish_connection_and_read_events(&peers[0], &peers[1]);

		let peer_0 = peers[0].peers.lock().unwrap();
		let peer_1 = peers[1].peers.lock().unwrap();

		// Each manager stores the features its counterpart sent: peer 0's
		// advertised features live in peers[1]'s map and vice versa.
		let peer_0_features = peer_1.peers.get(&fd_1_to_0).unwrap().their_features.as_ref();
		let peer_1_features = peer_0.peers.get(&fd_0_to_1).unwrap().their_features.as_ref();

		assert_eq!(peer_0_features.unwrap().initial_routing_sync(), syncing_peer == 0);
		assert_eq!(peer_1_features.unwrap().initial_routing_sync(), syncing_peer == 1);
	}

	// Inbound peer 0 requests initial_routing_sync, but outbound peer 1 does not.
	run_case(0);
	// Outbound peer 1 requests initial_routing_sync, but inbound peer 0 does not.
	run_case(1);
}