+/// Scans a single peer: at `scan_time` it opens a connection to `node`,
+/// performs a version/verack handshake, requests the peer's address list
+/// and a specific recent block, and records the outcome as an
+/// `AddressState` in the global data store.
+///
+/// The work is spawned onto the tokio runtime; this function returns
+/// immediately. `manual` forces a log line even when the node's recorded
+/// state does not change.
+pub fn scan_node(scan_time: Instant, node: SocketAddr, manual: bool) {
+ // Don't start new connections once shutdown has begun.
+ if START_SHUTDOWN.load(Ordering::Relaxed) { return; }
+ // NOTE(review): PRINTER and DATA_STORE are global statics assumed to be
+ // initialized before any scan is spawned -- confirm startup ordering.
+ let printer = unsafe { PRINTER.as_ref().unwrap() };
+ let store = unsafe { DATA_STORE.as_ref().unwrap() };
+
+ // Per-connection state, shared between the message-handling future and
+ // the final bookkeeping closure at the bottom of this function.
+ let peer_state = Arc::new(Mutex::new(PeerState {
+ recvd_version: false,
+ recvd_verack: false,
+ recvd_addrs: false,
+ recvd_block: false,
+ node_services: 0,
+ // Default failure: if the peer never sends anything we time out.
+ fail_reason: AddressState::Timeout,
+ // (log message, force-print flag); empty until something noteworthy happens.
+ msg: (String::new(), false),
+ // Snapshot of the current scan target. The lock guard is dropped right
+ // after the Arc is cloned, so only the Arc (presumably
+ // (height, block hash, block) -- see .0/.1/.2 uses below) is retained.
+ request: Arc::clone(&unsafe { REQUEST_BLOCK.as_ref().unwrap() }.lock().unwrap()),
+ }));
+ let final_peer_state = Arc::clone(&peer_state);
+
+ // Delay the actual connect until scan_time, then dial the peer with the
+ // configured run timeout.
+ let peer = Delay::new(scan_time).then(move |_| {
+ printer.set_stat(Stat::NewConnection);
+ let timeout = store.get_u64(U64Setting::RunTimeout);
+ Peer::new(node.clone(), Duration::from_secs(timeout), printer)
+ });
+ tokio::spawn(peer.and_then(move |(mut write, read)| {
+ // Bound the whole message exchange by an absolute deadline measured
+ // from scan_time, then process each incoming message in turn.
+ TimeoutStream::new_timeout(read, scan_time + Duration::from_secs(store.get_u64(U64Setting::RunTimeout))).map_err(|_| { () }).for_each(move |msg| {
+ let mut state_lock = peer_state.lock().unwrap();
+ // Marks a once-only message as received; a duplicate is a protocol
+ // violation. Note the flag is set back to false on a duplicate so
+ // the "all flags set" success check below cannot pass.
+ macro_rules! check_set_flag {
+ ($recvd_flag: ident, $msg: expr) => { {
+ if state_lock.$recvd_flag {
+ state_lock.fail_reason = AddressState::ProtocolViolation;
+ state_lock.msg = (format!("due to dup {}", $msg), true);
+ state_lock.$recvd_flag = false;
+ return future::err(());
+ }
+ state_lock.$recvd_flag = true;
+ } }
+ }
+ // We heard *something*, so any later timeout happened mid-request
+ // rather than before contact. Specific failures overwrite this below.
+ state_lock.fail_reason = AddressState::TimeoutDuringRequest;
+ match msg {
+ NetworkMessage::Version(ver) => {
+ // Reject absurd heights: negative, or more than ~2 weeks of
+ // blocks (1008*2, presumably at 144 blocks/day) past our tip.
+ if ver.start_height < 0 || ver.start_height as u64 > state_lock.request.0 + 1008*2 {
+ state_lock.fail_reason = AddressState::HighBlockCount;
+ return future::err(());
+ }
+ // Strip non-ASCII and non-printable chars so the user agent is
+ // safe to embed in log lines.
+ let safe_ua = ver.user_agent.replace(|c: char| !c.is_ascii() || c < ' ' || c > '~', "");
+ if (ver.start_height as u64) < state_lock.request.0 {
+ state_lock.msg = (format!("({} < {})", ver.start_height, state_lock.request.0), true);
+ state_lock.fail_reason = AddressState::LowBlockCount;
+ return future::err(());
+ }
+ let min_version = store.get_u64(U64Setting::MinProtocolVersion);
+ if (ver.version as u64) < min_version {
+ state_lock.msg = (format!("({} < {})", ver.version, min_version), true);
+ state_lock.fail_reason = AddressState::LowVersion;
+ return future::err(());
+ }
+ // Require at least one of service bits 0 or 10 -- presumably
+ // NODE_NETWORK / NODE_NETWORK_LIMITED (BIP 159); confirm.
+ if ver.services & (1 | (1 << 10)) == 0 {
+ state_lock.msg = (format!("({}: services {:x})", safe_ua, ver.services), true);
+ state_lock.fail_reason = AddressState::NotFullNode;
+ return future::err(());
+ }
+ // NOTE(review): the regex is matched against the raw user_agent,
+ // not safe_ua -- only the *logged* string is sanitized.
+ if !store.get_regex(RegexSetting::SubverRegex).is_match(&ver.user_agent) {
+ state_lock.msg = (format!("subver {}", safe_ua), true);
+ state_lock.fail_reason = AddressState::BadVersion;
+ return future::err(());
+ }
+ check_set_flag!(recvd_version, "version");
+ state_lock.node_services = ver.services;
+ state_lock.msg = (format!("(subver: {})", safe_ua), false);
+ // Complete the handshake; a full send buffer aborts the scan.
+ if let Err(_) = write.try_send(NetworkMessage::Verack) {
+ return future::err(());
+ }
+ },
+ NetworkMessage::Verack => {
+ check_set_flag!(recvd_verack, "verack");
+ // Handshake done: ask for the peer's known addresses.
+ if let Err(_) = write.try_send(NetworkMessage::GetAddr) {
+ return future::err(());
+ }
+ },
+ NetworkMessage::Ping(v) => {
+ // Keep the connection alive while we wait for addr/block.
+ if let Err(_) = write.try_send(NetworkMessage::Pong(v)) {
+ return future::err(())
+ }
+ },
+ NetworkMessage::Addr(addrs) => {
+ // The protocol caps addr messages at 1000 entries; more is a
+ // violation. recvd_addrs is cleared so the success check fails.
+ if addrs.len() > 1000 {
+ state_lock.fail_reason = AddressState::ProtocolViolation;
+ state_lock.msg = (format!("due to oversized addr: {}", addrs.len()), true);
+ state_lock.recvd_addrs = false;
+ return future::err(());
+ }
+ // On the first addr message, move on to the final probe: request
+ // the target block (with witness data) by hash.
+ if !state_lock.recvd_addrs {
+ if let Err(_) = write.try_send(NetworkMessage::GetData(vec![Inventory {
+ inv_type: InvType::WitnessBlock,
+ hash: state_lock.request.1,
+ }])) {
+ return future::err(());
+ }
+ }
+ // Multiple addr messages are allowed (no check_set_flag here).
+ state_lock.recvd_addrs = true;
+ // Feed the advertised addresses back into the crawl queue.
+ unsafe { DATA_STORE.as_ref().unwrap() }.add_fresh_nodes(&addrs);
+ },
+ NetworkMessage::Block(block) => {
+ // The peer must return exactly the block we asked for.
+ if block != state_lock.request.2 {
+ state_lock.fail_reason = AddressState::ProtocolViolation;
+ state_lock.msg = ("due to bad block".to_string(), true);
+ return future::err(());
+ }
+ check_set_flag!(recvd_block, "block");
+ // Scan complete -- err(()) here is the *success* path: it simply
+ // tears down the connection; the flags decide the final state.
+ return future::err(());
+ },
+ _ => {},
+ }
+ future::ok(())
+ }).then(|_| {
+ // Map stream completion (or error) to err(()) so the outer and_then
+ // chain always falls through to the bookkeeping closure below.
+ future::err(())
+ })
+ }).then(move |_: Result<(), ()>| {
+ // Runs on every outcome: connect failure, timeout, protocol error,
+ // or successful completion. Decide and record the final state.
+ let printer = unsafe { PRINTER.as_ref().unwrap() };
+ let store = unsafe { DATA_STORE.as_ref().unwrap() };
+ printer.set_stat(Stat::ConnectionClosed);
+
+ let mut state_lock = final_peer_state.lock().unwrap();
+ // Success requires every stage of the probe to have completed.
+ if state_lock.recvd_version && state_lock.recvd_verack &&
+ state_lock.recvd_addrs && state_lock.recvd_block {
+ let old_state = store.set_node_state(node, AddressState::Good, state_lock.node_services);
+ if manual || (old_state != AddressState::Good && state_lock.msg.0 != "") {
+ printer.add_line(format!("Updating {} from {} to Good {}", node, old_state.to_str(), &state_lock.msg.0), state_lock.msg.1);
+ }
+ } else {
+ assert!(state_lock.fail_reason != AddressState::Good);
+ // Refine a generic mid-request timeout: if the handshake finished,
+ // report which stage (addr or block) we were still waiting on.
+ if state_lock.fail_reason == AddressState::TimeoutDuringRequest && state_lock.recvd_version && state_lock.recvd_verack {
+ if !state_lock.recvd_addrs {
+ state_lock.fail_reason = AddressState::TimeoutAwaitingAddr;
+ } else if !state_lock.recvd_block {
+ state_lock.fail_reason = AddressState::TimeoutAwaitingBlock;
+ }
+ }
+ // Failed nodes are recorded with no service bits.
+ let old_state = store.set_node_state(node, state_lock.fail_reason, 0);
+ if (manual || old_state != state_lock.fail_reason) && state_lock.fail_reason == AddressState::TimeoutDuringRequest {
+ printer.add_line(format!("Updating {} from {} to Timeout During Request (ver: {}, vack: {})",
+ node, old_state.to_str(), state_lock.recvd_version, state_lock.recvd_verack), true);
+ } else if manual || (old_state != state_lock.fail_reason && state_lock.msg.0 != "" && state_lock.msg.1) {
+ printer.add_line(format!("Updating {} from {} to {} {}", node, old_state.to_str(), state_lock.fail_reason.to_str(), &state_lock.msg.0), state_lock.msg.1);
+ }
+ }
+ future::ok(())
+ }));
+}
+
+fn poll_dnsseeds() {