// Compile-time size guard: if `(u32, Route)` ever grows beyond 36 bytes, this
// subtraction underflows during const evaluation and the crate fails to build.
// The constant's value is never read — it exists only for that check.
// NOTE(review): `Route` is declared elsewhere in this file; the 36-byte budget
// presumably matches the routing-table entry size — confirm against `Route`.
#[allow(dead_code)] // intentionally unused; evaluated for the compile-time check only
const ROUTE_LEN: usize = 36 - std::mem::size_of::<(u32, Route)>();
+
// To keep memory tight (and since we don't need such close alignment), newtype the v4/v6 routing
// table entries to make sure they are aligned to single bytes.
+
/// An IPv4 prefix (address bytes + prefix length) stored with no padding.
///
/// `#[repr(packed)]` forces byte alignment so each entry occupies exactly
/// 5 bytes; the `V4_ALIGN`/`V4_SIZE` guards below verify this at compile time.
#[repr(packed)]
#[derive(PartialEq, Eq, Hash)]
struct V4Addr {
    addr: [u8; 4],
    pfxlen: u8,
}

impl From<(Ipv4Addr, u8)> for V4Addr {
    /// Converts an `(address, prefix-length)` pair into the packed form.
    fn from((ip, pfxlen): (Ipv4Addr, u8)) -> Self {
        V4Addr {
            addr: ip.octets(),
            pfxlen,
        }
    }
}
// Compile-time layout guards for V4Addr: each subtraction underflows (a const-
// evaluation error) if the alignment exceeds 1 or the size exceeds 5 bytes,
// i.e. if the #[repr(packed)] layout above ever stops holding. The values are
// never read.
#[allow(dead_code)] // evaluated at compile time only
const V4_ALIGN: usize = 1 - std::mem::align_of::<V4Addr>();
#[allow(dead_code)] // evaluated at compile time only
const V4_SIZE: usize = 5 - std::mem::size_of::<V4Addr>();
+
/// An IPv6 prefix (address bytes + prefix length) stored with no padding.
///
/// `#[repr(packed)]` forces byte alignment so each entry occupies exactly
/// 17 bytes; the `V6_ALIGN`/`V6_SIZE` guards below verify this at compile time.
#[repr(packed)]
#[derive(PartialEq, Eq, Hash)]
struct V6Addr {
    addr: [u8; 16],
    pfxlen: u8,
}

impl From<(Ipv6Addr, u8)> for V6Addr {
    /// Converts an `(address, prefix-length)` pair into the packed form.
    fn from((ip, pfxlen): (Ipv6Addr, u8)) -> Self {
        V6Addr {
            addr: ip.octets(),
            pfxlen,
        }
    }
}
// Compile-time layout guards for V6Addr: each subtraction underflows (a const-
// evaluation error) if the alignment exceeds 1 or the size exceeds 17 bytes,
// i.e. if the #[repr(packed)] layout above ever stops holding. The values are
// never read.
#[allow(dead_code)] // evaluated at compile time only
const V6_ALIGN: usize = 1 - std::mem::align_of::<V6Addr>();
#[allow(dead_code)] // evaluated at compile time only
const V6_SIZE: usize = 17 - std::mem::size_of::<V6Addr>();