static const int XDP_PASS = 0;
static const int XDP_DROP = 1;
-static long drop_cnt_map[RULECNT + STATIC_RULE_CNT];
+static long drop_cnt_map[STATS_RULECNT + STATIC_RULE_CNT];
#define INCREMENT_MATCH(reason) { drop_cnt_map[reason] += 1; drop_cnt_map[reason] += data_end - pktdata; }
#else /* TEST */
};
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
- __uint(max_entries, RULECNT + STATIC_RULE_CNT);
+ __uint(max_entries, STATS_RULECNT + STATIC_RULE_CNT);
__u32 *key;
struct match_counter *value;
} drop_cnt_map SEC(".maps");
// Then we build an array of MAX_ENTRIES/2**SRC_HASH_MAX_PARALLELISM_POW entries,
// which are split into buckets of size SRC_HASH_BUCKET_COUNT. An entry can appear
// in any of the SRC_HASH_BUCKET_COUNT buckets at its hash value.
-#define SRC_HASH_MAX_PARALLELISM_POW 9
+//
+// Because we use buckets of size 16, see collision_prob.py, the number of
+// elements we can hold with only a 1% probability of overflowing a bucket is:
+//
+// 128K-entry hash table (2MiB): ~33K sources
+// 256K-entry hash table (4MiB): ~63K sources
+// 512K-entry hash table (8MiB): ~119K sources
+// 1M-entry hash table (16MiB): ~227K sources
+#define SRC_HASH_MAX_PARALLELISM_POW 8
#define SRC_HASH_MAX_PARALLELISM (1 << SRC_HASH_MAX_PARALLELISM_POW)
-#define SRC_HASH_BUCKET_COUNT_POW 3
+#define SRC_HASH_BUCKET_COUNT_POW 4
#define SRC_HASH_BUCKET_COUNT (1 << SRC_HASH_BUCKET_COUNT_POW)
-#include "rand.h"
-
#define CREATE_PERSRC_LOOKUP(IPV, IP_TYPE) \
struct persrc_rate##IPV##_entry { \
uint64_t sent_time; \
__attribute__((always_inline)) \
static inline struct persrc_rate##IPV##_ptr get_v##IPV##_persrc_ratelimit(IP_TYPE key, void *map, size_t map_limit, int64_t cur_time_masked) { \
struct persrc_rate##IPV##_ptr res = { .rate = NULL, .lock = NULL }; \
- uint64_t hash = siphash(&key, sizeof(key), COMPILE_TIME_RAND); \
+ uint64_t hash = siphash_##IP_TYPE(key); \
\
const uint32_t map_key = hash % SRC_HASH_MAX_PARALLELISM; \
struct persrc_rate##IPV##_bucket *buckets = bpf_map_lookup_elem(map, &map_key); \