You
package main

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"log"
	"net"
	"net/http"
	"net/netip"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/redis/go-redis/v9"
)

// ──────────────────────────────────────────────────────────────────────────────
// Config via ENV
// ──────────────────────────────────────────────────────────────────────────────

func getenv(key, def string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return def
}

const defaultDelay = 2 * time.Second // default gap between two downloads

var (
	listenAddr     = getenv("LISTEN_ADDR", ":8080")
	storeKind      = getenv("STORE", "memory") // "memory" or "redis"
	importBaseURL  = getenv("FLOD_IMPORT_URL", "https://git.send.nrw/sendnrw/flod-lists/raw/branch/main/")
	importInterval = mustParseDuration(getenv("IMPORT_INTERVAL", "30m"))
	delayStr       = os.Getenv("DELAY")
	enableHoneypot = strings.EqualFold(getenv("HONEYPOT", "off"), "on")
	honeyTCP       = splitCSV(getenv("HONEY_TCP", "135,139,445,389,636,3268,3269,88,3389"))
	honeyUDP       = splitCSV(getenv("HONEY_UDP", "135,137,138,389,3389,88"))
	dataDir        = getenv("DATA_DIR", "/data")
)

func splitCSV(s string) []string {
	var out []string
	for _, p := range strings.Split(s, ",") {
		p = strings.TrimSpace(p)
		if p != "" {
			out = append(out, p)
		}
	}
	return out
}

func mustParseDuration(s string) time.Duration {
	d, err := time.ParseDuration(s)
	if err != nil {
		log.Fatalf("bad IMPORT_INTERVAL %q: %v", s, err)
	}
	return d
}
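// Illustrative only (not part of the original paste): one possible environment
// configuration for the variables read above. The values are examples; only the
// defaults already set in code are authoritative.
//
//	LISTEN_ADDR=:8080
//	STORE=redis
//	REDIS_ADDR=redis:6379
//	FLOD_IMPORT_URL=https://git.send.nrw/sendnrw/flod-lists/raw/branch/main/
//	IMPORT_INTERVAL=30m
//	DELAY=2                # seconds between two list downloads
//	HONEYPOT=on
//	HONEY_TCP=135,139,445,3389
//	HONEY_UDP=137,138,389
//	DATA_DIR=/data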
// ──────────────────────────────────────────────────────────────────────────────
// Storage Abstraction
// ──────────────────────────────────────────────────────────────────────────────

type Store interface {
	// Blocklists
	PutPrefix(cat string, prefix string) error
	ListPrefixes(cat string) ([]netip.Prefix, error)
	ListCats() []string
	Count(cat string) (int, error)

	// Whitelist
	AddWhitelist(ip netip.Addr) error
	IsWhitelisted(ip netip.Addr) (bool, error)
	WhitelistCount() (int, error)
	AddWhitelistPrefix(pfx netip.Prefix) error
	RemoveWhitelistPrefix(pfx netip.Prefix) error
	ListWhitelistPrefixes() ([]netip.Prefix, error)

	// Manual hits
	AddManual(ip string, port string, proto string) error
	ListManual() map[string][]string
	ManualUniqueIPs() (int, error)

	AddBlacklist(pfx netip.Prefix, reason string) error
	DelBlacklist(pfx netip.Prefix) error
	ListBlacklist() (map[netip.Prefix]string, error) // Prefix -> Reason
	CountBlacklist() (int, error)
}

// In-memory implementation. Blacklist and whitelist entries are persisted as
// JSON files under DATA_DIR; imported blocklist categories live only in memory.
type memStore struct {
	mu         sync.RWMutex
	byCat      map[string]map[string]struct{} // cat -> "prefix" set
	whitelist  map[string]struct{}            // ip string
	wlPrefixes map[string]struct{}            // "ip/net"
	manualHits map[string][]string            // ip/32 -> ["port/proto", ...]
	blacklist  map[string]string              // prefix string -> reason
}

func (m *memStore) WhitelistCount() (int, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return len(m.whitelist), nil
}

func (m *memStore) ManualUniqueIPs() (int, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return len(m.manualHits), nil
}

func (m *memStore) blFile() string { return filepath.Join(dataDir, "blacklist.json") }

func (m *memStore) loadBlacklistFromDisk() {
	f, err := os.Open(m.blFile())
	if err != nil {
		return
	}
	defer f.Close()
	var tmp map[string]string
	if err := json.NewDecoder(f).Decode(&tmp); err == nil {
		m.blacklist = tmp
	}
}

func (m *memStore) saveBlacklistToDisk() {
	tmp := make(map[string]string, len(m.blacklist))
	for k, v := range m.blacklist {
		tmp[k] = v
	}
	b, _ := json.MarshalIndent(tmp, "", " ")
	tmpPath := m.blFile() + ".tmp"
	_ = os.WriteFile(tmpPath, b, 0o644)
	_ = os.Rename(tmpPath, m.blFile())
}

func (m *memStore) AddBlacklist(pfx netip.Prefix, reason string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.blacklist[pfx.String()] = strings.TrimSpace(reason)
	m.saveBlacklistToDisk()
	return nil
}

func (m *memStore) DelBlacklist(pfx netip.Prefix) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.blacklist, pfx.String())
	m.saveBlacklistToDisk()
	return nil
}

func (m *memStore) ListBlacklist() (map[netip.Prefix]string, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	out := make(map[netip.Prefix]string, len(m.blacklist))
	for k, v := range m.blacklist {
		if p, err := netip.ParsePrefix(k); err == nil {
			out[p] = v
		}
	}
	return out, nil
}

func (m *memStore) CountBlacklist() (int, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return len(m.blacklist), nil
}

func newMemStore() *memStore {
	m := &memStore{
		byCat:      map[string]map[string]struct{}{},
		whitelist:  map[string]struct{}{},
		wlPrefixes: map[string]struct{}{},
		manualHits: map[string][]string{},
		blacklist:  map[string]string{},
	}
	_ = os.MkdirAll(dataDir, 0o755)
	m.loadBlacklistFromDisk()
	m.loadWhitelistFromDisk()
	m.loadWhitelistPrefixesFromDisk()
	return m
}

func (m *memStore) wlFile() string { return filepath.Join(dataDir, "whitelist.json") }

func (m *memStore) wlPrefixesFile() string { return filepath.Join(dataDir, "whitelist_prefixes.json") }

func (m *memStore) loadWhitelistPrefixesFromDisk() {
	f, err := os.Open(m.wlPrefixesFile())
	if err != nil {
		return
	}
	defer f.Close()
	var tmp []string
	if err := json.NewDecoder(f).Decode(&tmp); err == nil {
		for _, s := range tmp {
			if _, err := netip.ParsePrefix(s); err == nil {
				m.wlPrefixes[s] = struct{}{}
			}
		}
	}
}

func (m *memStore) saveWhitelistPrefixesToDisk() {
	m.mu.RLock()
	keys := make([]string, 0, len(m.wlPrefixes))
	for k := range m.wlPrefixes {
		keys = append(keys, k)
	}
	m.mu.RUnlock()
	sort.Strings(keys)
	b, _ := json.MarshalIndent(keys, "", " ")
	tmp := m.wlPrefixesFile() + ".tmp"
	_ = os.WriteFile(tmp, b, 0o644)
	_ = os.Rename(tmp, m.wlPrefixesFile())
}

func (m *memStore) AddWhitelistPrefix(pfx netip.Prefix) error {
	m.mu.Lock()
	m.wlPrefixes[pfx.String()] = struct{}{}
	m.mu.Unlock()
	m.saveWhitelistPrefixesToDisk()
	return nil
}

func (m *memStore) RemoveWhitelistPrefix(pfx netip.Prefix) error {
	m.mu.Lock()
	delete(m.wlPrefixes, pfx.String())
	m.mu.Unlock()
	m.saveWhitelistPrefixesToDisk()
	return nil
}

func (m *memStore) ListWhitelistPrefixes() ([]netip.Prefix, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	out := make([]netip.Prefix, 0, len(m.wlPrefixes))
	for k := range m.wlPrefixes {
		if p, err := netip.ParsePrefix(k); err == nil {
			out = append(out, p)
		}
	}
	sort.Slice(out, func(i, j int) bool { return out[i].String() < out[j].String() })
	return out, nil
}
func (m *memStore) loadWhitelistFromDisk() {
	f, err := os.Open(m.wlFile())
	if err != nil {
		return // file doesn't exist yet – that's fine
	}
	defer f.Close()
	var tmp []string
	if err := json.NewDecoder(f).Decode(&tmp); err == nil {
		for _, ip := range tmp {
			if strings.TrimSpace(ip) != "" {
				m.whitelist[ip] = struct{}{}
			}
		}
	}
}

func (m *memStore) saveWhitelistToDisk() {
	// stable ordering is a nice-to-have
	m.mu.RLock()
	keys := make([]string, 0, len(m.whitelist))
	for k := range m.whitelist {
		keys = append(keys, k)
	}
	m.mu.RUnlock()
	sort.Strings(keys)
	b, _ := json.MarshalIndent(keys, "", " ")
	tmp := m.wlFile() + ".tmp"
	_ = os.WriteFile(tmp, b, 0o644)
	_ = os.Rename(tmp, m.wlFile())
}

func (m *memStore) ensureCat(cat string) {
	if _, ok := m.byCat[cat]; !ok {
		m.byCat[cat] = map[string]struct{}{}
	}
}

func (m *memStore) PutPrefix(cat string, prefix string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.ensureCat(cat)
	m.byCat[cat][prefix] = struct{}{}
	return nil
}

func (m *memStore) ListPrefixes(cat string) ([]netip.Prefix, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	set := m.byCat[cat]
	out := make([]netip.Prefix, 0, len(set))
	for k := range set {
		if p, err := netip.ParsePrefix(k); err == nil {
			out = append(out, p)
		}
	}
	// stable order
	sort.Slice(out, func(i, j int) bool { return out[i].String() < out[j].String() })
	return out, nil
}

func (m *memStore) ListCats() []string {
	m.mu.RLock()
	defer m.mu.RUnlock()
	out := make([]string, 0, len(m.byCat))
	for c := range m.byCat {
		out = append(out, c)
	}
	sort.Strings(out)
	return out
}

func (m *memStore) Count(cat string) (int, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return len(m.byCat[cat]), nil
}

func (m *memStore) AddWhitelist(ip netip.Addr) error {
	m.mu.Lock()
	m.whitelist[ip.String()] = struct{}{}
	// Unlock explicitly (no defer): saveWhitelistToDisk takes its own read lock,
	// and a deferred Unlock on top of this one would unlock twice and panic.
	m.mu.Unlock()
	m.saveWhitelistToDisk()
	return nil
}

func (m *memStore) IsWhitelisted(ip netip.Addr) (bool, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	if _, ok := m.whitelist[ip.String()]; ok {
		return true, nil
	}
	for k := range m.wlPrefixes {
		if p, err := netip.ParsePrefix(k); err == nil && p.Contains(ip) {
			return true, nil
		}
	}
	return false, nil
}

func (m *memStore) AddManual(ip, port, proto string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	key := ip + "/32"
	m.manualHits[key] = append(m.manualHits[key], port+"/"+proto)
	return nil
}

func (m *memStore) ListManual() map[string][]string {
	m.mu.RLock()
	defer m.mu.RUnlock()
	out := make(map[string][]string, len(m.manualHits))
	for k, v := range m.manualHits {
		dst := make([]string, len(v))
		copy(dst, v)
		out[k] = dst
	}
	return out
}
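// On-disk layout used by memStore (all files under DATA_DIR, default /data):
//
//	blacklist.json           map of "prefix" -> reason
//	whitelist.json           array of whitelisted IPs
//	whitelist_prefixes.json  array of whitelisted prefixes
//
// Each save writes to "<file>.tmp" first and then renames it over the target,
// so a crash mid-write cannot leave a truncated JSON file behind.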
// Redis implementation (optional)
type redisStore struct {
	rdb *redis.Client
	ctx context.Context
}

func newRedisStore() (*redisStore, error) {
	addr := getenv("REDIS_ADDR", "localhost:6379")
	user := os.Getenv("REDIS_USER")
	pass := os.Getenv("REDIS_PASS")
	rdb := redis.NewClient(&redis.Options{Addr: addr, Username: user, Password: pass, DB: 0})
	ctx := context.Background()
	if err := rdb.Ping(ctx).Err(); err != nil {
		return nil, err
	}
	return &redisStore{rdb: rdb, ctx: ctx}, nil
}

func (s *redisStore) catKey(cat string) string { return "bl:" + cat }
func (s *redisStore) wlKey(ip string) string   { return "wl:" + ip }
func (s *redisStore) wlPrefKey() string        { return "wl:prefixes" }
func (s *redisStore) manKey(ip string) string  { return "bl:manual:" + ip }

func (s *redisStore) AddWhitelistPrefix(pfx netip.Prefix) error {
	return s.rdb.HSet(s.ctx, s.wlPrefKey(), pfx.String(), 1).Err()
}

func (s *redisStore) RemoveWhitelistPrefix(pfx netip.Prefix) error {
	return s.rdb.HDel(s.ctx, s.wlPrefKey(), pfx.String()).Err()
}

func (s *redisStore) ListWhitelistPrefixes() ([]netip.Prefix, error) {
	keys, err := s.rdb.HKeys(s.ctx, s.wlPrefKey()).Result()
	if err != nil {
		return nil, err
	}
	out := make([]netip.Prefix, 0, len(keys))
	for _, k := range keys {
		if p, err := netip.ParsePrefix(strings.TrimSpace(k)); err == nil {
			out = append(out, p)
		}
	}
	sort.Slice(out, func(i, j int) bool { return out[i].String() < out[j].String() })
	return out, nil
}

func (s *redisStore) WhitelistCount() (int, error) {
	// Count wl:* keys (SCAN instead of KEYS). Note: this also matches the
	// wl:prefixes hash, so the count includes it.
	var cursor uint64
	total := 0
	for {
		keys, c, err := s.rdb.Scan(s.ctx, cursor, "wl:*", 1000).Result()
		if err != nil {
			return 0, err
		}
		total += len(keys)
		cursor = c
		if cursor == 0 {
			break
		}
	}
	return total, nil
}

func (s *redisStore) ManualUniqueIPs() (int, error) {
	n, err := s.rdb.HLen(s.ctx, "bl:manual").Result()
	return int(n), err
}

func (s *redisStore) PutPrefix(cat string, prefix string) error {
	return s.rdb.HSet(s.ctx, s.catKey(cat), prefix, 1).Err()
}

func (s *redisStore) blKey() string { return "bl:manual:blacklist" }

func (s *redisStore) AddBlacklist(pfx netip.Prefix, reason string) error {
	return s.rdb.HSet(s.ctx, s.blKey(), pfx.String(), strings.TrimSpace(reason)).Err()
}

func (s *redisStore) DelBlacklist(pfx netip.Prefix) error {
	return s.rdb.HDel(s.ctx, s.blKey(), pfx.String()).Err()
}

func (s *redisStore) ListBlacklist() (map[netip.Prefix]string, error) {
	m, err := s.rdb.HGetAll(s.ctx, s.blKey()).Result()
	if err != nil {
		return nil, err
	}
	out := make(map[netip.Prefix]string, len(m))
	for k, v := range m {
		if p, err := netip.ParsePrefix(k); err == nil {
			out[p] = v
		}
	}
	return out, nil
}

func (s *redisStore) CountBlacklist() (int, error) {
	n, err := s.rdb.HLen(s.ctx, s.blKey()).Result()
	return int(n), err
}

func (s *redisStore) ListPrefixes(cat string) ([]netip.Prefix, error) {
	keys, err := s.rdb.HKeys(s.ctx, s.catKey(cat)).Result()
	if err != nil {
		return nil, err
	}
	out := make([]netip.Prefix, 0, len(keys))
	for _, k := range keys {
		if p, err := netip.ParsePrefix(strings.TrimSpace(k)); err == nil {
			out = append(out, p)
		}
	}
	sort.Slice(out, func(i, j int) bool { return out[i].String() < out[j].String() })
	return out, nil
}

func (s *redisStore) ListCats() []string {
	// Redis doesn't list categories directly; caller should track categories elsewhere.
	// As a pragmatic fallback we read from a catalog file or rely on the importer to know them.
	// Here we just return [] and rely on the dynamic catalog in memory.
	return []string{}
}
func (s *redisStore) Count(cat string) (int, error) {
	n, err := s.rdb.HLen(s.ctx, s.catKey(cat)).Result()
	return int(n), err
}

func (s *redisStore) AddWhitelist(ip netip.Addr) error {
	return s.rdb.Set(s.ctx, s.wlKey(ip.String()), 1, 0).Err()
}

func (s *redisStore) IsWhitelisted(ip netip.Addr) (bool, error) {
	ex, err := s.rdb.Exists(s.ctx, s.wlKey(ip.String())).Result()
	if err != nil {
		return false, err
	}
	if ex > 0 {
		return true, nil
	}
	keys, err := s.rdb.HKeys(s.ctx, s.wlPrefKey()).Result()
	if err != nil {
		return false, err
	}
	for _, k := range keys {
		if p, err := netip.ParsePrefix(strings.TrimSpace(k)); err == nil && p.Contains(ip) {
			return true, nil
		}
	}
	return false, nil
}

func (s *redisStore) AddManual(ip, port, proto string) error {
	ipKey := ip + "/32"
	if err := s.rdb.HSet(s.ctx, "bl:manual", ipKey, 1).Err(); err != nil {
		return err
	}
	return s.rdb.SAdd(s.ctx, s.manKey(ipKey), port+"/"+proto).Err()
}

func (s *redisStore) ListManual() map[string][]string {
	keys, _ := s.rdb.HKeys(s.ctx, "bl:manual").Result()
	out := make(map[string][]string)
	for _, ip := range keys {
		ports, _ := s.rdb.SMembers(s.ctx, s.manKey(ip)).Result()
		out[ip] = ports
	}
	return out
}
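// Redis key schema used by redisStore, as read off the methods above:
//
//	bl:<category>            hash    field = prefix, value = 1
//	bl:manual                hash    field = "<ip>/32", value = 1 (honeypot hits)
//	bl:manual:<ip>/32        set     members = "<port>/<proto>"
//	bl:manual:blacklist      hash    field = prefix, value = reason (manual blacklist)
//	wl:<ip>                  string  whitelisted single address
//	wl:prefixes              hash    field = whitelisted prefix, value = 1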
// ── Metrics ───────────────────────────────────────────────────────────────────

type histogram struct {
	// Prometheus-style buckets
	bounds []float64
	// handler -> counts/buckets
	mu     sync.Mutex
	counts map[string][]uint64
	sum    map[string]float64
	n      map[string]uint64
}

func newHistogram(bounds []float64) *histogram {
	return &histogram{
		bounds: bounds,
		counts: map[string][]uint64{},
		sum:    map[string]float64{},
		n:      map[string]uint64{},
	}
}

func (h *histogram) observe(name string, v float64) {
	h.mu.Lock()
	defer h.mu.Unlock()
	if _, ok := h.counts[name]; !ok {
		h.counts[name] = make([]uint64, len(h.bounds))
	}
	// Find the bucket (<= le).
	for i, b := range h.bounds {
		if v <= b {
			h.counts[name][i]++
			break
		}
		if i == len(h.bounds)-1 {
			// Larger than the last bucket: there is no explicit +Inf bucket here;
			// for Prometheus-conformant output, _count is emitted separately.
		}
	}
	h.sum[name] += v
	h.n[name]++
}

type metricsT struct {
	// Requests
	reqTotal  sync.Map // key: handler -> *uint64
	respTotal sync.Map // key: handler|code -> *uint64
	inflight  uint64

	// Latencies
	lat *histogram

	// Importer
	importerCycles      uint64
	importerErrors      sync.Map // key: category -> *uint64
	importerLastSuccess int64    // unix seconds
	importerDur         *histogram

	// Traefik blocks
	traefikBlocks uint64

	// Downloads
	downloads sync.Map // key: category -> *uint64
}

var metrics = &metricsT{
	lat:         newHistogram([]float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10}),
	importerDur: newHistogram([]float64{0.1, 0.25, 0.5, 1, 2, 5, 10, 20, 40, 60}),
}

func (m *metricsT) incReq(handler string) {
	v, _ := m.reqTotal.LoadOrStore(handler, new(uint64))
	atomic.AddUint64(v.(*uint64), 1)
}

func (m *metricsT) incResp(handler string, code int) {
	key := fmt.Sprintf("%s|%d", handler, code)
	v, _ := m.respTotal.LoadOrStore(key, new(uint64))
	atomic.AddUint64(v.(*uint64), 1)
}

func (m *metricsT) incImporterErr(cat string) {
	v, _ := m.importerErrors.LoadOrStore(cat, new(uint64))
	atomic.AddUint64(v.(*uint64), 1)
}

type statusRecorder struct {
	http.ResponseWriter
	status int
}

func (r *statusRecorder) WriteHeader(code int) {
	r.status = code
	r.ResponseWriter.WriteHeader(code)
}

// HTTP middleware that records request count, response code, and latency.
func (s *server) withMetrics(next http.HandlerFunc, name string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		atomic.AddUint64(&metrics.inflight, 1)
		defer atomic.AddUint64(&metrics.inflight, ^uint64(0)) // -1
		rec := &statusRecorder{ResponseWriter: w, status: 200}
		start := time.Now()
		next(rec, r)
		dur := time.Since(start).Seconds()
		metrics.incReq(name)
		metrics.incResp(name, rec.status)
		metrics.lat.observe(name, dur)
	}
}

// ──────────────────────────────────────────────────────────────────────────────
// Catalog + Importer
// ──────────────────────────────────────────────────────────────────────────────

func loadCatalog() (map[string]string, error) {
	if importBaseURL == "" {
		// fallback: minimal default list
		return map[string]string{
			"flod-official": "https://git.send.nrw/sendnrw/flod-lists/src/branch/main/",
		}, nil
	}
	catalogURL := strings.TrimRight(importBaseURL, "/") + "/lists.json"
	log.Printf("Fetching catalog from %s", catalogURL)
	resp, err := http.Get(catalogURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("catalog status: %s", resp.Status)
	}
	var m map[string]string
	if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
		return nil, err
	}
	// also write to disk for transparency
	_ = os.MkdirAll("./lists", 0o755)
	f, _ := os.Create(filepath.Join("./lists", "lists.json"))
	defer f.Close()
	enc := json.NewEncoder(f)
	enc.SetIndent("", " ")
	_ = enc.Encode(m)
	return m, nil
}

func normalizeLineToPrefix(s string) (string, bool) {
	s = strings.TrimSpace(s)
	if s == "" || strings.HasPrefix(s, "#") {
		return "", false
	}
	if !strings.Contains(s, "/") {
		ip := net.ParseIP(s)
		if ip == nil {
			return "", false
		}
		if ip.To4() != nil {
			s += "/32"
		} else {
			s += "/128"
		}
	}
	if _, err := netip.ParsePrefix(s); err != nil {
		return "", false
	}
	return s, true
}
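// Illustrative behaviour of normalizeLineToPrefix:
//
//	"1.2.3.4"      -> "1.2.3.4/32", true       (bare IPv4 gets /32)
//	"2001:db8::1"  -> "2001:db8::1/128", true  (bare IPv6 gets /128)
//	"10.0.0.0/8"   -> "10.0.0.0/8", true       (already a prefix, passed through)
//	"# comment"    -> "", false
//	""             -> "", false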
func importCategory(st Store, cat, url string) (int, error) {
	resp, err := http.Get(url)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return 0, fmt.Errorf("bad status %s", resp.Status)
	}
	sc := bufio.NewScanner(resp.Body)
	count := 0
	for sc.Scan() {
		if pfx, ok := normalizeLineToPrefix(sc.Text()); ok {
			if err := st.PutPrefix(cat, pfx); err != nil {
				return count, err
			}
			count++
			if count%2000 == 0 {
				log.Printf("[%s] %d entries", cat, count)
			}
		}
	}
	return count, sc.Err()
}

func startImporter(st Store, cats map[string]string, srv *server) {
	var delay time.Duration
	if delayStr == "" {
		delay = defaultDelay
	} else {
		secs, err := strconv.Atoi(delayStr)
		if err != nil || secs <= 0 {
			log.Printf("invalid DELAY=%q, using default (%v)", delayStr, defaultDelay)
			delay = defaultDelay // fall back instead of staying at zero
		} else {
			delay = time.Duration(secs) * time.Second
		}
	}
	go func() {
		for {
			start := time.Now()
			log.Println("Starting blocklist import...")
			var wg sync.WaitGroup
			for cat, url := range cats {
				cat, url := cat, url
				wg.Add(1)
				go func() {
					defer wg.Done()
					t0 := time.Now()
					n, err := importCategory(st, cat, url)
					d := time.Since(t0).Seconds()
					metrics.importerDur.observe(cat, d)
					if err != nil {
						metrics.incImporterErr(cat)
						log.Printf("❌ import %s: %v", cat, err)
						return
					}
					log.Printf("✅ [%s] %d entries", cat, n)
				}()
				time.Sleep(delay) // stagger downloads
			}
			wg.Wait()
			srv.rebuildIndex()
			atomic.AddUint64(&metrics.importerCycles, 1)
			atomic.StoreInt64(&metrics.importerLastSuccess, time.Now().Unix())
			log.Printf("Import cycle finished in %s", time.Since(start))
			time.Sleep(importInterval)
		}
	}()
}

// ──────────────────────────────────────────────────────────────────────────────
// HTTP layer
// ──────────────────────────────────────────────────────────────────────────────

type ipIndex struct {
	v4Lens []int // prefix lengths present in the index (e.g. 8, 16, 24, 32)
	v6Lens []int
	v4     map[int]map[string][]string // len -> "a.b.c.d/len" -> []categories
	v6     map[int]map[string][]string // len -> "xxxx::/len" -> []categories
}

type server struct {
	st      Store
	catalog map[string]string // latest catalog for category list
	mu      sync.RWMutex
	idx     atomic.Value
}

func (s *server) rebuildIndex() {
	idx := &ipIndex{v4: map[int]map[string][]string{}, v6: map[int]map[string][]string{}}
	lens4 := map[int]struct{}{}
	lens6 := map[int]struct{}{}
	cats := s.cats()
	for _, c := range cats {
		pfxs, _ := s.st.ListPrefixes(c)
		for _, p := range pfxs {
			m := idx.v6
			lens := lens6
			if p.Addr().Is4() {
				m = idx.v4
				lens = lens4
			}
			l := int(p.Bits())
			if m[l] == nil {
				m[l] = map[string][]string{}
			}
			k := p.Masked().String() // key by the canonical network address so lookups match
			m[l][k] = append(m[l][k], c)
			lens[l] = struct{}{}
		}
	}
	// Include the manual blacklist as category "manual-blacklist".
	if bl, err := s.st.ListBlacklist(); err == nil {
		for p := range bl {
			m := idx.v6
			lens := lens6
			if p.Addr().Is4() {
				m = idx.v4
				lens = lens4
			}
			l := int(p.Bits())
			if m[l] == nil {
				m[l] = map[string][]string{}
			}
			k := p.Masked().String()
			m[l][k] = append(m[l][k], "manual-blacklist")
			lens[l] = struct{}{}
		}
	}
	// Store the lengths sorted so lookups only probe lengths that actually exist.
	for l := range lens4 {
		idx.v4Lens = append(idx.v4Lens, l)
	}
	for l := range lens6 {
		idx.v6Lens = append(idx.v6Lens, l)
	}
	sort.Ints(idx.v4Lens)
	sort.Ints(idx.v6Lens)
	s.idx.Store(idx)
}

func (s *server) cats() []string {
	// prefer catalog keys if present
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s.catalog != nil {
		keys := make([]string, 0, len(s.catalog))
		for k := range s.catalog {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		return keys
	}
	return s.st.ListCats()
}
func clientIPFromHeaders(r *http.Request) (netip.Addr, error) {
	if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
		parts := strings.Split(xff, ",")
		s := strings.TrimSpace(parts[0])
		if a, err := netip.ParseAddr(s); err == nil {
			return a.Unmap(), nil
		}
	}
	if xr := r.Header.Get("X-Real-Ip"); xr != "" {
		if a, err := netip.ParseAddr(strings.TrimSpace(xr)); err == nil {
			return a.Unmap(), nil
		}
	}
	host, _, err := net.SplitHostPort(r.RemoteAddr)
	if err == nil {
		if a, err := netip.ParseAddr(host); err == nil {
			return a.Unmap(), nil
		}
	}
	return netip.Addr{}, errors.New("cannot determine client ip")
}

func (s *server) handleCheck(w http.ResponseWriter, r *http.Request) {
	ipStr := strings.TrimPrefix(r.URL.Path, "/check/")
	ip, err := netip.ParseAddr(ipStr)
	if err != nil {
		http.Error(w, "invalid IP", http.StatusBadRequest)
		return
	}
	ip = ip.Unmap() // treat ::ffff:a.b.c.d like a.b.c.d
	matches, _ := s.checkIP(ip, s.cats())
	writeJSON(w, map[string]any{
		"ip":         ip.String(),
		"blocked":    len(matches) > 0,
		"categories": matches,
	})
}

func (s *server) handleTraefik(w http.ResponseWriter, r *http.Request) {
	ip, err := clientIPFromHeaders(r)
	if err != nil {
		http.Error(w, "invalid IP", http.StatusBadRequest)
		return
	}
	matches, _ := s.checkIP(ip, s.cats())
	if len(matches) > 0 {
		atomic.AddUint64(&metrics.traefikBlocks, 1) // NEW
		errorhtml(w, r)
		return
	}
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("OK"))
}

// errorhtml renders the block page shown to rejected clients. The original
// markup was lost in this paste; this is a minimal reconstruction that keeps
// the page's text content and returns a non-2xx status so the request stays blocked.
func errorhtml(w http.ResponseWriter, r *http.Request) {
	html := `<!DOCTYPE html>
<html>
<head><title>Blocked – First Line Of Defense</title></head>
<body>
<h1>Your connection attempt to the target server was blocked by the First-Line-Of-Defense-Project.</h1>
<p>Your IP address is listed on at least one blacklist.</p>
<table>
  <tr><th>Browser</th><th>Security-Gateway</th><th>Origin-Server</th></tr>
  <tr><td>Functional</td><td>Blocked your request</td><td>Functional</td></tr>
  <tr><td>-</td><td>-</td><td>No Request</td></tr>
</table>
</body>
</html>`
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	w.WriteHeader(http.StatusForbidden)
	_, _ = w.Write([]byte(html))
}
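// One possible way to wire /traefik into Traefik (assumed v2+ dynamic config,
// not part of the original paste): a forwardAuth middleware pointing at this
// service. Traefik sends each incoming request here first and only lets it
// through to the backend on a 2xx response; the client IP arrives via
// X-Forwarded-For, which clientIPFromHeaders reads. The service name "flod"
// and middleware name are placeholders.
//
//	http:
//	  middlewares:
//	    flod-block:
//	      forwardAuth:
//	        address: "http://flod:8080/traefik"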
func writeJSON(w http.ResponseWriter, v any) {
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(v)
}

func (s *server) handleBlacklistRoot(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case http.MethodGet:
		s.handleBlacklistList(w, r)
	case http.MethodPost:
		s.handleBlacklistAdd(w, r)
	default:
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
	}
}

func (s *server) handleBlacklistAdd(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	var body struct {
		Prefix string `json:"prefix"` // "1.2.3.4" or "1.2.3.0/24" etc.
		Reason string `json:"reason"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		http.Error(w, "bad request", http.StatusBadRequest)
		return
	}
	norm, ok := normalizeLineToPrefix(strings.TrimSpace(body.Prefix))
	if !ok {
		http.Error(w, "invalid prefix/ip", http.StatusBadRequest)
		return
	}
	pfx, _ := netip.ParsePrefix(norm)
	if err := s.st.AddBlacklist(pfx, body.Reason); err != nil {
		http.Error(w, "store error", http.StatusInternalServerError)
		return
	}
	s.rebuildIndex()
	writeJSON(w, map[string]any{"status": "blacklisted", "prefix": pfx.String(), "reason": strings.TrimSpace(body.Reason)})
}

func (s *server) handleBlacklistDel(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodDelete {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	raw := strings.TrimPrefix(r.URL.Path, "/blacklist/")
	norm, ok := normalizeLineToPrefix(strings.TrimSpace(raw))
	if !ok {
		http.Error(w, "invalid prefix/ip", http.StatusBadRequest)
		return
	}
	pfx, _ := netip.ParsePrefix(norm)
	if err := s.st.DelBlacklist(pfx); err != nil {
		http.Error(w, "store error", http.StatusInternalServerError)
		return
	}
	s.rebuildIndex()
	writeJSON(w, map[string]any{"status": "removed", "prefix": pfx.String()})
}

func (s *server) handleBlacklistList(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	m, err := s.st.ListBlacklist()
	if err != nil {
		http.Error(w, "store error", http.StatusInternalServerError)
		return
	}
	type entry struct{ Prefix, Reason string }
	var list []entry
	for p, reason := range m {
		list = append(list, entry{Prefix: p.String(), Reason: reason})
	}
	sort.Slice(list, func(i, j int) bool { return list[i].Prefix < list[j].Prefix })
	writeJSON(w, map[string]any{"entries": list})
}

func (s *server) checkIP(ip netip.Addr, _ []string) ([]string, error) {
	if ok, _ := s.st.IsWhitelisted(ip); ok {
		return nil, nil
	}
	v := s.idx.Load()
	if v == nil {
		// Index not built yet (first import still running): nothing to match.
		return nil, nil
	}
	idx := v.(*ipIndex)
	var lens []int
	var m map[int]map[string][]string
	if ip.Is4() {
		lens, m = idx.v4Lens, idx.v4
	} else {
		lens, m = idx.v6Lens, idx.v6
	}
	seen := map[string]struct{}{}
	var matches []string
	for _, l := range lens {
		// Mask to the canonical network address so the key matches the index.
		p := netip.PrefixFrom(ip, l).Masked().String()
		if cats, ok := m[l][p]; ok {
			for _, c := range cats {
				if _, dup := seen[c]; !dup {
					seen[c] = struct{}{}
					matches = append(matches, c)
				}
			}
		}
	}
	return matches, nil
}
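// Example lookup against the index (illustrative values): if the imported lists
// contain only /8, /24, and /32 IPv4 prefixes, v4Lens is [8 24 32] and a check
// for 203.0.113.7 probes exactly three keys:
//
//	203.0.0.0/8     -> miss
//	203.0.113.0/24  -> e.g. ["some-droplist"]
//	203.0.113.7/32  -> miss
//
// so the address is reported as blocked with categories ["some-droplist"].
// Category names here are hypothetical.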
%v", err) continue } ip, _, _ := net.SplitHostPort(conn.RemoteAddr().String()) _ = st.AddManual(ip, port, "TCP") conn.Close() } } func startUDPListener(st Store, network, port string) { addr := net.UDPAddr{Port: atoi(port)} conn, err := net.ListenUDP(network, &addr) if err != nil { log.Printf("❌ could not listen on %s/%s: %v", network, port, err) return } log.Printf("🚀 UDP Honeypot on %s/%s", network, port) buf := make([]byte, 1024) for { n, ra, err := conn.ReadFromUDP(buf) if err != nil { log.Printf("udp read err: %v", err) continue } if n > 0 { _ = st.AddManual(ra.IP.String(), port, "UDP") } } } func atoi(s string) int { n, _ := strconvAtoiSafe(s) return n } func strconvAtoiSafe(s string) (int, error) { var n int for _, r := range s { if r < '0' || r > '9' { return 0, fmt.Errorf("bad int %q", s) } n = n*10 + int(r-'0') } return n, nil } // ────────────────────────────────────────────────────────────────────────────── // main // ────────────────────────────────────────────────────────────────────────────── func main() { // choose store var st Store switch strings.ToLower(storeKind) { case "redis": rs, err := newRedisStore() if err != nil { log.Fatalf("redis: %v", err) } st = rs default: st = newMemStore() } // fetch catalog and start importer cat, err := loadCatalog() if err != nil { log.Printf("catalog error: %v (using fallback FLOD only)", err) cat = map[string]string{ "flod-official": "https://git.send.nrw/sendnrw/flod-lists/src/branch/main/lists.json", } } srv := &server{st: st, catalog: cat} startImporter(st, cat, srv) // honeypot listeners if enableHoneypot { for _, p := range honeyTCP { go startTCPListener(st, "tcp4", p) go startTCPListener(st, "tcp6", p) } for _, p := range honeyUDP { go startUDPListener(st, "udp4", p) go startUDPListener(st, "udp6", p) } } // routes http.HandleFunc("/", srv.withMetrics(srv.handleGUI, "gui")) http.HandleFunc("/check/", srv.withMetrics(srv.handleCheck, "check")) http.HandleFunc("/traefik", srv.withMetrics(srv.handleTraefik, "traefik")) http.HandleFunc("/whitelist", srv.withMetrics(srv.handleWhitelist, "whitelist")) http.HandleFunc("/download/", srv.withMetrics(srv.handleDownload, "download")) http.HandleFunc("/metrics", srv.handleMetrics) // selbst enthält keine eigene Messung //http.HandleFunc("/blacklist", srv.withMetrics(srv.handleBlacklistAdd, "blacklist_add")) // POST /*http.HandleFunc("/blacklist/", func(w http.ResponseWriter, r *http.Request) { switch r.Method { case http.MethodDelete: srv.withMetrics(srv.handleBlacklistDel, "blacklist_del")(w, r) case http.MethodGet: // erlauben: GET /blacklist (Liste) if r.URL.Path == "/blacklist/" || r.URL.Path == "/blacklist" { srv.withMetrics(srv.handleBlacklistList, "blacklist_list")(w, r) return } http.NotFound(w, r) default: http.Error(w, "method not allowed", http.StatusMethodNotAllowed) } })*/ http.HandleFunc("/blacklist", srv.withMetrics(srv.handleBlacklistRoot, "blacklist_root")) // GET & POST http.HandleFunc("/blacklist/", srv.withMetrics(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/blacklist/" && r.Method == http.MethodGet { srv.handleBlacklistList(w, r) return } if r.Method == http.MethodDelete { srv.handleBlacklistDel(w, r) return } http.Error(w, "not found", http.StatusNotFound) }, "blacklist_misc")) log.Printf("listening on %s (store=%s, honeypot=%v)", listenAddr, storeKind, enableHoneypot) if err := http.ListenAndServe(listenAddr, nil); err != nil { log.Fatal(err) } }