diff --git a/.gitea/workflows/registry.yml b/.gitea/workflows/registry.yml new file mode 100644 index 0000000..20912ac --- /dev/null +++ b/.gitea/workflows/registry.yml @@ -0,0 +1,51 @@ +name: release-tag +on: + push: + branches: + - 'main' +jobs: + release-image: + runs-on: ubuntu-fast + env: + DOCKER_ORG: ${{ vars.DOCKER_ORG }} + DOCKER_LATEST: latest + RUNNER_TOOL_CACHE: /toolcache + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker BuildX + uses: docker/setup-buildx-action@v2 + with: # replace it with your local IP + config-inline: | + [registry."${{ vars.DOCKER_REGISTRY }}"] + http = true + insecure = true + + - name: Login to DockerHub + uses: docker/login-action@v2 + with: + registry: ${{ vars.DOCKER_REGISTRY }} # replace it with your local IP + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Get Meta + id: meta + run: | + echo REPO_NAME=$(echo ${GITHUB_REPOSITORY} | awk -F"/" '{print $2}') >> $GITHUB_OUTPUT + echo REPO_VERSION=$(git describe --tags --always | sed 's/^v//') >> $GITHUB_OUTPUT + + - name: Build and push + uses: docker/build-push-action@v4 + with: + context: . + file: ./Dockerfile + platforms: | + linux/amd64 + push: true + tags: | # replace it with your local IP and tags + ${{ vars.DOCKER_REGISTRY }}/${{ env.DOCKER_ORG }}/${{ steps.meta.outputs.REPO_NAME }}:${{ steps.meta.outputs.REPO_VERSION }} + ${{ vars.DOCKER_REGISTRY }}/${{ env.DOCKER_ORG }}/${{ steps.meta.outputs.REPO_NAME }}:${{ env.DOCKER_LATEST }} \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..551afd6 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,24 @@ +# -------- Dockerfile (Multi-Stage Build) -------- +# 1. Builder-Stage +FROM golang:1.25-alpine AS builder + +WORKDIR /app +COPY go.* ./ +RUN go mod download +COPY . . +RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /bin/flodhome + +# 2. 
Runtime-Stage +FROM alpine:3.22 + +# HTTPS-Callouts in Alpine brauchen ca-certificates +RUN apk add --no-cache ca-certificates +RUN mkdir /lists + +COPY --from=builder /bin/flodhome /bin/flodhome +# Default listens on :8080 – siehe main.go +EXPOSE 8080 + +VOLUME /data + +ENTRYPOINT ["/bin/flodhome"] diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..b1e72d0 --- /dev/null +++ b/go.mod @@ -0,0 +1,10 @@ +module git.send.nrw/sendnrw/flod-home + +go 1.25.3 + +require github.com/redis/go-redis/v9 v9.16.0 + +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..e578cfc --- /dev/null +++ b/go.sum @@ -0,0 +1,10 @@ +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/redis/go-redis/v9 v9.16.0 h1:OotgqgLSRCmzfqChbQyG1PHC3tLNR89DG4jdOERSEP4= +github.com/redis/go-redis/v9 v9.16.0/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370= diff --git a/main.go b/main.go new file mode 100644 index 0000000..46a4c16 --- /dev/null +++ b/main.go @@ -0,0 +1,1479 @@ +package main + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "log" + "net" + "net/http" + "net/netip" + "os" + 
"path/filepath" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/redis/go-redis/v9" +) + +// ────────────────────────────────────────────────────────────────────────────── +// Config via ENV +// ────────────────────────────────────────────────────────────────────────────── + +func getenv(key, def string) string { + if v := os.Getenv(key); v != "" { + return v + } + return def +} + +var ( + listenAddr = getenv("LISTEN_ADDR", ":8080") + storeKind = getenv("STORE", "memory") // "memory" or "redis" + importBaseURL = getenv("FLOD_IMPORT_URL", "https://git.send.nrw/sendnrw/flod-lists/raw/branch/main/") + importInterval = mustParseDuration(getenv("IMPORT_INTERVAL", "30m")) + enableHoneypot = strings.EqualFold(getenv("HONEYPOT", "off"), "on") + honeyTCP = splitCSV(getenv("HONEY_TCP", "135,139,445,389,636,3268,3269,88,3389")) + honeyUDP = splitCSV(getenv("HONEY_UDP", "135,137,138,389,3389,88")) + dataDir = getenv("DATA_DIR", "/data") +) + +func splitCSV(s string) []string { + var out []string + for _, p := range strings.Split(s, ",") { + p = strings.TrimSpace(p) + if p != "" { + out = append(out, p) + } + } + return out +} + +func mustParseDuration(s string) time.Duration { + d, err := time.ParseDuration(s) + if err != nil { + log.Fatalf("bad IMPORT_INTERVAL %q: %v", s, err) + } + return d +} + +// ────────────────────────────────────────────────────────────────────────────── +// Storage Abstraction +// ────────────────────────────────────────────────────────────────────────────── + +type Store interface { + // Blocklists + PutPrefix(cat string, prefix string) error + ListPrefixes(cat string) ([]netip.Prefix, error) + ListCats() []string + Count(cat string) (int, error) + + // Whitelist + AddWhitelist(ip netip.Addr) error + IsWhitelisted(ip netip.Addr) (bool, error) + WhitelistCount() (int, error) + AddWhitelistPrefix(pfx netip.Prefix) error + RemoveWhitelistPrefix(pfx netip.Prefix) error + ListWhitelistPrefixes() ([]netip.Prefix, error) + + // Manual 
hits + AddManual(ip string, port string, proto string) error + ListManual() map[string][]string + ManualUniqueIPs() (int, error) + + AddBlacklist(pfx netip.Prefix, reason string) error + DelBlacklist(pfx netip.Prefix) error + ListBlacklist() (map[netip.Prefix]string, error) // Prefix -> Reason + CountBlacklist() (int, error) +} + +// In-memory implementation (with simple persistence to ./data/*.txt on export if desired) +type memStore struct { + mu sync.RWMutex + byCat map[string]map[string]struct{} // cat -> "prefix" set + whitelist map[string]struct{} // ip string + wlPrefixes map[string]struct{} // "ip/net" + manualHits map[string][]string // ip/32 -> ["port/proto", ...] + blacklist map[string]string // prefix string -> reason +} + +func (m *memStore) WhitelistCount() (int, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return len(m.whitelist), nil +} + +func (m *memStore) ManualUniqueIPs() (int, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return len(m.manualHits), nil +} + +func (m *memStore) blFile() string { return filepath.Join(dataDir, "blacklist.json") } + +func (m *memStore) loadBlacklistFromDisk() { + f, err := os.Open(m.blFile()) + if err != nil { + return + } + defer f.Close() + var tmp map[string]string + if err := json.NewDecoder(f).Decode(&tmp); err == nil { + m.blacklist = tmp + } +} + +func (m *memStore) saveBlacklistToDisk() { + tmp := make(map[string]string, len(m.blacklist)) + for k, v := range m.blacklist { + tmp[k] = v + } + + b, _ := json.MarshalIndent(tmp, "", " ") + tmpPath := m.blFile() + ".tmp" + _ = os.WriteFile(tmpPath, b, 0o644) + _ = os.Rename(tmpPath, m.blFile()) +} + +func (m *memStore) AddBlacklist(pfx netip.Prefix, reason string) error { + m.mu.Lock() + defer m.mu.Unlock() + m.blacklist[pfx.String()] = strings.TrimSpace(reason) + m.saveBlacklistToDisk() + return nil +} + +func (m *memStore) DelBlacklist(pfx netip.Prefix) error { + m.mu.Lock() + defer m.mu.Unlock() + delete(m.blacklist, pfx.String()) + 
m.saveBlacklistToDisk() + return nil +} + +func (m *memStore) ListBlacklist() (map[netip.Prefix]string, error) { + m.mu.RLock() + defer m.mu.RUnlock() + out := make(map[netip.Prefix]string, len(m.blacklist)) + for k, v := range m.blacklist { + if p, err := netip.ParsePrefix(k); err == nil { + out[p] = v + } + } + return out, nil +} + +func (m *memStore) CountBlacklist() (int, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return len(m.blacklist), nil +} + +func newMemStore() *memStore { + m := &memStore{ + byCat: map[string]map[string]struct{}{}, + whitelist: map[string]struct{}{}, + wlPrefixes: map[string]struct{}{}, + manualHits: map[string][]string{}, + blacklist: map[string]string{}, + } + _ = os.MkdirAll(dataDir, 0o755) + m.loadBlacklistFromDisk() + m.loadWhitelistFromDisk() + m.loadWhitelistPrefixesFromDisk() + return m +} + +func (m *memStore) wlFile() string { + return filepath.Join(dataDir, "whitelist.json") +} + +func (m *memStore) wlPrefixesFile() string { return filepath.Join(dataDir, "whitelist_prefixes.json") } + +func (m *memStore) loadWhitelistPrefixesFromDisk() { + f, err := os.Open(m.wlPrefixesFile()) + if err != nil { + return + } + defer f.Close() + var tmp []string + if err := json.NewDecoder(f).Decode(&tmp); err == nil { + for _, s := range tmp { + if _, err := netip.ParsePrefix(s); err == nil { + m.wlPrefixes[s] = struct{}{} + } + } + } +} +func (m *memStore) saveWhitelistPrefixesToDisk() { + m.mu.RLock() + keys := make([]string, 0, len(m.wlPrefixes)) + for k := range m.wlPrefixes { + keys = append(keys, k) + } + m.mu.RUnlock() + sort.Strings(keys) + b, _ := json.MarshalIndent(keys, "", " ") + tmp := m.wlPrefixesFile() + ".tmp" + _ = os.WriteFile(tmp, b, 0o644) + _ = os.Rename(tmp, m.wlPrefixesFile()) +} + +func (m *memStore) AddWhitelistPrefix(pfx netip.Prefix) error { + m.mu.Lock() + m.wlPrefixes[pfx.String()] = struct{}{} + m.mu.Unlock() + m.saveWhitelistPrefixesToDisk() + return nil +} +func (m *memStore) RemoveWhitelistPrefix(pfx 
netip.Prefix) error { + m.mu.Lock() + delete(m.wlPrefixes, pfx.String()) + m.mu.Unlock() + m.saveWhitelistPrefixesToDisk() + return nil +} +func (m *memStore) ListWhitelistPrefixes() ([]netip.Prefix, error) { + m.mu.RLock() + defer m.mu.RUnlock() + out := make([]netip.Prefix, 0, len(m.wlPrefixes)) + for k := range m.wlPrefixes { + if p, err := netip.ParsePrefix(k); err == nil { + out = append(out, p) + } + } + sort.Slice(out, func(i, j int) bool { return out[i].String() < out[j].String() }) + return out, nil +} + +func (m *memStore) loadWhitelistFromDisk() { + f, err := os.Open(m.wlFile()) + if err != nil { + return // Datei existiert noch nicht – ok + } + defer f.Close() + var tmp []string + if err := json.NewDecoder(f).Decode(&tmp); err == nil { + for _, ip := range tmp { + if strings.TrimSpace(ip) != "" { + m.whitelist[ip] = struct{}{} + } + } + } +} + +func (m *memStore) saveWhitelistToDisk() { + // stabile Reihenfolge ist nice-to-have + m.mu.RLock() + keys := make([]string, 0, len(m.whitelist)) + for k := range m.whitelist { + keys = append(keys, k) + } + m.mu.RUnlock() + sort.Strings(keys) + + b, _ := json.MarshalIndent(keys, "", " ") + tmp := m.wlFile() + ".tmp" + _ = os.WriteFile(tmp, b, 0o644) + _ = os.Rename(tmp, m.wlFile()) +} + +func (m *memStore) ensureCat(cat string) { + if _, ok := m.byCat[cat]; !ok { + m.byCat[cat] = map[string]struct{}{} + } +} + +func (m *memStore) PutPrefix(cat string, prefix string) error { + m.mu.Lock() + defer m.mu.Unlock() + m.ensureCat(cat) + m.byCat[cat][prefix] = struct{}{} + return nil +} + +func (m *memStore) ListPrefixes(cat string) ([]netip.Prefix, error) { + m.mu.RLock() + defer m.mu.RUnlock() + set := m.byCat[cat] + out := make([]netip.Prefix, 0, len(set)) + for k := range set { + if p, err := netip.ParsePrefix(k); err == nil { + out = append(out, p) + } + } + // stable order + sort.Slice(out, func(i, j int) bool { return out[i].String() < out[j].String() }) + return out, nil +} + +func (m *memStore) ListCats() 
[]string { + m.mu.RLock() + defer m.mu.RUnlock() + out := make([]string, 0, len(m.byCat)) + for c := range m.byCat { + out = append(out, c) + } + sort.Strings(out) + return out +} + +func (m *memStore) Count(cat string) (int, error) { + m.mu.RLock() + defer m.mu.RUnlock() + return len(m.byCat[cat]), nil +} + +// AddWhitelist records a single IP on the whitelist and persists it to disk. +// NOTE: lock released explicitly (no defer) — a defer here would double-unlock +// and panic, and persistence must run outside the lock because +// saveWhitelistToDisk acquires RLock itself. +func (m *memStore) AddWhitelist(ip netip.Addr) error { + m.mu.Lock() + m.whitelist[ip.String()] = struct{}{} + m.mu.Unlock() + m.saveWhitelistToDisk() + return nil +} + +func (m *memStore) IsWhitelisted(ip netip.Addr) (bool, error) { + m.mu.RLock() + defer m.mu.RUnlock() + if _, ok := m.whitelist[ip.String()]; ok { + return true, nil + } + for k := range m.wlPrefixes { + if p, err := netip.ParsePrefix(k); err == nil && p.Contains(ip) { + return true, nil + } + } + return false, nil +} + +func (m *memStore) AddManual(ip, port, proto string) error { + m.mu.Lock() + defer m.mu.Unlock() + key := ip + "/32" + m.manualHits[key] = append(m.manualHits[key], port+"/"+proto) + return nil +} + +func (m *memStore) ListManual() map[string][]string { + m.mu.RLock() + defer m.mu.RUnlock() + out := make(map[string][]string, len(m.manualHits)) + for k, v := range m.manualHits { + dst := make([]string, len(v)) + copy(dst, v) + out[k] = dst + } + return out +} + +// Redis implementation (optional) +type redisStore struct { + rdb *redis.Client + ctx context.Context +} + +func newRedisStore() (*redisStore, error) { + addr := getenv("REDIS_ADDR", "localhost:6379") + user := os.Getenv("REDIS_USER") + pass := os.Getenv("REDIS_PASS") + rdb := redis.NewClient(&redis.Options{Addr: addr, Username: user, Password: pass, DB: 0}) + ctx := context.Background() + if err := rdb.Ping(ctx).Err(); err != nil { + return nil, err + } + return &redisStore{rdb: rdb, ctx: ctx}, nil +} + +func (s *redisStore) catKey(cat string) string { return "bl:" + cat } +func (s *redisStore) wlKey(ip string) string { return "wl:" + ip } +func (s *redisStore) wlPrefKey() string { return 
"wl:prefixes" } +func (s *redisStore) manKey(ip string) string { return "bl:manual:" + ip } + +func (s *redisStore) AddWhitelistPrefix(pfx netip.Prefix) error { + return s.rdb.HSet(s.ctx, s.wlPrefKey(), pfx.String(), 1).Err() +} +func (s *redisStore) RemoveWhitelistPrefix(pfx netip.Prefix) error { + return s.rdb.HDel(s.ctx, s.wlPrefKey(), pfx.String()).Err() +} +func (s *redisStore) ListWhitelistPrefixes() ([]netip.Prefix, error) { + keys, err := s.rdb.HKeys(s.ctx, s.wlPrefKey()).Result() + if err != nil { + return nil, err + } + out := make([]netip.Prefix, 0, len(keys)) + for _, k := range keys { + if p, err := netip.ParsePrefix(strings.TrimSpace(k)); err == nil { + out = append(out, p) + } + } + sort.Slice(out, func(i, j int) bool { return out[i].String() < out[j].String() }) + return out, nil +} + +func (s *redisStore) WhitelistCount() (int, error) { + // count wl:* keys via SCAN (not KEYS); the pattern also matches the + // wl:prefixes hash, which is not a whitelisted IP, so skip it. + var cursor uint64 + total := 0 + for { + keys, c, err := s.rdb.Scan(s.ctx, cursor, "wl:*", 1000).Result() + if err != nil { + return 0, err + } + for _, k := range keys { if k != s.wlPrefKey() { total++ } } + cursor = c + if cursor == 0 { + break + } + } + return total, nil +} + +func (s *redisStore) ManualUniqueIPs() (int, error) { + n, err := s.rdb.HLen(s.ctx, "bl:manual").Result() + return int(n), err +} + +func (s *redisStore) PutPrefix(cat string, prefix string) error { + return s.rdb.HSet(s.ctx, s.catKey(cat), prefix, 1).Err() +} + +func (s *redisStore) blKey() string { return "bl:manual:blacklist" } + +func (s *redisStore) AddBlacklist(pfx netip.Prefix, reason string) error { + return s.rdb.HSet(s.ctx, s.blKey(), pfx.String(), strings.TrimSpace(reason)).Err() +} +func (s *redisStore) DelBlacklist(pfx netip.Prefix) error { + return s.rdb.HDel(s.ctx, s.blKey(), pfx.String()).Err() +} +func (s *redisStore) ListBlacklist() (map[netip.Prefix]string, error) { + m, err := s.rdb.HGetAll(s.ctx, s.blKey()).Result() + if err != nil { + return nil, err + } + out := make(map[netip.Prefix]string, len(m)) + for k, v := 
range m { + if p, err := netip.ParsePrefix(k); err == nil { + out[p] = v + } + } + return out, nil +} +func (s *redisStore) CountBlacklist() (int, error) { + n, err := s.rdb.HLen(s.ctx, s.blKey()).Result() + return int(n), err +} + +func (s *redisStore) ListPrefixes(cat string) ([]netip.Prefix, error) { + keys, err := s.rdb.HKeys(s.ctx, s.catKey(cat)).Result() + if err != nil { + return nil, err + } + out := make([]netip.Prefix, 0, len(keys)) + for _, k := range keys { + if p, err := netip.ParsePrefix(strings.TrimSpace(k)); err == nil { + out = append(out, p) + } + } + sort.Slice(out, func(i, j int) bool { return out[i].String() < out[j].String() }) + return out, nil +} + +func (s *redisStore) ListCats() []string { + // Redis doesn't list categories directly; caller should track categories elsewhere. + // As a pragmatic fallback we read from a catalog file or rely on the importer to know them. + // Here we just return [] and rely on the dynamic catalog in memory. + return []string{} +} + +func (s *redisStore) Count(cat string) (int, error) { + n, err := s.rdb.HLen(s.ctx, s.catKey(cat)).Result() + return int(n), err +} + +func (s *redisStore) AddWhitelist(ip netip.Addr) error { + return s.rdb.Set(s.ctx, s.wlKey(ip.String()), 1, 0).Err() +} + +func (s *redisStore) IsWhitelisted(ip netip.Addr) (bool, error) { + ex, err := s.rdb.Exists(s.ctx, s.wlKey(ip.String())).Result() + if err != nil { + return false, err + } + if ex > 0 { + return true, nil + } + + keys, err := s.rdb.HKeys(s.ctx, s.wlPrefKey()).Result() + if err != nil { + return false, err + } + for _, k := range keys { + if p, err := netip.ParsePrefix(strings.TrimSpace(k)); err == nil && p.Contains(ip) { + return true, nil + } + } + return false, nil +} + +func (s *redisStore) AddManual(ip, port, proto string) error { + ipKey := ip + "/32" + if err := s.rdb.HSet(s.ctx, "bl:manual", ipKey, 1).Err(); err != nil { + return err + } + return s.rdb.SAdd(s.ctx, s.manKey(ipKey), port+"/"+proto).Err() +} + +func (s 
*redisStore) ListManual() map[string][]string { + keys, _ := s.rdb.HKeys(s.ctx, "bl:manual").Result() + out := make(map[string][]string) + for _, ip := range keys { + ports, _ := s.rdb.SMembers(s.ctx, s.manKey(ip)).Result() + out[ip] = ports + } + return out +} + +// ── Metrics ─────────────────────────────────────────────────────────────────── + +type histogram struct { + // prom-typische Buckets + bounds []float64 + // handler -> counts/buckets + mu sync.Mutex + counts map[string][]uint64 + sum map[string]float64 + n map[string]uint64 +} + +func newHistogram(bounds []float64) *histogram { + return &histogram{ + bounds: bounds, + counts: map[string][]uint64{}, + sum: map[string]float64{}, + n: map[string]uint64{}, + } +} +func (h *histogram) observe(name string, v float64) { + h.mu.Lock() + defer h.mu.Unlock() + if _, ok := h.counts[name]; !ok { + h.counts[name] = make([]uint64, len(h.bounds)) + } + // Bucket suchen (<= le) + for i, b := range h.bounds { + if v <= b { + h.counts[name][i]++ + break + } + if i == len(h.bounds)-1 { + // größer als letzter Bucket: keine extra-+Inf bucket, + // Prom konform wird _count separat emittiert. 
+ } + } + h.sum[name] += v + h.n[name]++ +} + +type metricsT struct { + // Requests + reqTotal sync.Map // key: handler -> *uint64 + respTotal sync.Map // key: handler|code -> *uint64 + inflight uint64 + + // Latenzen + lat *histogram + + // Importer + importerCycles uint64 + importerErrors sync.Map // key: category -> *uint64 + importerLastSuccess int64 // unix seconds + importerDur *histogram + + // Traefik Blocks + traefikBlocks uint64 + + // Downloads + downloads sync.Map // key: category -> *uint64 +} + +var metrics = &metricsT{ + lat: newHistogram([]float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10}), + importerDur: newHistogram([]float64{0.1, 0.25, 0.5, 1, 2, 5, 10, 20, 40, 60}), +} + +func (m *metricsT) incReq(handler string) { + v, _ := m.reqTotal.LoadOrStore(handler, new(uint64)) + atomic.AddUint64(v.(*uint64), 1) +} +func (m *metricsT) incResp(handler string, code int) { + key := fmt.Sprintf("%s|%d", handler, code) + v, _ := m.respTotal.LoadOrStore(key, new(uint64)) + atomic.AddUint64(v.(*uint64), 1) +} +func (m *metricsT) incImporterErr(cat string) { + v, _ := m.importerErrors.LoadOrStore(cat, new(uint64)) + atomic.AddUint64(v.(*uint64), 1) +} + +type statusRecorder struct { + http.ResponseWriter + status int +} + +func (r *statusRecorder) WriteHeader(code int) { r.status = code; r.ResponseWriter.WriteHeader(code) } + +// HTTP-Middleware zum Messen +func (s *server) withMetrics(next http.HandlerFunc, name string) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint64(&metrics.inflight, 1) + defer atomic.AddUint64(&metrics.inflight, ^uint64(0)) // -1 + + rec := &statusRecorder{ResponseWriter: w, status: 200} + start := time.Now() + next(rec, r) + dur := time.Since(start).Seconds() + + metrics.incReq(name) + metrics.incResp(name, rec.status) + metrics.lat.observe(name, dur) + } +} + +// ────────────────────────────────────────────────────────────────────────────── +// Catalog + Importer +// 
────────────────────────────────────────────────────────────────────────────── + +func loadCatalog() (map[string]string, error) { + if importBaseURL == "" { + // fallback: minimal default list + return map[string]string{ + "flod-official": "https://git.send.nrw/sendnrw/flod-lists/src/branch/main/", + }, nil + } + catalogURL := strings.TrimRight(importBaseURL, "/") + "/lists.json" + log.Printf("Fetching catalog from %s", catalogURL) + resp, err := http.Get(catalogURL) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("catalog status: %s", resp.Status) + } + var m map[string]string + if err := json.NewDecoder(resp.Body).Decode(&m); err != nil { + return nil, err + } + // also write to disk for transparency + _ = os.MkdirAll("./lists", 0o755) + f, _ := os.Create(filepath.Join("./lists", "lists.json")) + defer f.Close() + enc := json.NewEncoder(f) + enc.SetIndent("", " ") + _ = enc.Encode(m) + return m, nil +} + +func normalizeLineToPrefix(s string) (string, bool) { + s = strings.TrimSpace(s) + if s == "" || strings.HasPrefix(s, "#") { + return "", false + } + if !strings.Contains(s, "/") { + ip := net.ParseIP(s) + if ip == nil { + return "", false + } + if ip.To4() != nil { + s += "/32" + } else { + s += "/128" + } + } + if _, err := netip.ParsePrefix(s); err != nil { + return "", false + } + return s, true +} + +func importCategory(st Store, cat, url string) (int, error) { + resp, err := http.Get(url) + if err != nil { + return 0, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return 0, fmt.Errorf("bad status %s", resp.Status) + } + sc := bufio.NewScanner(resp.Body) + count := 0 + for sc.Scan() { + if pfx, ok := normalizeLineToPrefix(sc.Text()); ok { + if err := st.PutPrefix(cat, pfx); err != nil { + return count, err + } + count++ + if count%2000 == 0 { + log.Printf("[%s] %d entries", cat, count) + } + } + } + return count, sc.Err() +} + +func 
startImporter(st Store, cats map[string]string) { + go func() { + for { + start := time.Now() + log.Println("Starting blocklist import...") + var wg sync.WaitGroup + for cat, url := range cats { + cat, url := cat, url + wg.Add(1) + go func() { + defer wg.Done() + t0 := time.Now() + n, err := importCategory(st, cat, url) + d := time.Since(t0).Seconds() + metrics.importerDur.observe(cat, d) + if err != nil { + metrics.incImporterErr(cat) + log.Printf("❌ import %s: %v", cat, err) + return + } + log.Printf("✅ [%s] %d entries", cat, n) + }() + } + wg.Wait() + atomic.AddUint64(&metrics.importerCycles, 1) + atomic.StoreInt64(&metrics.importerLastSuccess, time.Now().Unix()) + log.Printf("Import cycle finished in %s", time.Since(start)) + time.Sleep(importInterval) + } + }() +} + +// ────────────────────────────────────────────────────────────────────────────── +// HTTP layer +// ────────────────────────────────────────────────────────────────────────────── + +type server struct { + st Store + catalog map[string]string // latest catalog for category list + mu sync.RWMutex +} + +func (s *server) cats() []string { + // prefer catalog keys if present + s.mu.RLock() + defer s.mu.RUnlock() + if s.catalog != nil { + keys := make([]string, 0, len(s.catalog)) + for k := range s.catalog { + keys = append(keys, k) + } + sort.Strings(keys) + return keys + } + return s.st.ListCats() +} + +func clientIPFromHeaders(r *http.Request) (netip.Addr, error) { + if xff := r.Header.Get("X-Forwarded-For"); xff != "" { + parts := strings.Split(xff, ",") + s := strings.TrimSpace(parts[0]) + if a, err := netip.ParseAddr(s); err == nil { + return a.Unmap(), nil + } + } + if xr := r.Header.Get("X-Real-Ip"); xr != "" { + if a, err := netip.ParseAddr(strings.TrimSpace(xr)); err == nil { + return a.Unmap(), nil + } + } + host, _, err := net.SplitHostPort(r.RemoteAddr) + if err == nil { + if a, err := netip.ParseAddr(host); err == nil { + return a.Unmap(), nil + } + } + return netip.Addr{}, 
errors.New("cannot determine client ip") +} + +func (s *server) handleCheck(w http.ResponseWriter, r *http.Request) { + ipStr := strings.TrimPrefix(r.URL.Path, "/check/") + ip, err := netip.ParseAddr(ipStr) + if err != nil { + http.Error(w, "invalid IP", http.StatusBadRequest) + return + } + matches, _ := s.checkIP(ip, s.cats()) + writeJSON(w, map[string]any{ + "ip": ip.String(), + "blocked": len(matches) > 0, + "categories": matches, + }) +} + +func (s *server) handleTraefik(w http.ResponseWriter, r *http.Request) { + ip, err := clientIPFromHeaders(r) + if err != nil { + http.Error(w, "invalid IP", http.StatusBadRequest) + return + } + matches, _ := s.checkIP(ip, s.cats()) + if len(matches) > 0 { + atomic.AddUint64(&metrics.traefikBlocks, 1) // NEW + errorhtml(w, r) + return + } + w.WriteHeader(http.StatusOK) + w.Write([]byte("OK")) +} + +func errorhtml(w http.ResponseWriter, r *http.Request) { + html := ` + + + + + Blocked by the First-Line-Of-Defense-Project + + + +
+
+

You have been blocked by the First-Line-Of-Defense-Project

+ ERROR 403 Forbidden +
+

+ Your connection attempt to the target server was blocked by the First-Line-Of-Defense-Project. Your IP address is listed on at least one blacklist. +

+ +
+
+ +

You

+

Browser

+

Functional

+
+ + +
+ +

FLODP-SERVICE

+

Security-Gateway

+

Blocked your request

+
+ + +
+ +

Host

+

Origin-Server

+

Functional

+
+
+ +
+ + +
+ +
+ Technical details + +

Tips: Check if your system (browser, API, or similar) has a high connection frequency and has been blocked on other systems protected by FLODP.

+
+ + +
+ + + +` + + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.WriteHeader(http.StatusForbidden) + _, _ = w.Write([]byte(html)) +} + +func (s *server) handleWhitelist(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + var body struct { + Entry string `json:"ip"` // rückwärtskompatibel: Feld heißt weiter "ip" + // optional: "entry" wäre semantisch besser; hier bleiben wir kompatibel + } + if err := json.NewDecoder(r.Body).Decode(&body); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + sRaw := strings.TrimSpace(body.Entry) + + // 1) Einzel-IP? + if addr, err := netip.ParseAddr(sRaw); err == nil { + if err := s.st.AddWhitelist(addr.Unmap()); err != nil { + http.Error(w, "store error", http.StatusInternalServerError) + return + } + writeJSON(w, map[string]any{"status": "whitelisted_ip", "ip": addr.Unmap().String()}) + return + } + + // 2) Prefix? 
+ if norm, ok := normalizeLineToPrefix(sRaw); ok { + if pfx, err := netip.ParsePrefix(norm); err == nil { + if err := s.st.AddWhitelistPrefix(pfx); err != nil { + http.Error(w, "store error", http.StatusInternalServerError) + return + } + writeJSON(w, map[string]any{"status": "whitelisted_prefix", "prefix": pfx.String()}) + return + } + } + + http.Error(w, "invalid IP or prefix", http.StatusBadRequest) +} + +func (s *server) handleDownload(w http.ResponseWriter, r *http.Request) { + cat := strings.TrimPrefix(r.URL.Path, "/download/") + if cat == "" { + http.Error(w, "category missing", http.StatusBadRequest) + return + } + // validate category if catalog is present + s.mu.RLock() + _, known := s.catalog[cat] + s.mu.RUnlock() + if s.catalog != nil && !known { + http.Error(w, "unknown category", http.StatusNotFound) + return + } + prefixes, err := s.st.ListPrefixes(cat) + if err != nil { + http.Error(w, "store error", http.StatusInternalServerError) + return + } + v, _ := metrics.downloads.LoadOrStore(cat, new(uint64)) + atomic.AddUint64(v.(*uint64), 1) + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s.txt\"", cat)) + for _, p := range prefixes { + fmt.Fprintln(w, p.String()) + } +} + +func (s *server) handleMetrics(w http.ResponseWriter, r *http.Request) { + var buf bytes.Buffer + + // Requests & Inflight + buf.WriteString("# HELP ipcheck_requests_total Total HTTP requests by handler\n") + buf.WriteString("# TYPE ipcheck_requests_total counter\n") + metrics.reqTotal.Range(func(k, v any) bool { + fmt.Fprintf(&buf, "ipcheck_requests_total{handler=%q} %d\n", k.(string), atomic.LoadUint64(v.(*uint64))) + return true + }) + + buf.WriteString("# HELP ipcheck_http_responses_total HTTP responses by handler and code\n") + buf.WriteString("# TYPE ipcheck_http_responses_total counter\n") + metrics.respTotal.Range(func(k, v any) bool { + parts := strings.SplitN(k.(string), "|", 2) + 
handler, code := parts[0], parts[1] + fmt.Fprintf(&buf, "ipcheck_http_responses_total{handler=%q,code=%q} %d\n", handler, code, atomic.LoadUint64(v.(*uint64))) + return true + }) + + buf.WriteString("# HELP ipcheck_requests_inflight Inflight HTTP requests\n") + buf.WriteString("# TYPE ipcheck_requests_inflight gauge\n") + fmt.Fprintf(&buf, "ipcheck_requests_inflight %d\n", atomic.LoadUint64(&metrics.inflight)) + + // Latenz-Histogramme + buf.WriteString("# HELP ipcheck_request_duration_seconds Request duration seconds\n") + buf.WriteString("# TYPE ipcheck_request_duration_seconds histogram\n") + metrics.lat.mu.Lock() + for h, counts := range metrics.lat.counts { + cum := uint64(0) + for i, le := range metrics.lat.bounds { + cum += counts[i] + fmt.Fprintf(&buf, "ipcheck_request_duration_seconds_bucket{handler=%q,le=%q} %d\n", h, fmt.Sprintf("%.3f", le), cum) + } + // +Inf bucket: gesamter count + total := metrics.lat.n[h] + fmt.Fprintf(&buf, "ipcheck_request_duration_seconds_bucket{handler=%q,le=\"+Inf\"} %d\n", h, total) + fmt.Fprintf(&buf, "ipcheck_request_duration_seconds_sum{handler=%q} %.6f\n", h, metrics.lat.sum[h]) + fmt.Fprintf(&buf, "ipcheck_request_duration_seconds_count{handler=%q} %d\n", h, total) + } + metrics.lat.mu.Unlock() + + // Importer + buf.WriteString("# HELP ipcheck_import_cycles_total Completed import cycles\n") + buf.WriteString("# TYPE ipcheck_import_cycles_total counter\n") + fmt.Fprintf(&buf, "ipcheck_import_cycles_total %d\n", atomic.LoadUint64(&metrics.importerCycles)) + + buf.WriteString("# HELP ipcheck_import_last_success_timestamp_seconds Last successful import Unix time\n") + buf.WriteString("# TYPE ipcheck_import_last_success_timestamp_seconds gauge\n") + fmt.Fprintf(&buf, "ipcheck_import_last_success_timestamp_seconds %d\n", atomic.LoadInt64(&metrics.importerLastSuccess)) + + buf.WriteString("# HELP ipcheck_import_errors_total Import errors by category\n") + buf.WriteString("# TYPE ipcheck_import_errors_total counter\n") + 
metrics.importerErrors.Range(func(k, v any) bool { + fmt.Fprintf(&buf, "ipcheck_import_errors_total{category=%q} %d\n", k.(string), atomic.LoadUint64(v.(*uint64))) + return true + }) + + buf.WriteString("# HELP ipcheck_import_duration_seconds Import durations by category\n") + buf.WriteString("# TYPE ipcheck_import_duration_seconds histogram\n") + metrics.importerDur.mu.Lock() + for cat, counts := range metrics.importerDur.counts { + cum := uint64(0) + for i, le := range metrics.importerDur.bounds { + cum += counts[i] + fmt.Fprintf(&buf, "ipcheck_import_duration_seconds_bucket{category=%q,le=%q} %d\n", cat, fmt.Sprintf("%.2f", le), cum) + } + total := metrics.importerDur.n[cat] + fmt.Fprintf(&buf, "ipcheck_import_duration_seconds_bucket{category=%q,le=\"+Inf\"} %d\n", cat, total) + fmt.Fprintf(&buf, "ipcheck_import_duration_seconds_sum{category=%q} %.6f\n", cat, metrics.importerDur.sum[cat]) + fmt.Fprintf(&buf, "ipcheck_import_duration_seconds_count{category=%q} %d\n", cat, total) + } + metrics.importerDur.mu.Unlock() + + // Blocklist-Größen je Kategorie (bestehend) + for _, c := range s.cats() { + n, _ := s.st.Count(c) + fmt.Fprintf(&buf, "ipcheck_blocklist_hash_size{category=%q} %d\n", c, n) + } + + // Katalog-Anzahl Kategorien + buf.WriteString("# HELP ipcheck_catalog_categories_total Number of categories in catalog\n") + buf.WriteString("# TYPE ipcheck_catalog_categories_total gauge\n") + fmt.Fprintf(&buf, "ipcheck_catalog_categories_total %d\n", len(s.cats())) + + // Honeypot: bestehende Port-Hits + neue Unique-IPs + hits := s.st.ListManual() + portCount := map[string]int{} + for _, ports := range hits { + for _, p := range ports { + portCount[p]++ + } + } + buf.WriteString("# TYPE honeypot_port_hits counter\n") + for pp, n := range portCount { + parts := strings.Split(pp, "/") + if len(parts) == 2 { + fmt.Fprintf(&buf, "honeypot_port_hits{port=%q,protocol=%q} %d\n", parts[0], parts[1], n) + } + } + if uniq, err := s.st.ManualUniqueIPs(); err == nil { + 
buf.WriteString("# HELP honeypot_unique_ips Total unique source IPs seen by honeypot\n") + buf.WriteString("# TYPE honeypot_unique_ips gauge\n") + fmt.Fprintf(&buf, "honeypot_unique_ips %d\n", uniq) + } + + // Whitelist Größe + if wl, err := s.st.WhitelistCount(); err == nil { + buf.WriteString("# HELP ipcheck_whitelist_total Whitelisted IPs\n") + buf.WriteString("# TYPE ipcheck_whitelist_total gauge\n") + fmt.Fprintf(&buf, "ipcheck_whitelist_total %d\n", wl) + } + + // Traefik Blocks + buf.WriteString("# HELP ipcheck_traefik_blocks_total Traefik blocks due to matches\n") + buf.WriteString("# TYPE ipcheck_traefik_blocks_total counter\n") + fmt.Fprintf(&buf, "ipcheck_traefik_blocks_total %d\n", atomic.LoadUint64(&metrics.traefikBlocks)) + + // Downloads pro Kategorie + buf.WriteString("# HELP ipcheck_downloads_total Downloads served by category\n") + buf.WriteString("# TYPE ipcheck_downloads_total counter\n") + metrics.downloads.Range(func(k, v any) bool { + fmt.Fprintf(&buf, "ipcheck_downloads_total{category=%q} %d\n", k.(string), atomic.LoadUint64(v.(*uint64))) + return true + }) + + if n, err := s.st.CountBlacklist(); err == nil { + fmt.Fprintf(&buf, "ipcheck_manual_blacklist_size %d\n", n) + } + + w.Header().Set("Content-Type", "text/plain; version=0.0.4") + w.Write(buf.Bytes()) +} + +func (s *server) handleGUI(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.Write([]byte(` +Blocklist GUI + + +

IP Checker + Whitelist

+ + +

Ergebnis

No Request
+

Blacklist

+ + + + +
-
+ + +`)) +} + +func writeJSON(w http.ResponseWriter, v any) { + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(v) +} + +func (s *server) handleBlacklistRoot(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodGet: + s.handleBlacklistList(w, r) + case http.MethodPost: + s.handleBlacklistAdd(w, r) + default: + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + } +} + +func (s *server) handleBlacklistAdd(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + var body struct { + Prefix string `json:"prefix"` // "1.2.3.4" oder "1.2.3.0/24" etc. + Reason string `json:"reason"` + } + if err := json.NewDecoder(r.Body).Decode(&body); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + norm, ok := normalizeLineToPrefix(strings.TrimSpace(body.Prefix)) + if !ok { + http.Error(w, "invalid prefix/ip", http.StatusBadRequest) + return + } + pfx, _ := netip.ParsePrefix(norm) + if err := s.st.AddBlacklist(pfx, body.Reason); err != nil { + http.Error(w, "store error", http.StatusInternalServerError) + return + } + writeJSON(w, map[string]any{"status": "blacklisted", "prefix": pfx.String(), "reason": strings.TrimSpace(body.Reason)}) +} + +func (s *server) handleBlacklistDel(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodDelete { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + raw := strings.TrimPrefix(r.URL.Path, "/blacklist/") + norm, ok := normalizeLineToPrefix(strings.TrimSpace(raw)) + if !ok { + http.Error(w, "invalid prefix/ip", http.StatusBadRequest) + return + } + pfx, _ := netip.ParsePrefix(norm) + if err := s.st.DelBlacklist(pfx); err != nil { + http.Error(w, "store error", http.StatusInternalServerError) + return + } + writeJSON(w, map[string]any{"status": "removed", "prefix": pfx.String()}) +} + +func (s 
*server) handleBlacklistList(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + m, err := s.st.ListBlacklist() + if err != nil { + http.Error(w, "store error", http.StatusInternalServerError) + return + } + type entry struct{ Prefix, Reason string } + var list []entry + for p, reason := range m { + list = append(list, entry{Prefix: p.String(), Reason: reason}) + } + sort.Slice(list, func(i, j int) bool { return list[i].Prefix < list[j].Prefix }) + writeJSON(w, map[string]any{"entries": list}) +} + +func (s *server) checkIP(ip netip.Addr, cats []string) ([]string, error) { + // 1) Whitelist gewinnt + if ok, _ := s.st.IsWhitelisted(ip); ok { + return nil, nil + } + + // 2) Manuelle Blacklist + bl, _ := s.st.ListBlacklist() + for pfx := range bl { + if pfx.Contains(ip) { + // "manual-blacklist" als erste Kategorie markieren + return []string{"manual-blacklist"}, nil + } + } + + // 3) Import-Kategorien + var matches []string + for _, c := range cats { + pfxs, err := s.st.ListPrefixes(c) + if err != nil { + return nil, err + } + for _, p := range pfxs { + if p.Contains(ip) { + matches = append(matches, c) + break + } + } + } + return matches, nil +} + +// ────────────────────────────────────────────────────────────────────────────── +// Honeypot (optional) +// ────────────────────────────────────────────────────────────────────────────── + +func startTCPListener(st Store, network, port string) { + ln, err := net.Listen(network, ":"+port) + if err != nil { + log.Printf("❌ could not listen on %s/%s: %v", network, port, err) + return + } + log.Printf("🚀 TCP Honeypot on %s/%s", network, port) + for { + conn, err := ln.Accept() + if err != nil { + log.Printf("accept err: %v", err) + continue + } + ip, _, _ := net.SplitHostPort(conn.RemoteAddr().String()) + _ = st.AddManual(ip, port, "TCP") + conn.Close() + } +} + +func startUDPListener(st Store, network, port string) { 
+ addr := net.UDPAddr{Port: atoi(port)} + conn, err := net.ListenUDP(network, &addr) + if err != nil { + log.Printf("❌ could not listen on %s/%s: %v", network, port, err) + return + } + log.Printf("🚀 UDP Honeypot on %s/%s", network, port) + buf := make([]byte, 1024) + for { + n, ra, err := conn.ReadFromUDP(buf) + if err != nil { + log.Printf("udp read err: %v", err) + continue + } + if n > 0 { + _ = st.AddManual(ra.IP.String(), port, "UDP") + } + } +} + +func atoi(s string) int { + n, _ := strconvAtoiSafe(s) + return n +} +func strconvAtoiSafe(s string) (int, error) { + var n int + for _, r := range s { + if r < '0' || r > '9' { + return 0, fmt.Errorf("bad int %q", s) + } + n = n*10 + int(r-'0') + } + return n, nil +} + +// ────────────────────────────────────────────────────────────────────────────── +// main +// ────────────────────────────────────────────────────────────────────────────── + +func main() { + // choose store + var st Store + switch strings.ToLower(storeKind) { + case "redis": + rs, err := newRedisStore() + if err != nil { + log.Fatalf("redis: %v", err) + } + st = rs + default: + st = newMemStore() + } + + // fetch catalog and start importer + cat, err := loadCatalog() + if err != nil { + log.Printf("catalog error: %v (using fallback FLOD only)", err) + cat = map[string]string{ + "flod-official": "https://git.send.nrw/sendnrw/flod-lists/src/branch/main/lists.json", + } + } + srv := &server{st: st, catalog: cat} + startImporter(st, cat) + + // honeypot listeners + if enableHoneypot { + for _, p := range honeyTCP { + go startTCPListener(st, "tcp4", p) + go startTCPListener(st, "tcp6", p) + } + for _, p := range honeyUDP { + go startUDPListener(st, "udp4", p) + go startUDPListener(st, "udp6", p) + } + } + + // routes + http.HandleFunc("/", srv.withMetrics(srv.handleGUI, "gui")) + http.HandleFunc("/check/", srv.withMetrics(srv.handleCheck, "check")) + http.HandleFunc("/traefik", srv.withMetrics(srv.handleTraefik, "traefik")) + 
http.HandleFunc("/whitelist", srv.withMetrics(srv.handleWhitelist, "whitelist")) + http.HandleFunc("/download/", srv.withMetrics(srv.handleDownload, "download")) + http.HandleFunc("/metrics", srv.handleMetrics) // selbst enthält keine eigene Messung + //http.HandleFunc("/blacklist", srv.withMetrics(srv.handleBlacklistAdd, "blacklist_add")) // POST + /*http.HandleFunc("/blacklist/", func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodDelete: + srv.withMetrics(srv.handleBlacklistDel, "blacklist_del")(w, r) + case http.MethodGet: + // erlauben: GET /blacklist (Liste) + if r.URL.Path == "/blacklist/" || r.URL.Path == "/blacklist" { + srv.withMetrics(srv.handleBlacklistList, "blacklist_list")(w, r) + return + } + http.NotFound(w, r) + default: + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + } + })*/ + + http.HandleFunc("/blacklist", srv.withMetrics(srv.handleBlacklistRoot, "blacklist_root")) // GET & POST + http.HandleFunc("/blacklist/", srv.withMetrics(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/blacklist/" && r.Method == http.MethodGet { + srv.handleBlacklistList(w, r) + return + } + if r.Method == http.MethodDelete { + srv.handleBlacklistDel(w, r) + return + } + http.Error(w, "not found", http.StatusNotFound) + }, "blacklist_misc")) + + log.Printf("listening on %s (store=%s, honeypot=%v)", listenAddr, storeKind, enableHoneypot) + if err := http.ListenAndServe(listenAddr, nil); err != nil { + log.Fatal(err) + } +}