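// Command flod-ipcheck is the First-Line-Of-Defense (FLOD) IP check service:
// it imports blocklist prefixes from a catalog, answers lookup requests
// (including Traefik forward-auth checks), manages white-/blacklists, exposes
// Prometheus-style metrics and can optionally run a small honeypot.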
package main

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"log"
	"net"
	"net/http"
	"net/netip"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/redis/go-redis/v9"
)

// ──────────────────────────────────────────────────────────────────────────────
// Config via ENV
// ──────────────────────────────────────────────────────────────────────────────

func getenv(key, def string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return def
}

const defaultDelay = 2 * time.Second // default gap between two downloads

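// Example invocation (illustrative values only; binary name assumed, defaults
// are whatever the getenv() calls below say):
//
//	LISTEN_ADDR=":8080" STORE=redis REDIS_ADDR="redis:6379" \
//	IMPORT_INTERVAL="1h" DELAY="5" HONEYPOT=on DATA_DIR="/data" ./flod-ipcheck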
var (
	listenAddr     = getenv("LISTEN_ADDR", ":8080")
	storeKind      = getenv("STORE", "memory") // "memory" or "redis"
	importBaseURL  = getenv("FLOD_IMPORT_URL", "https://git.send.nrw/sendnrw/flod-lists/raw/branch/main/")
	importInterval = mustParseDuration(getenv("IMPORT_INTERVAL", "30m"))
	delayStr       = os.Getenv("DELAY")
	enableHoneypot = strings.EqualFold(getenv("HONEYPOT", "off"), "on")
	honeyTCP       = splitCSV(getenv("HONEY_TCP", "135,139,445,389,636,3268,3269,88,3389"))
	honeyUDP       = splitCSV(getenv("HONEY_UDP", "135,137,138,389,3389,88"))
	dataDir        = getenv("DATA_DIR", "/data")
)

func splitCSV(s string) []string {
	var out []string
	for _, p := range strings.Split(s, ",") {
		p = strings.TrimSpace(p)
		if p != "" {
			out = append(out, p)
		}
	}
	return out
}

func mustParseDuration(s string) time.Duration {
	d, err := time.ParseDuration(s)
	if err != nil {
		log.Fatalf("bad IMPORT_INTERVAL %q: %v", s, err)
	}
	return d
}

// ──────────────────────────────────────────────────────────────────────────────
// Storage Abstraction
// ──────────────────────────────────────────────────────────────────────────────

type Store interface {
	// Blocklists
	PutPrefix(cat string, prefix string) error
	ListPrefixes(cat string) ([]netip.Prefix, error)
	ListCats() []string
	Count(cat string) (int, error)

	// Whitelist
	AddWhitelist(ip netip.Addr) error
	IsWhitelisted(ip netip.Addr) (bool, error)
	WhitelistCount() (int, error)
	AddWhitelistPrefix(pfx netip.Prefix) error
	RemoveWhitelistPrefix(pfx netip.Prefix) error
	ListWhitelistPrefixes() ([]netip.Prefix, error)

	// Manual hits
	AddManual(ip string, port string, proto string) error
	ListManual() map[string][]string
	ManualUniqueIPs() (int, error)

	AddBlacklist(pfx netip.Prefix, reason string) error
	DelBlacklist(pfx netip.Prefix) error
	ListBlacklist() (map[netip.Prefix]string, error) // Prefix -> Reason
	CountBlacklist() (int, error)
}

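// Note: every Store implementation must be safe for concurrent use – the HTTP
// handlers, the importer goroutines and the honeypot listeners all call into
// the same Store instance.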
// In-memory implementation (with simple persistence to ./data/*.txt on export if desired)
type memStore struct {
	mu         sync.RWMutex
	byCat      map[string]map[string]struct{} // cat -> "prefix" set
	whitelist  map[string]struct{}            // ip string
	wlPrefixes map[string]struct{}            // "ip/net"
	manualHits map[string][]string            // ip/32 -> ["port/proto", ...]
	blacklist  map[string]string              // prefix string -> reason
}

func (m *memStore) WhitelistCount() (int, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return len(m.whitelist), nil
}

func (m *memStore) ManualUniqueIPs() (int, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return len(m.manualHits), nil
}

func (m *memStore) blFile() string { return filepath.Join(dataDir, "blacklist.json") }

func (m *memStore) loadBlacklistFromDisk() {
	f, err := os.Open(m.blFile())
	if err != nil {
		return
	}
	defer f.Close()
	var tmp map[string]string
	if err := json.NewDecoder(f).Decode(&tmp); err == nil {
		m.blacklist = tmp
	}
}

func (m *memStore) saveBlacklistToDisk() {
	tmp := make(map[string]string, len(m.blacklist))
	for k, v := range m.blacklist {
		tmp[k] = v
	}

	b, _ := json.MarshalIndent(tmp, "", " ")
	tmpPath := m.blFile() + ".tmp"
	_ = os.WriteFile(tmpPath, b, 0o644)
	_ = os.Rename(tmpPath, m.blFile())
}

func (m *memStore) AddBlacklist(pfx netip.Prefix, reason string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.blacklist[pfx.String()] = strings.TrimSpace(reason)
	m.saveBlacklistToDisk()
	return nil
}

func (m *memStore) DelBlacklist(pfx netip.Prefix) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.blacklist, pfx.String())
	m.saveBlacklistToDisk()
	return nil
}

func (m *memStore) ListBlacklist() (map[netip.Prefix]string, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	out := make(map[netip.Prefix]string, len(m.blacklist))
	for k, v := range m.blacklist {
		if p, err := netip.ParsePrefix(k); err == nil {
			out[p] = v
		}
	}
	return out, nil
}

func (m *memStore) CountBlacklist() (int, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return len(m.blacklist), nil
}

func newMemStore() *memStore {
	m := &memStore{
		byCat:      map[string]map[string]struct{}{},
		whitelist:  map[string]struct{}{},
		wlPrefixes: map[string]struct{}{},
		manualHits: map[string][]string{},
		blacklist:  map[string]string{},
	}
	_ = os.MkdirAll(dataDir, 0o755)
	m.loadBlacklistFromDisk()
	m.loadWhitelistFromDisk()
	m.loadWhitelistPrefixesFromDisk()
	return m
}

func (m *memStore) wlFile() string {
	return filepath.Join(dataDir, "whitelist.json")
}

func (m *memStore) wlPrefixesFile() string { return filepath.Join(dataDir, "whitelist_prefixes.json") }

func (m *memStore) loadWhitelistPrefixesFromDisk() {
	f, err := os.Open(m.wlPrefixesFile())
	if err != nil {
		return
	}
	defer f.Close()
	var tmp []string
	if err := json.NewDecoder(f).Decode(&tmp); err == nil {
		for _, s := range tmp {
			if _, err := netip.ParsePrefix(s); err == nil {
				m.wlPrefixes[s] = struct{}{}
			}
		}
	}
}
func (m *memStore) saveWhitelistPrefixesToDisk() {
	m.mu.RLock()
	keys := make([]string, 0, len(m.wlPrefixes))
	for k := range m.wlPrefixes {
		keys = append(keys, k)
	}
	m.mu.RUnlock()
	sort.Strings(keys)
	b, _ := json.MarshalIndent(keys, "", " ")
	tmp := m.wlPrefixesFile() + ".tmp"
	_ = os.WriteFile(tmp, b, 0o644)
	_ = os.Rename(tmp, m.wlPrefixesFile())
}

func (m *memStore) AddWhitelistPrefix(pfx netip.Prefix) error {
	m.mu.Lock()
	m.wlPrefixes[pfx.String()] = struct{}{}
	m.mu.Unlock()
	m.saveWhitelistPrefixesToDisk()
	return nil
}
func (m *memStore) RemoveWhitelistPrefix(pfx netip.Prefix) error {
	m.mu.Lock()
	delete(m.wlPrefixes, pfx.String())
	m.mu.Unlock()
	m.saveWhitelistPrefixesToDisk()
	return nil
}
func (m *memStore) ListWhitelistPrefixes() ([]netip.Prefix, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	out := make([]netip.Prefix, 0, len(m.wlPrefixes))
	for k := range m.wlPrefixes {
		if p, err := netip.ParsePrefix(k); err == nil {
			out = append(out, p)
		}
	}
	sort.Slice(out, func(i, j int) bool { return out[i].String() < out[j].String() })
	return out, nil
}

func (m *memStore) loadWhitelistFromDisk() {
	f, err := os.Open(m.wlFile())
	if err != nil {
		return // file does not exist yet – that is fine
	}
	defer f.Close()
	var tmp []string
	if err := json.NewDecoder(f).Decode(&tmp); err == nil {
		for _, ip := range tmp {
			if strings.TrimSpace(ip) != "" {
				m.whitelist[ip] = struct{}{}
			}
		}
	}
}

func (m *memStore) saveWhitelistToDisk() {
	// a stable order is a nice-to-have
	m.mu.RLock()
	keys := make([]string, 0, len(m.whitelist))
	for k := range m.whitelist {
		keys = append(keys, k)
	}
	m.mu.RUnlock()
	sort.Strings(keys)

	b, _ := json.MarshalIndent(keys, "", " ")
	tmp := m.wlFile() + ".tmp"
	_ = os.WriteFile(tmp, b, 0o644)
	_ = os.Rename(tmp, m.wlFile())
}

func (m *memStore) ensureCat(cat string) {
	if _, ok := m.byCat[cat]; !ok {
		m.byCat[cat] = map[string]struct{}{}
	}
}

func (m *memStore) PutPrefix(cat string, prefix string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.ensureCat(cat)
	m.byCat[cat][prefix] = struct{}{}
	return nil
}

func (m *memStore) ListPrefixes(cat string) ([]netip.Prefix, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	set := m.byCat[cat]
	out := make([]netip.Prefix, 0, len(set))
	for k := range set {
		if p, err := netip.ParsePrefix(k); err == nil {
			out = append(out, p)
		}
	}
	// stable order
	sort.Slice(out, func(i, j int) bool { return out[i].String() < out[j].String() })
	return out, nil
}

func (m *memStore) ListCats() []string {
	m.mu.RLock()
	defer m.mu.RUnlock()
	out := make([]string, 0, len(m.byCat))
	for c := range m.byCat {
		out = append(out, c)
	}
	sort.Strings(out)
	return out
}

func (m *memStore) Count(cat string) (int, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return len(m.byCat[cat]), nil
}

func (m *memStore) AddWhitelist(ip netip.Addr) error {
	// Lock only around the map write; saveWhitelistToDisk takes its own RLock,
	// so it must run after the unlock (the previous defer+explicit Unlock pair
	// would have unlocked twice).
	m.mu.Lock()
	m.whitelist[ip.String()] = struct{}{}
	m.mu.Unlock()
	m.saveWhitelistToDisk()
	return nil
}

func (m *memStore) IsWhitelisted(ip netip.Addr) (bool, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	if _, ok := m.whitelist[ip.String()]; ok {
		return true, nil
	}
	for k := range m.wlPrefixes {
		if p, err := netip.ParsePrefix(k); err == nil && p.Contains(ip) {
			return true, nil
		}
	}
	return false, nil
}

func (m *memStore) AddManual(ip, port, proto string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	key := ip + "/32"
	m.manualHits[key] = append(m.manualHits[key], port+"/"+proto)
	return nil
}

func (m *memStore) ListManual() map[string][]string {
	m.mu.RLock()
	defer m.mu.RUnlock()
	out := make(map[string][]string, len(m.manualHits))
	for k, v := range m.manualHits {
		dst := make([]string, len(v))
		copy(dst, v)
		out[k] = dst
	}
	return out
}

// Redis implementation (optional)
type redisStore struct {
	rdb *redis.Client
	ctx context.Context
}

func newRedisStore() (*redisStore, error) {
	addr := getenv("REDIS_ADDR", "localhost:6379")
	user := os.Getenv("REDIS_USER")
	pass := os.Getenv("REDIS_PASS")
	rdb := redis.NewClient(&redis.Options{Addr: addr, Username: user, Password: pass, DB: 0})
	ctx := context.Background()
	if err := rdb.Ping(ctx).Err(); err != nil {
		return nil, err
	}
	return &redisStore{rdb: rdb, ctx: ctx}, nil
}

func (s *redisStore) catKey(cat string) string { return "bl:" + cat }
func (s *redisStore) wlKey(ip string) string   { return "wl:" + ip }
func (s *redisStore) wlPrefKey() string        { return "wl:prefixes" }
func (s *redisStore) manKey(ip string) string  { return "bl:manual:" + ip }

func (s *redisStore) AddWhitelistPrefix(pfx netip.Prefix) error {
	return s.rdb.HSet(s.ctx, s.wlPrefKey(), pfx.String(), 1).Err()
}
func (s *redisStore) RemoveWhitelistPrefix(pfx netip.Prefix) error {
	return s.rdb.HDel(s.ctx, s.wlPrefKey(), pfx.String()).Err()
}
func (s *redisStore) ListWhitelistPrefixes() ([]netip.Prefix, error) {
	keys, err := s.rdb.HKeys(s.ctx, s.wlPrefKey()).Result()
	if err != nil {
		return nil, err
	}
	out := make([]netip.Prefix, 0, len(keys))
	for _, k := range keys {
		if p, err := netip.ParsePrefix(strings.TrimSpace(k)); err == nil {
			out = append(out, p)
		}
	}
	sort.Slice(out, func(i, j int) bool { return out[i].String() < out[j].String() })
	return out, nil
}

func (s *redisStore) WhitelistCount() (int, error) {
	// count wl:* keys (SCAN instead of KEYS)
	var cursor uint64
	total := 0
	for {
		keys, c, err := s.rdb.Scan(s.ctx, cursor, "wl:*", 1000).Result()
		if err != nil {
			return 0, err
		}
		total += len(keys)
		cursor = c
		if cursor == 0 {
			break
		}
	}
	return total, nil
}

func (s *redisStore) ManualUniqueIPs() (int, error) {
	n, err := s.rdb.HLen(s.ctx, "bl:manual").Result()
	return int(n), err
}

func (s *redisStore) PutPrefix(cat string, prefix string) error {
	return s.rdb.HSet(s.ctx, s.catKey(cat), prefix, 1).Err()
}

func (s *redisStore) blKey() string { return "bl:manual:blacklist" }

func (s *redisStore) AddBlacklist(pfx netip.Prefix, reason string) error {
	return s.rdb.HSet(s.ctx, s.blKey(), pfx.String(), strings.TrimSpace(reason)).Err()
}
func (s *redisStore) DelBlacklist(pfx netip.Prefix) error {
	return s.rdb.HDel(s.ctx, s.blKey(), pfx.String()).Err()
}
func (s *redisStore) ListBlacklist() (map[netip.Prefix]string, error) {
	m, err := s.rdb.HGetAll(s.ctx, s.blKey()).Result()
	if err != nil {
		return nil, err
	}
	out := make(map[netip.Prefix]string, len(m))
	for k, v := range m {
		if p, err := netip.ParsePrefix(k); err == nil {
			out[p] = v
		}
	}
	return out, nil
}
func (s *redisStore) CountBlacklist() (int, error) {
	n, err := s.rdb.HLen(s.ctx, s.blKey()).Result()
	return int(n), err
}

func (s *redisStore) ListPrefixes(cat string) ([]netip.Prefix, error) {
	keys, err := s.rdb.HKeys(s.ctx, s.catKey(cat)).Result()
	if err != nil {
		return nil, err
	}
	out := make([]netip.Prefix, 0, len(keys))
	for _, k := range keys {
		if p, err := netip.ParsePrefix(strings.TrimSpace(k)); err == nil {
			out = append(out, p)
		}
	}
	sort.Slice(out, func(i, j int) bool { return out[i].String() < out[j].String() })
	return out, nil
}

func (s *redisStore) ListCats() []string {
	// Redis doesn't list categories directly; caller should track categories elsewhere.
	// As a pragmatic fallback we read from a catalog file or rely on the importer to know them.
	// Here we just return [] and rely on the dynamic catalog in memory.
	return []string{}
}

func (s *redisStore) Count(cat string) (int, error) {
	n, err := s.rdb.HLen(s.ctx, s.catKey(cat)).Result()
	return int(n), err
}

func (s *redisStore) AddWhitelist(ip netip.Addr) error {
	return s.rdb.Set(s.ctx, s.wlKey(ip.String()), 1, 0).Err()
}

func (s *redisStore) IsWhitelisted(ip netip.Addr) (bool, error) {
	ex, err := s.rdb.Exists(s.ctx, s.wlKey(ip.String())).Result()
	if err != nil {
		return false, err
	}
	if ex > 0 {
		return true, nil
	}

	keys, err := s.rdb.HKeys(s.ctx, s.wlPrefKey()).Result()
	if err != nil {
		return false, err
	}
	for _, k := range keys {
		if p, err := netip.ParsePrefix(strings.TrimSpace(k)); err == nil && p.Contains(ip) {
			return true, nil
		}
	}
	return false, nil
}

func (s *redisStore) AddManual(ip, port, proto string) error {
	ipKey := ip + "/32"
	if err := s.rdb.HSet(s.ctx, "bl:manual", ipKey, 1).Err(); err != nil {
		return err
	}
	return s.rdb.SAdd(s.ctx, s.manKey(ipKey), port+"/"+proto).Err()
}

func (s *redisStore) ListManual() map[string][]string {
	keys, _ := s.rdb.HKeys(s.ctx, "bl:manual").Result()
	out := make(map[string][]string)
	for _, ip := range keys {
		ports, _ := s.rdb.SMembers(s.ctx, s.manKey(ip)).Result()
		out[ip] = ports
	}
	return out
}

// ── Metrics ───────────────────────────────────────────────────────────────────

type histogram struct {
	// Prometheus-style buckets
	bounds []float64
	// handler -> counts/buckets
	mu     sync.Mutex
	counts map[string][]uint64
	sum    map[string]float64
	n      map[string]uint64
}

func newHistogram(bounds []float64) *histogram {
	return &histogram{
		bounds: bounds,
		counts: map[string][]uint64{},
		sum:    map[string]float64{},
		n:      map[string]uint64{},
	}
}
func (h *histogram) observe(name string, v float64) {
	h.mu.Lock()
	defer h.mu.Unlock()
	if _, ok := h.counts[name]; !ok {
		h.counts[name] = make([]uint64, len(h.bounds))
	}
	// find the first bucket with v <= le; values above the last bound are only
	// reflected in _sum/_count (the +Inf bucket is emitted separately, which
	// keeps the exposition Prometheus-conformant).
	for i, b := range h.bounds {
		if v <= b {
			h.counts[name][i]++
			break
		}
	}
	h.sum[name] += v
	h.n[name]++
}

type metricsT struct {
	// Requests
	reqTotal  sync.Map // key: handler -> *uint64
	respTotal sync.Map // key: handler|code -> *uint64
	inflight  uint64

	// Latencies
	lat *histogram

	// Importer
	importerCycles      uint64
	importerErrors      sync.Map // key: category -> *uint64
	importerLastSuccess int64    // unix seconds
	importerDur         *histogram

	// Traefik blocks
	traefikBlocks uint64

	// Downloads
	downloads sync.Map // key: category -> *uint64
}

var metrics = &metricsT{
	lat:         newHistogram([]float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10}),
	importerDur: newHistogram([]float64{0.1, 0.25, 0.5, 1, 2, 5, 10, 20, 40, 60}),
}

func (m *metricsT) incReq(handler string) {
	v, _ := m.reqTotal.LoadOrStore(handler, new(uint64))
	atomic.AddUint64(v.(*uint64), 1)
}
func (m *metricsT) incResp(handler string, code int) {
	key := fmt.Sprintf("%s|%d", handler, code)
	v, _ := m.respTotal.LoadOrStore(key, new(uint64))
	atomic.AddUint64(v.(*uint64), 1)
}
func (m *metricsT) incImporterErr(cat string) {
	v, _ := m.importerErrors.LoadOrStore(cat, new(uint64))
	atomic.AddUint64(v.(*uint64), 1)
}

type statusRecorder struct {
	http.ResponseWriter
	status int
}

func (r *statusRecorder) WriteHeader(code int) { r.status = code; r.ResponseWriter.WriteHeader(code) }

// HTTP middleware that records request counts, response codes and latency.
func (s *server) withMetrics(next http.HandlerFunc, name string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		atomic.AddUint64(&metrics.inflight, 1)
		defer atomic.AddUint64(&metrics.inflight, ^uint64(0)) // -1

		rec := &statusRecorder{ResponseWriter: w, status: 200}
		start := time.Now()
		next(rec, r)
		dur := time.Since(start).Seconds()

		metrics.incReq(name)
		metrics.incResp(name, rec.status)
		metrics.lat.observe(name, dur)
	}
}

// ──────────────────────────────────────────────────────────────────────────────
// Catalog + Importer
// ──────────────────────────────────────────────────────────────────────────────

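// loadCatalog fetches <FLOD_IMPORT_URL>/lists.json, which is decoded as a flat
// JSON object mapping category name to list URL. A minimal sketch of the
// expected shape (the URL below is a made-up example):
//
//	{
//	  "botnets": "https://git.send.nrw/sendnrw/flod-lists/raw/branch/main/botnets.txt"
//	}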
func loadCatalog() (map[string]string, error) {
	if importBaseURL == "" {
		// fallback: minimal default list
		return map[string]string{
			"flod-official": "https://git.send.nrw/sendnrw/flod-lists/src/branch/main/",
		}, nil
	}
	catalogURL := strings.TrimRight(importBaseURL, "/") + "/lists.json"
	log.Printf("Fetching catalog from %s", catalogURL)
	resp, err := http.Get(catalogURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("catalog status: %s", resp.Status)
	}
	var m map[string]string
	if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
		return nil, err
	}
	// also write to disk for transparency (skip silently if the file cannot be
	// created; the previous code dereferenced a nil *os.File in that case)
	_ = os.MkdirAll("./lists", 0o755)
	if f, err := os.Create(filepath.Join("./lists", "lists.json")); err == nil {
		defer f.Close()
		enc := json.NewEncoder(f)
		enc.SetIndent("", " ")
		_ = enc.Encode(m)
	}
	return m, nil
}

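// normalizeLineToPrefix turns one line of a downloaded list into a canonical
// CIDR string. Illustrative behaviour:
//
//	"1.2.3.4"     -> "1.2.3.4/32", true
//	"2001:db8::1" -> "2001:db8::1/128", true
//	"10.0.0.0/8"  -> "10.0.0.0/8", true
//	"# comment"   -> "", false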
func normalizeLineToPrefix(s string) (string, bool) {
	s = strings.TrimSpace(s)
	if s == "" || strings.HasPrefix(s, "#") {
		return "", false
	}
	if !strings.Contains(s, "/") {
		ip := net.ParseIP(s)
		if ip == nil {
			return "", false
		}
		if ip.To4() != nil {
			s += "/32"
		} else {
			s += "/128"
		}
	}
	if _, err := netip.ParsePrefix(s); err != nil {
		return "", false
	}
	return s, true
}

func importCategory(st Store, cat, url string) (int, error) {
	resp, err := http.Get(url)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return 0, fmt.Errorf("bad status %s", resp.Status)
	}
	sc := bufio.NewScanner(resp.Body)
	count := 0
	for sc.Scan() {
		if pfx, ok := normalizeLineToPrefix(sc.Text()); ok {
			if err := st.PutPrefix(cat, pfx); err != nil {
				return count, err
			}
			count++
			if count%2000 == 0 {
				log.Printf("[%s] %d entries", cat, count)
			}
		}
	}
	return count, sc.Err()
}

func startImporter(st Store, cats map[string]string, srv *server) {

	var delay time.Duration
	if delayStr == "" {
		delay = defaultDelay
	} else {
		secs, err := strconv.Atoi(delayStr)
		if err != nil || secs <= 0 {
			log.Printf("invalid DELAY=%q, using default (%v)", delayStr, defaultDelay)
			delay = defaultDelay // previously the delay silently stayed at 0 here
		} else {
			delay = time.Duration(secs) * time.Second
		}
	}

	go func() {
		for {
			start := time.Now()
			log.Println("Starting blocklist import...")
			var wg sync.WaitGroup
			for cat, url := range cats {
				cat, url := cat, url
				wg.Add(1)
				go func() {
					defer wg.Done()
					t0 := time.Now()
					n, err := importCategory(st, cat, url)
					d := time.Since(t0).Seconds()
					metrics.importerDur.observe(cat, d)
					if err != nil {
						metrics.incImporterErr(cat)
						log.Printf("❌ import %s: %v", cat, err)
						return
					}
					log.Printf("✅ [%s] %d entries", cat, n)
				}()
				time.Sleep(delay)
			}
			wg.Wait()
			srv.rebuildIndex()
			atomic.AddUint64(&metrics.importerCycles, 1)
			atomic.StoreInt64(&metrics.importerLastSuccess, time.Now().Unix())
			log.Printf("Import cycle finished in %s", time.Since(start))
			time.Sleep(importInterval)
		}
	}()
}

// ──────────────────────────────────────────────────────────────────────────────
// HTTP layer
// ──────────────────────────────────────────────────────────────────────────────

type ipIndex struct {
	v4Lens []int // prefix lengths that are present (e.g. 8, 16, 24, 32)
	v6Lens []int
	v4     map[int]map[string][]string // len -> "a.b.c.d/len" -> []categories
	v6     map[int]map[string][]string // len -> "xxxx::/len" -> []categories
}

type server struct {
	st      Store
	catalog map[string]string // latest catalog for category list
	mu      sync.RWMutex
	idx     atomic.Value
}

func (s *server) rebuildIndex() {
	idx := &ipIndex{v4: map[int]map[string][]string{}, v6: map[int]map[string][]string{}}
	lens4 := map[int]struct{}{}
	lens6 := map[int]struct{}{}

	cats := s.cats()
	for _, c := range cats {
		pfxs, _ := s.st.ListPrefixes(c)
		for _, p := range pfxs {
			m := idx.v6
			lens := lens6
			if p.Addr().Is4() {
				m = idx.v4
				lens = lens4
			}
			l := p.Bits()
			if m[l] == nil {
				m[l] = map[string][]string{}
			}
			// mask host bits so the index key is canonical and matches lookups
			k := p.Masked().String()
			m[l][k] = append(m[l][k], c)
			lens[l] = struct{}{}
		}
	}
	// include the manual blacklist (as category "manual-blacklist")
	if bl, err := s.st.ListBlacklist(); err == nil {
		for p := range bl {
			m := idx.v6
			lens := lens6
			if p.Addr().Is4() {
				m = idx.v4
				lens = lens4
			}
			l := p.Bits()
			if m[l] == nil {
				m[l] = map[string][]string{}
			}
			k := p.Masked().String()
			m[l][k] = append(m[l][k], "manual-blacklist")
			lens[l] = struct{}{}
		}
	}

	// store the lengths sorted, so a lookup only probes lengths that exist
	for l := range lens4 {
		idx.v4Lens = append(idx.v4Lens, l)
	}
	for l := range lens6 {
		idx.v6Lens = append(idx.v6Lens, l)
	}
	sort.Ints(idx.v4Lens)
	sort.Ints(idx.v6Lens)

	s.idx.Store(idx)
}

func (s *server) cats() []string {
	// prefer catalog keys if present
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s.catalog != nil {
		keys := make([]string, 0, len(s.catalog))
		for k := range s.catalog {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		return keys
	}
	return s.st.ListCats()
}

func clientIPFromHeaders(r *http.Request) (netip.Addr, error) {
	if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
		parts := strings.Split(xff, ",")
		s := strings.TrimSpace(parts[0])
		if a, err := netip.ParseAddr(s); err == nil {
			return a.Unmap(), nil
		}
	}
	if xr := r.Header.Get("X-Real-Ip"); xr != "" {
		if a, err := netip.ParseAddr(strings.TrimSpace(xr)); err == nil {
			return a.Unmap(), nil
		}
	}
	host, _, err := net.SplitHostPort(r.RemoteAddr)
	if err == nil {
		if a, err := netip.ParseAddr(host); err == nil {
			return a.Unmap(), nil
		}
	}
	return netip.Addr{}, errors.New("cannot determine client ip")
}

func (s *server) handleCheck(w http.ResponseWriter, r *http.Request) {
	ipStr := strings.TrimPrefix(r.URL.Path, "/check/")
	ip, err := netip.ParseAddr(ipStr)
	if err != nil {
		http.Error(w, "invalid IP", http.StatusBadRequest)
		return
	}
	matches, _ := s.checkIP(ip, s.cats())
	writeJSON(w, map[string]any{
		"ip":         ip.String(),
		"blocked":    len(matches) > 0,
		"categories": matches,
	})
}

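// handleTraefik is intended as a Traefik forwardAuth target: a 2xx response
// lets the request pass, the 403 error page below blocks it. A minimal sketch
// of the middleware wiring (service name and URL are assumptions):
//
//	http:
//	  middlewares:
//	    flod:
//	      forwardAuth:
//	        address: "http://flod-ipcheck:8080/traefik"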
func (s *server) handleTraefik(w http.ResponseWriter, r *http.Request) {
	ip, err := clientIPFromHeaders(r)
	if err != nil {
		http.Error(w, "invalid IP", http.StatusBadRequest)
		return
	}
	matches, _ := s.checkIP(ip, s.cats())
	if len(matches) > 0 {
		atomic.AddUint64(&metrics.traefikBlocks, 1)
		errorhtml(w, r)
		return
	}
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("OK"))
}

func errorhtml(w http.ResponseWriter, r *http.Request) {
	html := `<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Blocked by the First-Line-Of-Defense-Project</title>
<style>
:root{
--bg:#f6f7f9;--text:#1f2937;--muted:#6b7280;--card:#ffffff;
--success:#22c55e;--danger:#ef4444;--accent:#2563eb;--border:#e5e7eb;
}
*{box-sizing:border-box}
html,body{height:100%}
body{margin:0;background:var(--bg);color:var(--text);font:16px/1.5 system-ui,-apple-system,Segoe UI,Roboto,Ubuntu,"Helvetica Neue",Arial}
.wrap{max-width:980px;margin:0 auto;padding:40px 16px 64px}
header{display:flex;align-items:center;gap:12px;flex-wrap:wrap;}
h1{font-size:clamp(24px,4vw,38px);font-weight:700;margin:0}
.pill{display:inline-flex;align-items:center;gap:8px;padding:6px 10px;border-radius:999px;border:1px solid var(--border);background:#fff;font-weight:600;font-size:14px;color:#111}
.pill small{font-weight:500;color:var(--muted)}
p.lead{margin:12px 0 24px;color:var(--muted)}

.grid{display:grid;grid-template-columns:repeat(auto-fit,minmax(220px,1fr));gap:18px;margin-top:16px}
.node{background:var(--card);border:1px solid var(--border);border-radius:14px;padding:18px;display:flex;flex-direction:column;align-items:center;gap:10px;box-shadow:0 6px 18px rgba(0,0,0,.04)}
.icon{position:relative;width:104px;height:80px;display:grid;place-items:center}
.status{position:absolute;right:-8px;bottom:-8px;width:30px;height:30px;border-radius:999px;display:grid;place-items:center;color:#fff;font-weight:800;font-size:14px}
.status.ok{background:var(--success)}
.status.err{background:var(--danger)}
.node h3{margin:6px 0 0;font-size:16px}
.node .sub{margin:0;color:var(--muted);font-size:14px}
.node .state{margin:4px 0 0;font-weight:700}
.state.ok{color:var(--success)}
.state.err{color:var(--danger)}

.actions{margin-top:28px;display:flex;gap:12px;flex-wrap:wrap}
.btn{border:1px solid var(--border);background:#fff;font-weight:600;padding:10px 14px;border-radius:10px;cursor:pointer}
.btn.primary{background:var(--accent);border-color:var(--accent);color:#fff}
.meta{margin-top:24px;color:var(--muted);font-size:13px}
footer{margin-top:40px;color:var(--muted);font-size:13px}

/* Simple, friendly SVG look */
svg{display:block}
.dim{fill:#e5e7eb}
.stroke{stroke:#9ca3af}
</style>
</head>
<body>
<div class="wrap">
<header>
<h1>You have been blocked by the First-Line-Of-Defense-Project</h1>
<span class="pill">ERROR 403 <small>Forbidden</small></span>
</header>
<p class="lead">
Your connection attempt to the target server was blocked by the First-Line-Of-Defense-Project. Your IP address is listed on at least one blacklist.
</p>

<section class="grid" aria-label="Diagnostic chain">
<article class="node" aria-label="Browser Status">
<div class="icon" aria-hidden="true">
<svg width="88" height="62" viewBox="0 0 88 62" xmlns="http://www.w3.org/2000/svg" role="img">
<rect x="1" y="6" width="86" height="55" rx="8" fill="#fff" stroke="#d1d5db"/>
<rect x="1" y="1" width="86" height="14" rx="8" fill="#f3f4f6" stroke="#d1d5db"/>
<circle cx="10" cy="8" r="2.5" fill="#ef4444"/>
<circle cx="18" cy="8" r="2.5" fill="#f59e0b"/>
<circle cx="26" cy="8" r="2.5" fill="#22c55e"/>
</svg>
<div class="status ok" title="Functional">✓</div>
</div>
<h3>You</h3>
<p class="sub">Browser</p>
<p class="state ok">Functional</p>
</article>

<!-- Edge / Proxy -->
<article class="node" aria-label="FLODP Status">
<div class="icon" aria-hidden="true">
<svg width="96" height="64" viewBox="0 0 96 64" xmlns="http://www.w3.org/2000/svg" role="img">
<path d="M33 44h32a14 14 0 0 0 0-28 18 18 0 0 0-34-5 16 16 0 0 0-4 31z" fill="#e5e7eb" stroke="#d1d5db"/>
</svg>
<div class="status err" title="Blocked">✕</div>
</div>
<h3>FLODP-SERVICE</h3>
<p class="sub">Security-Gateway</p>
<p class="state err">Blocked your request</p>
</article>

<!-- Host / Origin -->
<article class="node" aria-label="Origin/Host Status">
<div class="icon" aria-hidden="true">
<svg width="88" height="62" viewBox="0 0 88 62" xmlns="http://www.w3.org/2000/svg" role="img">
<rect x="6" y="10" width="76" height="18" rx="4" fill="#f3f4f6" stroke="#d1d5db"/>
<circle cx="16" cy="19" r="3" fill="#9ca3af"/>
<rect x="6" y="34" width="76" height="18" rx="4" fill="#f3f4f6" stroke="#d1d5db"/>
<circle cx="16" cy="43" r="3" fill="#9ca3af"/>
</svg>
<div class="status ok" title="Functional">✓</div>
</div>
<h3>Host</h3>
<p class="sub">Origin-Server</p>
<p class="state ok">Functional</p>
</article>
</section>

<div class="actions">
<button class="btn primary" onclick="location.reload()">Try again</button>
<button class="btn" onclick="document.getElementById('details').toggleAttribute('open')">Show details</button>
</div>

<details id="details" class="meta">
<summary><strong>Technical details</strong></summary>
<ul>
<li>Error: <strong>403</strong> - Your IP address is listed on at least one blacklist. The service's security system has therefore rejected your connection.</li>
<li>Time: <span id="now">-</span></li>
</ul>
<p>Tips: Check whether your system (browser, API, or similar) has a high connection frequency and has been blocked on other systems protected by FLODP.</p>
</details>

<footer>
<span>If the problem persists, contact the website operator.</span>
</footer>
</div>

<script>
(function(){
const now = new Date()
document.getElementById('now').textContent = now.toLocaleString()
})()
</script>
</body>
</html>`

	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	w.WriteHeader(http.StatusForbidden)
	_, _ = w.Write([]byte(html))
}

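// handleWhitelist accepts a JSON body with a single "ip" field, which may be a
// plain address or a CIDR prefix. Illustrative call (host/port assumed):
//
//	curl -X POST localhost:8080/whitelist \
//	  -H 'content-type: application/json' -d '{"ip":"203.0.113.7"}'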
func (s *server) handleWhitelist(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	var body struct {
		IP string `json:"ip"`
	} // keeps the GUI payload compatible
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		http.Error(w, "bad request", http.StatusBadRequest)
		return
	}
	raw := strings.TrimSpace(body.IP)

	// Case 1: single IP
	if addr, err := netip.ParseAddr(raw); err == nil {
		addr = addr.Unmap()
		if err := s.st.AddWhitelist(addr); err != nil {
			http.Error(w, "store error", http.StatusInternalServerError)
			return
		}
		writeJSON(w, map[string]any{
			"status": "whitelisted_ip",
			"ip":     addr.String(),
		})
		return
	}

	// Case 2: prefix (a bare "8.8.8.8" is also accepted and normalized to /32)
	if norm, ok := normalizeLineToPrefix(raw); ok {
		if pfx, err := netip.ParsePrefix(norm); err == nil {
			// store it as a prefix whitelist entry
			if err := s.st.AddWhitelistPrefix(pfx); err != nil {
				http.Error(w, "store error", http.StatusInternalServerError)
				return
			}
			writeJSON(w, map[string]any{
				"status": "whitelisted_prefix",
				"prefix": pfx.String(),
			})
			return
		}
	}

	http.Error(w, "invalid IP or prefix", http.StatusBadRequest)
}

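// handleDownload streams one category as a plain-text prefix list, one CIDR per
// line (e.g. GET /download/flod-official), suitable for feeding external tools
// such as firewalls or ipsets.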
func (s *server) handleDownload(w http.ResponseWriter, r *http.Request) {
	cat := strings.TrimPrefix(r.URL.Path, "/download/")
	if cat == "" {
		http.Error(w, "category missing", http.StatusBadRequest)
		return
	}
	// validate category if catalog is present
	s.mu.RLock()
	_, known := s.catalog[cat]
	s.mu.RUnlock()
	if s.catalog != nil && !known {
		http.Error(w, "unknown category", http.StatusNotFound)
		return
	}
	prefixes, err := s.st.ListPrefixes(cat)
	if err != nil {
		http.Error(w, "store error", http.StatusInternalServerError)
		return
	}
	v, _ := metrics.downloads.LoadOrStore(cat, new(uint64))
	atomic.AddUint64(v.(*uint64), 1)
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s.txt\"", cat))
	for _, p := range prefixes {
		fmt.Fprintln(w, p.String())
	}
}

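// handleMetrics renders the Prometheus text exposition format by hand (no
// client_golang dependency): counters, gauges and two hand-rolled histograms
// for request and import durations.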
func (s *server) handleMetrics(w http.ResponseWriter, r *http.Request) {
	var buf bytes.Buffer

	// Requests & inflight
	buf.WriteString("# HELP ipcheck_requests_total Total HTTP requests by handler\n")
	buf.WriteString("# TYPE ipcheck_requests_total counter\n")
	metrics.reqTotal.Range(func(k, v any) bool {
		fmt.Fprintf(&buf, "ipcheck_requests_total{handler=%q} %d\n", k.(string), atomic.LoadUint64(v.(*uint64)))
		return true
	})

	buf.WriteString("# HELP ipcheck_http_responses_total HTTP responses by handler and code\n")
	buf.WriteString("# TYPE ipcheck_http_responses_total counter\n")
	metrics.respTotal.Range(func(k, v any) bool {
		parts := strings.SplitN(k.(string), "|", 2)
		handler, code := parts[0], parts[1]
		fmt.Fprintf(&buf, "ipcheck_http_responses_total{handler=%q,code=%q} %d\n", handler, code, atomic.LoadUint64(v.(*uint64)))
		return true
	})

	buf.WriteString("# HELP ipcheck_requests_inflight Inflight HTTP requests\n")
	buf.WriteString("# TYPE ipcheck_requests_inflight gauge\n")
	fmt.Fprintf(&buf, "ipcheck_requests_inflight %d\n", atomic.LoadUint64(&metrics.inflight))

	// Latency histograms
	buf.WriteString("# HELP ipcheck_request_duration_seconds Request duration seconds\n")
	buf.WriteString("# TYPE ipcheck_request_duration_seconds histogram\n")
	metrics.lat.mu.Lock()
	for h, counts := range metrics.lat.counts {
		cum := uint64(0)
		for i, le := range metrics.lat.bounds {
			cum += counts[i]
			fmt.Fprintf(&buf, "ipcheck_request_duration_seconds_bucket{handler=%q,le=%q} %d\n", h, fmt.Sprintf("%.3f", le), cum)
		}
		// +Inf bucket: total count
		total := metrics.lat.n[h]
		fmt.Fprintf(&buf, "ipcheck_request_duration_seconds_bucket{handler=%q,le=\"+Inf\"} %d\n", h, total)
		fmt.Fprintf(&buf, "ipcheck_request_duration_seconds_sum{handler=%q} %.6f\n", h, metrics.lat.sum[h])
		fmt.Fprintf(&buf, "ipcheck_request_duration_seconds_count{handler=%q} %d\n", h, total)
	}
	metrics.lat.mu.Unlock()

	// Importer
	buf.WriteString("# HELP ipcheck_import_cycles_total Completed import cycles\n")
	buf.WriteString("# TYPE ipcheck_import_cycles_total counter\n")
	fmt.Fprintf(&buf, "ipcheck_import_cycles_total %d\n", atomic.LoadUint64(&metrics.importerCycles))

	buf.WriteString("# HELP ipcheck_import_last_success_timestamp_seconds Last successful import Unix time\n")
	buf.WriteString("# TYPE ipcheck_import_last_success_timestamp_seconds gauge\n")
	fmt.Fprintf(&buf, "ipcheck_import_last_success_timestamp_seconds %d\n", atomic.LoadInt64(&metrics.importerLastSuccess))

	buf.WriteString("# HELP ipcheck_import_errors_total Import errors by category\n")
	buf.WriteString("# TYPE ipcheck_import_errors_total counter\n")
	metrics.importerErrors.Range(func(k, v any) bool {
		fmt.Fprintf(&buf, "ipcheck_import_errors_total{category=%q} %d\n", k.(string), atomic.LoadUint64(v.(*uint64)))
		return true
	})

	buf.WriteString("# HELP ipcheck_import_duration_seconds Import durations by category\n")
	buf.WriteString("# TYPE ipcheck_import_duration_seconds histogram\n")
	metrics.importerDur.mu.Lock()
	for cat, counts := range metrics.importerDur.counts {
		cum := uint64(0)
		for i, le := range metrics.importerDur.bounds {
			cum += counts[i]
			fmt.Fprintf(&buf, "ipcheck_import_duration_seconds_bucket{category=%q,le=%q} %d\n", cat, fmt.Sprintf("%.2f", le), cum)
		}
		total := metrics.importerDur.n[cat]
		fmt.Fprintf(&buf, "ipcheck_import_duration_seconds_bucket{category=%q,le=\"+Inf\"} %d\n", cat, total)
		fmt.Fprintf(&buf, "ipcheck_import_duration_seconds_sum{category=%q} %.6f\n", cat, metrics.importerDur.sum[cat])
		fmt.Fprintf(&buf, "ipcheck_import_duration_seconds_count{category=%q} %d\n", cat, total)
	}
	metrics.importerDur.mu.Unlock()

	// Blocklist sizes per category
	for _, c := range s.cats() {
		n, _ := s.st.Count(c)
		fmt.Fprintf(&buf, "ipcheck_blocklist_hash_size{category=%q} %d\n", c, n)
	}

	// Number of catalog categories
	buf.WriteString("# HELP ipcheck_catalog_categories_total Number of categories in catalog\n")
	buf.WriteString("# TYPE ipcheck_catalog_categories_total gauge\n")
	fmt.Fprintf(&buf, "ipcheck_catalog_categories_total %d\n", len(s.cats()))

	// Honeypot: per-port hits plus unique source IPs
	hits := s.st.ListManual()
	portCount := map[string]int{}
	for _, ports := range hits {
		for _, p := range ports {
			portCount[p]++
		}
	}
	buf.WriteString("# TYPE honeypot_port_hits counter\n")
	for pp, n := range portCount {
		parts := strings.Split(pp, "/")
		if len(parts) == 2 {
			fmt.Fprintf(&buf, "honeypot_port_hits{port=%q,protocol=%q} %d\n", parts[0], parts[1], n)
		}
	}
	if uniq, err := s.st.ManualUniqueIPs(); err == nil {
		buf.WriteString("# HELP honeypot_unique_ips Total unique source IPs seen by honeypot\n")
		buf.WriteString("# TYPE honeypot_unique_ips gauge\n")
		fmt.Fprintf(&buf, "honeypot_unique_ips %d\n", uniq)
	}

	// Whitelist size
	if wl, err := s.st.WhitelistCount(); err == nil {
		buf.WriteString("# HELP ipcheck_whitelist_total Whitelisted IPs\n")
		buf.WriteString("# TYPE ipcheck_whitelist_total gauge\n")
		fmt.Fprintf(&buf, "ipcheck_whitelist_total %d\n", wl)
	}

	// Traefik blocks
	buf.WriteString("# HELP ipcheck_traefik_blocks_total Traefik blocks due to matches\n")
	buf.WriteString("# TYPE ipcheck_traefik_blocks_total counter\n")
	fmt.Fprintf(&buf, "ipcheck_traefik_blocks_total %d\n", atomic.LoadUint64(&metrics.traefikBlocks))

	// Downloads per category
	buf.WriteString("# HELP ipcheck_downloads_total Downloads served by category\n")
	buf.WriteString("# TYPE ipcheck_downloads_total counter\n")
	metrics.downloads.Range(func(k, v any) bool {
		fmt.Fprintf(&buf, "ipcheck_downloads_total{category=%q} %d\n", k.(string), atomic.LoadUint64(v.(*uint64)))
		return true
	})

	if n, err := s.st.CountBlacklist(); err == nil {
		fmt.Fprintf(&buf, "ipcheck_manual_blacklist_size %d\n", n)
	}

	w.Header().Set("Content-Type", "text/plain; version=0.0.4")
	w.Write(buf.Bytes())
}

func (s *server) handleGUI(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	w.Write([]byte(`<!doctype html>
<html><head><meta charset="utf-8"><title>Blocklist GUI</title>
<style>body{font-family:sans-serif;max-width:900px;margin:auto;padding:2rem;background:#f9fafb}
input,button{padding:.6rem;margin:.3rem 0;width:100%;border-radius:.4rem;border:1px solid #ccc;box-sizing:border-box}
button{background:#2563eb;color:#fff;border:none;cursor:pointer}button:hover{background:#1d4ed8}
pre{background:#fff;padding:1rem;border:1px solid #ddd;border-radius:.4rem;white-space:pre-wrap}
</style></head>
<body>
<h1>IP Checker + Whitelist</h1>
<input id="ip" placeholder="Enter IP..."><button onclick="checkIP()">Check</button>
<button onclick="wl()">Whitelist</button>
<h2>Result</h2><pre id="res">No Request</pre>
<h2>Blacklist</h2>
<input id="bl_prefix" placeholder="Prefix or IP, e.g. 203.0.113.0/24 or 2001:db8::/32">
<input id="bl_reason" placeholder="Reason (optional)">
<button onclick="blAdd()">Add to Blacklist</button>
<button onclick="blList()">List Blacklist</button>
<pre id="bl_out">-</pre>
<script>
async function blAdd(){
const prefix=document.getElementById('bl_prefix').value.trim();
const reason=document.getElementById('bl_reason').value.trim();
const r=await fetch('/blacklist',{method:'POST',headers:{'content-type':'application/json'},body:JSON.stringify({prefix,reason})});
document.getElementById('bl_out').textContent=await r.text();
}
async function blList(){
const r=await fetch('/blacklist',{method:'GET'});
document.getElementById('bl_out').textContent=await r.text();
}
</script>
<script>
async function checkIP(){
const ip=document.getElementById('ip').value.trim();
const r=await fetch('/check/'+encodeURIComponent(ip));
document.getElementById('res').textContent=await r.text();
}
async function wl(){
const ip=document.getElementById('ip').value.trim();
const r=await fetch('/whitelist',{method:'POST',headers:{'content-type':'application/json'},body:JSON.stringify({ip})});
document.getElementById('res').textContent=await r.text();
}
</script>
</body></html>`))
}

func writeJSON(w http.ResponseWriter, v any) {
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(v)
}

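// Blacklist API, mirroring what the GUI above uses (illustrative calls,
// host/port assumed):
//
//	curl -X POST   localhost:8080/blacklist -H 'content-type: application/json' \
//	     -d '{"prefix":"203.0.113.0/24","reason":"scanner"}'
//	curl           localhost:8080/blacklist                    # list entries
//	curl -X DELETE localhost:8080/blacklist/203.0.113.0/24     # remove entry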
func (s *server) handleBlacklistRoot(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case http.MethodGet:
		s.handleBlacklistList(w, r)
	case http.MethodPost:
		s.handleBlacklistAdd(w, r)
	default:
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
	}
}

func (s *server) handleBlacklistAdd(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	var body struct {
		Prefix string `json:"prefix"` // "1.2.3.4" or "1.2.3.0/24" etc.
		Reason string `json:"reason"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		http.Error(w, "bad request", http.StatusBadRequest)
		return
	}
	norm, ok := normalizeLineToPrefix(strings.TrimSpace(body.Prefix))
	if !ok {
		http.Error(w, "invalid prefix/ip", http.StatusBadRequest)
		return
	}
	pfx, _ := netip.ParsePrefix(norm)
	if err := s.st.AddBlacklist(pfx, body.Reason); err != nil {
		http.Error(w, "store error", http.StatusInternalServerError)
		return
	}
	writeJSON(w, map[string]any{"status": "blacklisted", "prefix": pfx.String(), "reason": strings.TrimSpace(body.Reason)})
}

func (s *server) handleBlacklistDel(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodDelete {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	raw := strings.TrimPrefix(r.URL.Path, "/blacklist/")
	norm, ok := normalizeLineToPrefix(strings.TrimSpace(raw))
	if !ok {
		http.Error(w, "invalid prefix/ip", http.StatusBadRequest)
		return
	}
	pfx, _ := netip.ParsePrefix(norm)
	if err := s.st.DelBlacklist(pfx); err != nil {
		http.Error(w, "store error", http.StatusInternalServerError)
		return
	}
	writeJSON(w, map[string]any{"status": "removed", "prefix": pfx.String()})
}

func (s *server) handleBlacklistList(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	m, err := s.st.ListBlacklist()
	if err != nil {
		http.Error(w, "store error", http.StatusInternalServerError)
		return
	}
	type entry struct{ Prefix, Reason string }
	var list []entry
	for p, reason := range m {
		list = append(list, entry{Prefix: p.String(), Reason: reason})
	}
	sort.Slice(list, func(i, j int) bool { return list[i].Prefix < list[j].Prefix })
	writeJSON(w, map[string]any{"entries": list})
}

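// checkIP answers "which categories contain this address?". Instead of walking
// every stored prefix it only probes the prefix lengths that actually occur in
// the index: for each length it masks the address down to that length and does
// an exact map lookup. Whitelisted addresses never match.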
func (s *server) checkIP(ip netip.Addr, _ []string) ([]string, error) {
	if ok, _ := s.st.IsWhitelisted(ip); ok {
		return nil, nil
	}

	// The index is built asynchronously by the importer; before the first
	// rebuild there is nothing to match against (and the type assertion on a
	// nil atomic.Value would panic).
	v := s.idx.Load()
	if v == nil {
		return nil, nil
	}
	idx := v.(*ipIndex)
	var lens []int
	var m map[int]map[string][]string
	if ip.Is4() {
		lens, m = idx.v4Lens, idx.v4
	} else {
		lens, m = idx.v6Lens, idx.v6
	}

	seen := map[string]struct{}{}
	var matches []string
	for _, l := range lens {
		// mask the host bits so the key matches the canonical index entries
		p := netip.PrefixFrom(ip, l).Masked().String()
		if cats, ok := m[l][p]; ok {
			for _, c := range cats {
				if _, dup := seen[c]; !dup {
					seen[c] = struct{}{}
					matches = append(matches, c)
				}
			}
		}
	}
	return matches, nil
}

// ──────────────────────────────────────────────────────────────────────────────
// Honeypot (optional)
// ──────────────────────────────────────────────────────────────────────────────

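// The honeypot listeners accept (or read) exactly one connection/datagram per
// hit, record the source IP as a manual /32 entry via Store.AddManual and then
// drop the connection – just enough to feed the "manual" statistics and the
// honeypot metrics above.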
func startTCPListener(st Store, network, port string) {
	ln, err := net.Listen(network, ":"+port)
	if err != nil {
		log.Printf("❌ could not listen on %s/%s: %v", network, port, err)
		return
	}
	log.Printf("🚀 TCP Honeypot on %s/%s", network, port)
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Printf("accept err: %v", err)
			continue
		}
		ip, _, _ := net.SplitHostPort(conn.RemoteAddr().String())
		_ = st.AddManual(ip, port, "TCP")
		conn.Close()
	}
}

func startUDPListener(st Store, network, port string) {
	addr := net.UDPAddr{Port: atoi(port)}
	conn, err := net.ListenUDP(network, &addr)
	if err != nil {
		log.Printf("❌ could not listen on %s/%s: %v", network, port, err)
		return
	}
	log.Printf("🚀 UDP Honeypot on %s/%s", network, port)
	buf := make([]byte, 1024)
	for {
		n, ra, err := conn.ReadFromUDP(buf)
		if err != nil {
			log.Printf("udp read err: %v", err)
			continue
		}
		if n > 0 {
			_ = st.AddManual(ra.IP.String(), port, "UDP")
		}
	}
}

func atoi(s string) int {
	n, _ := strconvAtoiSafe(s)
	return n
}

// strconvAtoiSafe parses a non-negative, digits-only integer (the ports from
// the HONEY_* lists); anything else is rejected.
func strconvAtoiSafe(s string) (int, error) {
	var n int
	for _, r := range s {
		if r < '0' || r > '9' {
			return 0, fmt.Errorf("bad int %q", s)
		}
		n = n*10 + int(r-'0')
	}
	return n, nil
}

// ──────────────────────────────────────────────────────────────────────────────
// main
// ──────────────────────────────────────────────────────────────────────────────

func main() {
	// choose store
	var st Store
	switch strings.ToLower(storeKind) {
	case "redis":
		rs, err := newRedisStore()
		if err != nil {
			log.Fatalf("redis: %v", err)
		}
		st = rs
	default:
		st = newMemStore()
	}

	// fetch catalog and start importer
	cat, err := loadCatalog()
	if err != nil {
		log.Printf("catalog error: %v (using fallback FLOD only)", err)
		cat = map[string]string{
			"flod-official": "https://git.send.nrw/sendnrw/flod-lists/src/branch/main/lists.json",
		}
	}
	srv := &server{st: st, catalog: cat}
	startImporter(st, cat, srv)

	// honeypot listeners
	if enableHoneypot {
		for _, p := range honeyTCP {
			go startTCPListener(st, "tcp4", p)
			go startTCPListener(st, "tcp6", p)
		}
		for _, p := range honeyUDP {
			go startUDPListener(st, "udp4", p)
			go startUDPListener(st, "udp6", p)
		}
	}

	// routes
	http.HandleFunc("/", srv.withMetrics(srv.handleGUI, "gui"))
	http.HandleFunc("/check/", srv.withMetrics(srv.handleCheck, "check"))
	http.HandleFunc("/traefik", srv.withMetrics(srv.handleTraefik, "traefik"))
	http.HandleFunc("/whitelist", srv.withMetrics(srv.handleWhitelist, "whitelist"))
	http.HandleFunc("/download/", srv.withMetrics(srv.handleDownload, "download"))
	http.HandleFunc("/metrics", srv.handleMetrics) // intentionally not wrapped in the metrics middleware
	//http.HandleFunc("/blacklist", srv.withMetrics(srv.handleBlacklistAdd, "blacklist_add")) // POST
	/*http.HandleFunc("/blacklist/", func(w http.ResponseWriter, r *http.Request) {
		switch r.Method {
		case http.MethodDelete:
			srv.withMetrics(srv.handleBlacklistDel, "blacklist_del")(w, r)
		case http.MethodGet:
			// allow: GET /blacklist (list)
			if r.URL.Path == "/blacklist/" || r.URL.Path == "/blacklist" {
				srv.withMetrics(srv.handleBlacklistList, "blacklist_list")(w, r)
				return
			}
			http.NotFound(w, r)
		default:
			http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		}
	})*/

	http.HandleFunc("/blacklist", srv.withMetrics(srv.handleBlacklistRoot, "blacklist_root")) // GET & POST
	http.HandleFunc("/blacklist/", srv.withMetrics(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/blacklist/" && r.Method == http.MethodGet {
			srv.handleBlacklistList(w, r)
			return
		}
		if r.Method == http.MethodDelete {
			srv.handleBlacklistDel(w, r)
			return
		}
		http.Error(w, "not found", http.StatusNotFound)
	}, "blacklist_misc"))

	log.Printf("listening on %s (store=%s, honeypot=%v)", listenAddr, storeKind, enableHoneypot)
	if err := http.ListenAndServe(listenAddr, nil); err != nil {
		log.Fatal(err)
	}
}