package main

import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"net/netip"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/redis/go-redis/v9"
)

// Redis client + background context shared by all handlers.
var ctx = context.Background()

var rdb = redis.NewClient(&redis.Options{
	Addr:     "flodredis:6379",
	Password: os.Getenv("REDIS_PASS"),
})

// ──────────────────────────────────────────────────────────────────────────────
// Helpers
// ──────────────────────────────────────────────────────────────────────────────

// ExportListJSON writes the map to path as an indented JSON file.
func ExportListJSON(path string, m map[string]string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	enc := json.NewEncoder(f)
	enc.SetIndent("", " ")
	return enc.Encode(m)
}

// ImportListJSON reads a JSON file and returns its contents as map[string]string.
func ImportListJSON(path string) (map[string]string, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var m map[string]string
	if err := json.NewDecoder(f).Decode(&m); err != nil {
		return nil, err
	}
	return m, nil
}

// blocklistURLs maps category name → download URL. The compiled-in default is
// replaced at startup by the remote catalog (lists.json) when it can be fetched.
var blocklistURLs = map[string]string{
	"bitwire": "https://raw.githubusercontent.com/bitwire-it/ipblocklist/refs/heads/main/ip-list.txt",
}

// prefixCacheEntry is a cached, parsed snapshot of one category's prefixes.
type prefixCacheEntry struct {
	prefixes []netip.Prefix
	expireAt time.Time
}

var (
	prefixCache   = map[string]prefixCacheEntry{}
	prefixCacheMu sync.RWMutex
)

// Prometheus metrics.
var (
	checkRequests = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "ipcheck_requests_total",
		Help: "Total IP check requests",
	})
	checkBlocked = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "ipcheck_blocked_total",
		Help: "Total blocked IPs",
	})
	checkWhitelist = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "ipcheck_whitelisted_total",
		Help: "Total whitelisted IPs",
	})
	blocklistHashSizes = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "ipcheck_blocklist_hash_size",
			Help: "Number of entries in each category",
		},
		[]string{"category"},
	)
)

func init() {
	prometheus.MustRegister(checkRequests, checkBlocked, checkWhitelist, blocklistHashSizes)
}

// main imports the blocklists, registers the HTTP routes and the metrics
// refresher, then serves on :8080.
func main() {
	if err := importBlocklists(); err != nil {
		fmt.Println("Blocklisten-Import FEHLGESCHLAGEN:", err)
		return
	}

	http.HandleFunc("/", handleGUI)
	http.HandleFunc("/download/", handleDownload)
	http.HandleFunc("/whitelist", handleWhitelist)
	http.HandleFunc("/check/", handleCheck)
	http.HandleFunc("/traefik", handleTraefik)
	http.Handle("/metrics", promhttp.Handler())

	// Refresh the per-category gauge immediately and then every 10 s.
	go func() {
		ticker := time.NewTicker(10 * time.Second)
		defer ticker.Stop()
		for {
			updateBlocklistMetrics()
			<-ticker.C
		}
	}()

	fmt.Println("Server läuft auf :8080")
	// BUG FIX: the ListenAndServe error was silently discarded; a failed bind
	// made main() return without any diagnostic.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		log.Fatal(err)
	}
}

// updateBlocklistMetrics sets the ipcheck_blocklist_hash_size gauge for every
// known category from the size of its Redis hash.
func updateBlocklistMetrics() {
	for cat := range blocklistURLs {
		key := "bl:" + cat
		count, err := rdb.HLen(ctx, key).Result()
		if err != nil {
			fmt.Printf("❌ Redis HLen Error for %s: %v\n", key, err)
			continue
		}
		blocklistHashSizes.WithLabelValues(cat).Set(float64(count))
	}
}

// target is a named download URL.
type target struct {
	Name, URL string
}

// fetchAndSave downloads t.URL into outDir, writing to a ".part" temp file
// first so a partial download never replaces an existing list.
func fetchAndSave(client *http.Client, t target, outDir string) error {
	fileName := filepath.Base(t.URL)
	// BUG FIX: filepath.Base never returns "" — degenerate URLs yield "." or
	// "/" — so the sanitized-name fallback was dead code. Cover those cases.
	if fileName == "" || fileName == "." || fileName == "/" {
		fileName = strings.ReplaceAll(strings.ToLower(strings.ReplaceAll(t.Name, " ", "_")), "..", "")
	}
	dst := filepath.Join(outDir, fileName)
	log.Printf("Downloading %-40s → %s", t.Name, dst)

	resp, err := client.Get(t.URL)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("bad HTTP status: %s", resp.Status)
	}

	tmp := dst + ".part"
	f, err := os.Create(tmp)
	if err != nil {
		return err
	}
	if _, err := io.Copy(f, resp.Body); err != nil {
		f.Close()
		os.Remove(tmp)
		return err
	}
	f.Close()
	return os.Rename(tmp, dst)
}

// importBlocklists fetches the remote catalog, then imports every listed
// category into Redis concurrently. Returns an error if any category failed.
func importBlocklists() error {
	client := &http.Client{Timeout: 60 * time.Second}
	t := target{Name: "Catalog", URL: os.Getenv("FLOD_IMPORT_URL") + "/lists.json"}
	if err := os.MkdirAll("./lists/", 0o755); err != nil {
		fmt.Println("creating output dir", err)
	}
	if err := fetchAndSave(client, t, "./lists/"); err != nil {
		log.Printf("ERROR %s → %v", t.URL, err)
	}

	// BUG FIX: the original discarded the ImportListJSON error, which set
	// blocklistURLs to nil on failure — the final `blocklistURLs["flodpod"]`
	// assignment below would then panic (write to nil map). Keep the
	// compiled-in defaults when the catalog cannot be read.
	if m, err := ImportListJSON("./lists/lists.json"); err != nil {
		log.Printf("ERROR lists.json → %v", err)
	} else {
		blocklistURLs = m
	}

	var wg sync.WaitGroup
	errCh := make(chan error, len(blocklistURLs))
	for cat, url := range blocklistURLs {
		wg.Add(1)
		go func(c, u string) {
			defer wg.Done()
			if err := importCategory(c, u); err != nil {
				errCh <- fmt.Errorf("%s: %v", c, err)
			}
		}(cat, url)
	}
	wg.Wait()
	close(errCh)

	// BUG FIX: the original tested len(errCh) AFTER draining the channel, so
	// it was always 0 and partial failures were never reported. Count while
	// draining instead.
	failed := 0
	for err := range errCh {
		failed++
		fmt.Println("❌", err)
	}
	if failed > 0 {
		return fmt.Errorf("Blocklisten-Import teilweise fehlgeschlagen")
	}

	fmt.Println("✅ Blocklisten-Import abgeschlossen")
	fmt.Println(blocklistURLs)
	blocklistURLs["flodpod"] = "null"
	return nil
}

// importCategory streams one blocklist file line by line into the Redis hash
// "bl:<cat>", pipelining writes in batches of 500.
func importCategory(cat, url string) error {
	fmt.Printf("⬇️ Lade %s (%s)\n", cat, url)
	resp, err := http.Get(url)
	if err != nil {
		return fmt.Errorf("HTTP-Fehler: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return fmt.Errorf("HTTP %d", resp.StatusCode)
	}

	scanner := bufio.NewScanner(resp.Body)
	pipe := rdb.Pipeline()
	count, batchCount := 0, 0
	const batchSize = 500

	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue // blank line or comment
		}
		prefix, valid := normalizePrefix(line)
		if !valid {
			fmt.Printf("⚠️ Ungültig %s: %s\n", cat, line)
			continue
		}
		pipe.HSet(ctx, "bl:"+cat, prefix, 1)
		count++
		batchCount++
		if batchCount >= batchSize {
			if _, err := pipe.Exec(ctx); err != nil {
				return fmt.Errorf("Redis-Fehler: %v", err)
			}
			batchCount = 0
		}
		if count%1000 == 0 {
			fmt.Printf("📈 [%s] %d Einträge\n", cat, count)
		}
	}
	if err := scanner.Err(); err != nil {
		return fmt.Errorf("lesefehler: %v", err)
	}
	// Flush the final partial batch.
	if batchCount > 0 {
		if _, err := pipe.Exec(ctx); err != nil {
			return fmt.Errorf("Redis-Fehler final: %v", err)
		}
	}
	fmt.Printf("✅ [%s] %d Einträge importiert\n", cat, count)
	return nil
}

// normalizePrefix turns a bare IP or CIDR string into CIDR notation (bare
// addresses get "/32" or "/128" appended) and reports whether the result is a
// valid prefix.
func normalizePrefix(s string) (string, bool) {
	// BUG FIX: trim BEFORE parsing — the original trimmed after appending the
	// suffix, so whitespace-padded bare IPs always failed net.ParseIP.
	s = strings.TrimSpace(s)
	if !strings.Contains(s, "/") {
		ip := net.ParseIP(s)
		if ip == nil {
			return "", false
		}
		if ip.To4() != nil {
			s += "/32"
		} else {
			s += "/128"
		}
	}
	_, err := netip.ParsePrefix(s)
	return s, err == nil
}

// handleWhitelist adds an IP to the whitelist. POST body: {"ip": "..."}.
func handleWhitelist(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	var body struct {
		IP string `json:"ip"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		http.Error(w, "bad request", http.StatusBadRequest)
		return
	}
	addr, err := netip.ParseAddr(body.IP)
	if err != nil {
		http.Error(w, "invalid IP", http.StatusBadRequest)
		return
	}
	key := "wl:" + addr.String()
	if err := rdb.Set(ctx, key, "1", 0).Err(); err != nil {
		http.Error(w, "redis error", http.StatusInternalServerError)
		return
	}
	// There is no per-IP entry in the prefix cache; the lock is taken only as
	// a placeholder for future cache invalidation.
	prefixCacheMu.Lock()
	defer prefixCacheMu.Unlock()

	writeJSON(w, map[string]string{
		"status": "whitelisted",
		"ip":     addr.String(),
	})
}

// handleCheck answers GET /check/<ip> with a JSON verdict.
func handleCheck(w http.ResponseWriter, r *http.Request) {
	checkRequests.Inc()
	ipStr := strings.TrimPrefix(r.URL.Path, "/check/")
	ip, err := netip.ParseAddr(ipStr)
	if err != nil {
		http.Error(w, "invalid IP", http.StatusBadRequest)
		return
	}

	var cats []string
	for a := range blocklistURLs {
		cats = append(cats, a)
	}
	matches, err := checkIP(ip, cats)
	if err != nil {
		http.Error(w, "server error", http.StatusInternalServerError)
		return
	}
	if len(matches) > 0 {
		checkBlocked.Inc()
	} else {
		wl, _ := rdb.Exists(ctx, "wl:"+ip.String()).Result()
		if wl > 0 {
			checkWhitelist.Inc()
		}
	}
	writeJSON(w, map[string]any{
		"ip":         ip.String(),
		"blocked":    len(matches) > 0,
		"categories": matches,
	})
}

// handleTraefik is a Traefik ForwardAuth endpoint: 200 = allowed, 403 = blocked.
func handleTraefik(w http.ResponseWriter, r *http.Request) {
	checkRequests.Inc()

	// BUG FIX: X-Forwarded-For can be a comma-separated proxy chain, and
	// RemoteAddr is "host:port" — neither parsed as a bare IP before.
	ipStr := r.Header.Get("X-Forwarded-For")
	if ipStr != "" {
		first, _, _ := strings.Cut(ipStr, ",")
		ipStr = strings.TrimSpace(first)
	} else if host, _, err := net.SplitHostPort(r.RemoteAddr); err == nil {
		ipStr = host
	} else {
		ipStr = r.RemoteAddr
	}

	ip, err := netip.ParseAddr(ipStr)
	if err != nil {
		http.Error(w, "invalid IP", http.StatusBadRequest)
		return
	}

	var cats []string
	for a := range blocklistURLs {
		cats = append(cats, a)
	}
	matches, err := checkIP(ip, cats)
	if err != nil {
		http.Error(w, "server error", http.StatusInternalServerError)
		return
	}
	if len(matches) > 0 {
		checkBlocked.Inc()
	} else {
		wl, _ := rdb.Exists(ctx, "wl:"+ip.String()).Result()
		if wl > 0 {
			checkWhitelist.Inc()
		}
	}
	if len(matches) > 0 {
		http.Error(w, "blocked", http.StatusForbidden)
		return
	}
	w.WriteHeader(http.StatusOK)
}

// checkIP returns the categories whose prefixes contain ip. A whitelisted IP
// always yields an empty (non-nil) slice.
func checkIP(ip netip.Addr, cats []string) ([]string, error) {
	wl, err := rdb.Exists(ctx, "wl:"+ip.String()).Result()
	if err != nil {
		return nil, err
	}
	if wl > 0 {
		return []string{}, nil
	}

	matches := []string{}
	for _, cat := range cats {
		prefixes, err := loadCategoryPrefixes(cat)
		if err != nil {
			return nil, err
		}
		for _, pfx := range prefixes {
			if pfx.Contains(ip) {
				fmt.Printf("💡 MATCH: %s in %s (%s)\n", ip, cat, pfx)
				matches = append(matches, cat)
				break // one match per category is enough
			}
		}
	}
	return matches, nil
}

// loadCategoryPrefixes returns the parsed prefixes of a category, using a
// 10-minute in-process cache in front of Redis.
func loadCategoryPrefixes(cat string) ([]netip.Prefix, error) {
	// PERF FIX: the original held the full write lock even for cache hits,
	// serializing every concurrent check through the Redis round-trip. Serve
	// fresh entries under the read lock; only refills take the write lock.
	prefixCacheMu.RLock()
	entry, ok := prefixCache[cat]
	prefixCacheMu.RUnlock()
	if ok && time.Now().Before(entry.expireAt) {
		return entry.prefixes, nil
	}

	keys, err := rdb.HKeys(ctx, "bl:"+cat).Result()
	if err != nil {
		return nil, err
	}
	prefixes := make([]netip.Prefix, 0, len(keys))
	for _, k := range keys {
		k = strings.TrimSpace(k)
		pfx, err := netip.ParsePrefix(k)
		if err != nil {
			fmt.Printf("⚠️ Ungültiger Redis-Prefix %s: %s\n", cat, k)
			continue
		}
		prefixes = append(prefixes, pfx)
	}

	prefixCacheMu.Lock()
	prefixCache[cat] = prefixCacheEntry{
		prefixes: prefixes,
		expireAt: time.Now().Add(10 * time.Minute),
	}
	prefixCacheMu.Unlock()
	return prefixes, nil
}

// writeJSON encodes v as JSON onto w with the proper content type.
func writeJSON(w http.ResponseWriter, v any) {
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(v)
}

// handleDownload streams all entries of one category as a plain-text file:
// GET /download/<category>.
func handleDownload(w http.ResponseWriter, r *http.Request) {
	cat := strings.TrimPrefix(r.URL.Path, "/download/")
	if cat == "" {
		http.Error(w, "category missing", http.StatusBadRequest)
		return
	}
	// Reject unknown categories before touching Redis.
	if _, ok := blocklistURLs[cat]; !ok {
		http.Error(w, "unknown category", http.StatusNotFound)
		return
	}
	keys, err := rdb.HKeys(ctx, "bl:"+cat).Result()
	if err != nil {
		http.Error(w, "redis error", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s.txt\"", cat))
	for _, k := range keys {
		_, _ = fmt.Fprintln(w, k)
	}
}

func handleGUI(w http.ResponseWriter, r *http.Request) {
	html := `