flod/main.go
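// flod imports public IP blocklists into Redis and serves a small HTTP API:
// GET /check/<ip> reports whether the address falls into any imported
// blocklist category, and /metrics exposes Prometheus counters.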

package main

import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"net/netip"
	"strings"
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/redis/go-redis/v9"
)
// Redis client and shared context
var ctx = context.Background()
var rdb = redis.NewClient(&redis.Options{
	Addr: "10.10.5.249:6379",
})
// Blocklist source URLs, keyed by category
var blocklistURLs = map[string]string{
	"firehol": "https://raw.githubusercontent.com/firehol/blocklist-ipsets/master/firehol_level1.netset",
	"bitwire": "https://raw.githubusercontent.com/bitwire-it/ipblocklist/refs/heads/main/ip-list.txt",
	"RU":      "https://ipv64.net/blocklists/countries/ipv64_blocklist_RU.txt",
	"CN":      "https://ipv64.net/blocklists/countries/ipv64_blocklist_CN.txt",
}
// Per-category prefix cache with a short TTL
type prefixCacheEntry struct {
	prefixes []netip.Prefix
	expireAt time.Time
}

var (
	prefixCache   = map[string]prefixCacheEntry{}
	prefixCacheMu sync.Mutex
)
// Prometheus metrics
var (
	checkRequests = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "ipcheck_requests_total",
		Help: "Total IP check requests",
	})
	checkBlocked = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "ipcheck_blocked_total",
		Help: "Total blocked IPs",
	})
	checkWhitelist = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "ipcheck_whitelisted_total",
		Help: "Total whitelisted IPs",
	})
)
func init() {
	prometheus.MustRegister(checkRequests, checkBlocked, checkWhitelist)
}
// Main
func main() {
	// Import blocklists before starting the server
	if err := importBlocklists(); err != nil {
		fmt.Println("Blocklist import FAILED:", err)
		return
	}
	// HTTP server
	http.HandleFunc("/check/", handleCheck)
	http.Handle("/metrics", promhttp.Handler())
	fmt.Println("Server listening on :8080")
	if err := http.ListenAndServe(":8080", nil); err != nil {
		fmt.Println("HTTP server error:", err)
	}
}
// Import logic: fetch all blocklists concurrently
func importBlocklists() error {
	var wg sync.WaitGroup
	errCh := make(chan error, len(blocklistURLs))
	for cat, url := range blocklistURLs {
		wg.Add(1)
		go func(c, u string) {
			defer wg.Done()
			if err := importCategory(c, u); err != nil {
				errCh <- fmt.Errorf("%s: %v", c, err)
			}
		}(cat, url)
	}
	wg.Wait()
	close(errCh)
	// Drain the error channel and count failures
	failed := 0
	for err := range errCh {
		failed++
		fmt.Println("❌", err)
	}
	if failed > 0 {
		return fmt.Errorf("blocklist import partially failed (%d of %d lists)", failed, len(blocklistURLs))
	}
	fmt.Println("✅ Blocklist import complete")
	return nil
}
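// importCategory downloads one blocklist, normalizes every non-comment line
// to a CIDR prefix, and stores the prefixes as fields of the Redis hash
// "bl:<category>", flushing the pipeline every 500 entries.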
func importCategory(cat, url string) error {
	fmt.Printf("⬇️ Downloading %s (%s)\n", cat, url)
	resp, err := http.Get(url)
	if err != nil {
		return fmt.Errorf("HTTP error: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("HTTP %d", resp.StatusCode)
	}
	scanner := bufio.NewScanner(resp.Body)
	pipe := rdb.Pipeline()
	count, batchCount := 0, 0
	const batchSize = 500
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		prefix, valid := normalizePrefix(line)
		if !valid {
			fmt.Printf("⚠️ Invalid entry in %s: %s\n", cat, line)
			continue
		}
		pipe.HSet(ctx, "bl:"+cat, prefix, 1)
		count++
		batchCount++
		if batchCount >= batchSize {
			if _, err := pipe.Exec(ctx); err != nil {
				return fmt.Errorf("redis error: %v", err)
			}
			batchCount = 0
		}
		if count%1000 == 0 {
			fmt.Printf("📈 [%s] %d entries\n", cat, count)
		}
	}
	if err := scanner.Err(); err != nil {
		return fmt.Errorf("read error: %v", err)
	}
	if batchCount > 0 {
		if _, err := pipe.Exec(ctx); err != nil {
			return fmt.Errorf("redis error on final flush: %v", err)
		}
	}
	fmt.Printf("✅ [%s] %d entries imported\n", cat, count)
	return nil
}
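// normalizePrefix turns a bare IPv4/IPv6 address into a /32 or /128 prefix
// and reports whether the result parses as a valid CIDR prefix.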
func normalizePrefix(s string) (string, bool) {
	s = strings.TrimSpace(s)
	if !strings.Contains(s, "/") {
		ip := net.ParseIP(s)
		if ip == nil {
			return "", false
		}
		if ip.To4() != nil {
			s += "/32"
		} else {
			s += "/128"
		}
	}
	_, err := netip.ParsePrefix(s)
	return s, err == nil
}
// Check handler for GET /check/<ip>
func handleCheck(w http.ResponseWriter, r *http.Request) {
	checkRequests.Inc()
	ipStr := strings.TrimPrefix(r.URL.Path, "/check/")
	ip, err := netip.ParseAddr(ipStr)
	if err != nil {
		http.Error(w, "invalid IP", http.StatusBadRequest)
		return
	}
	cats := []string{"firehol", "bitwire", "RU", "CN"}
	matches, err := checkIP(ip, cats)
	if err != nil {
		http.Error(w, "server error", http.StatusInternalServerError)
		return
	}
	if len(matches) > 0 {
		checkBlocked.Inc()
	} else {
		// checkIP already treats whitelisted IPs as unblocked;
		// this lookup only drives the whitelist metric
		wl, _ := rdb.Exists(ctx, "wl:"+ip.String()).Result()
		if wl > 0 {
			checkWhitelist.Inc()
		}
	}
	writeJSON(w, map[string]any{
		"ip":         ip.String(),
		"blocked":    len(matches) > 0,
		"categories": matches,
	})
}
// Check logic: whitelist first, then every requested category
func checkIP(ip netip.Addr, cats []string) ([]string, error) {
	wl, err := rdb.Exists(ctx, "wl:"+ip.String()).Result()
	if err != nil {
		return nil, err
	}
	if wl > 0 {
		return []string{}, nil
	}
	matches := []string{}
	for _, cat := range cats {
		prefixes, err := loadCategoryPrefixes(cat)
		if err != nil {
			return nil, err
		}
		for _, pfx := range prefixes {
			if pfx.Contains(ip) {
				fmt.Printf("💡 MATCH: %s in %s (%s)\n", ip, cat, pfx)
				matches = append(matches, cat)
				break
			}
		}
	}
	return matches, nil
}
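// loadCategoryPrefixes returns the parsed prefixes for a category, serving
// them from the in-memory cache while it is fresh and otherwise reloading
// the full "bl:<category>" hash from Redis (cache TTL: 1 second).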
func loadCategoryPrefixes(cat string) ([]netip.Prefix, error) {
	prefixCacheMu.Lock()
	defer prefixCacheMu.Unlock()
	entry, ok := prefixCache[cat]
	if ok && time.Now().Before(entry.expireAt) {
		return entry.prefixes, nil
	}
	keys, err := rdb.HKeys(ctx, "bl:"+cat).Result()
	if err != nil {
		return nil, err
	}
	var prefixes []netip.Prefix
	for _, k := range keys {
		k = strings.TrimSpace(k)
		pfx, err := netip.ParsePrefix(k)
		if err == nil {
			prefixes = append(prefixes, pfx)
		} else {
			fmt.Printf("⚠️ Invalid prefix in Redis for %s: %s\n", cat, k)
		}
	}
	prefixCache[cat] = prefixCacheEntry{
		prefixes: prefixes,
		expireAt: time.Now().Add(1 * time.Second),
	}
	return prefixes, nil
}
// JSON helper
func writeJSON(w http.ResponseWriter, v any) {
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(v)
}
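// Example usage, assuming the service runs on localhost:8080; the address
// below is purely illustrative, and actual results depend on the imported
// lists and the "wl:" whitelist keys present in Redis:
//
//	curl http://localhost:8080/check/203.0.113.7
//	{"blocked":true,"categories":["firehol"],"ip":"203.0.113.7"}
//
//	curl -s http://localhost:8080/metrics | grep ipcheck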