Rollback without Ranger
All checks were successful
release-tag / release-image (push) Successful in 1m41s

2025-06-17 18:18:16 +02:00
parent 3e57aaa098
commit b87c8a9a6d
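The diff below drops the `cidranger` PC-trie cache and returns to keeping each category as a cached slice of `netip.Prefix` values that is scanned linearly. As a minimal, self-contained sketch of that lookup style (the `containsIP` helper and the sample prefixes are illustrative, not taken from the repository):

```go
package main

import (
	"fmt"
	"net/netip"
)

// containsIP reports whether ip falls inside any of the given prefixes.
// This mirrors the linear scan used after the rollback, in place of the
// cidranger PC-trie lookup that the commit removes.
func containsIP(prefixes []netip.Prefix, ip netip.Addr) bool {
	for _, pfx := range prefixes {
		if pfx.Contains(ip) {
			return true
		}
	}
	return false
}

func main() {
	// Illustrative prefixes; in the real service they come from Redis ("bl:<category>").
	prefixes := []netip.Prefix{
		netip.MustParsePrefix("10.0.0.0/8"),
		netip.MustParsePrefix("2001:db8::/32"),
	}

	fmt.Println(containsIP(prefixes, netip.MustParseAddr("10.1.2.3")))  // true
	fmt.Println(containsIP(prefixes, netip.MustParseAddr("192.0.2.1"))) // false
}
```

The trade-off: a PC-trie answers containment in time proportional to the prefix length, while the plain slice is a linear scan per category that is simpler and drops the extra dependency.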

main.go (133 changed lines)

@@ -19,57 +19,8 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/redis/go-redis/v9"
"github.com/yl2chen/cidranger"
)
// ──────────────────────────────────────────────
// Ranger cache (instead of prefixCache)
// ──────────────────────────────────────────────
type rangerCacheEntry struct {
ranger cidranger.Ranger
expireAt time.Time
}
var (
rangerCache = map[string]rangerCacheEntry{}
rangerCacheMu sync.RWMutex
)
// buildCategoryRanger fetches all CIDRs for the category from Redis, builds a PC-trie
// and keeps it in the cache for 10 minutes.
func buildCategoryRanger(cat string) (cidranger.Ranger, error) {
// --------- 1) Read lock: fast path for a cache hit
rangerCacheMu.RLock()
if e, ok := rangerCache[cat]; ok && time.Now().Before(e.expireAt) {
rangerCacheMu.RUnlock()
return e.ranger, nil
}
rangerCacheMu.RUnlock()
// --------- 2) Fetch the data from Redis
keys, err := rdb.HKeys(ctx, "bl:"+cat).Result()
log.Printf("Building ranger for %s - keys: %d", cat, len(keys))
if err != nil {
return nil, err
}
r := cidranger.NewPCTrieRanger()
for _, k := range keys {
if _, ipNet, err := net.ParseCIDR(strings.TrimSpace(k)); err == nil {
_ = r.Insert(cidranger.NewBasicRangerEntry(*ipNet))
}
}
// --------- 3) Write lock: update the cache
rangerCacheMu.Lock()
rangerCache[cat] = rangerCacheEntry{
ranger: r,
expireAt: time.Now().Add(10 * time.Minute),
}
rangerCacheMu.Unlock()
return r, nil
}
// Redis + Context
var ctx = context.Background()
var rdb = redis.NewClient(&redis.Options{
@@ -110,6 +61,17 @@ var blocklistURLs = map[string]string{
"bitwire": "https://raw.githubusercontent.com/bitwire-it/ipblocklist/refs/heads/main/ip-list.txt",
}
// Prefix cache
type prefixCacheEntry struct {
prefixes []netip.Prefix
expireAt time.Time
}
var (
prefixCache = map[string]prefixCacheEntry{}
prefixCacheMu sync.RWMutex
)
// Prometheus metrics
var (
checkRequests = prometheus.NewCounter(prometheus.CounterOpts{
@@ -145,8 +107,6 @@ func main() {
return
}
warmUpRangers()
// Server
http.HandleFunc("/", handleGUI)
http.HandleFunc("/download/", handleDownload)
@@ -351,6 +311,10 @@ func handleWhitelist(w http.ResponseWriter, r *http.Request) {
http.Error(w, "redis error", http.StatusInternalServerError)
return
}
// Optional: clear the cache for this IP
prefixCacheMu.Lock()
defer prefixCacheMu.Unlock()
// No IP-specific cache in this design, but the cache could be invalidated here if needed
writeJSON(w, map[string]string{
@@ -370,7 +334,7 @@ func handleCheck(w http.ResponseWriter, r *http.Request) {
}
var cats []string
for a := range blocklistURLs {
for a, _ := range blocklistURLs {
cats = append(cats, a)
}
@@ -402,12 +366,6 @@ func handleTraefik(w http.ResponseWriter, r *http.Request) {
if ipStr == "" {
ipStr = r.RemoteAddr
}
ipStr = strings.TrimSpace(strings.Split(ipStr, ",")[0]) // there may be several IPs
// Stripping the port works for both IPv4 and IPv6:
if host, _, err := net.SplitHostPort(ipStr); err == nil {
ipStr = host
}
ip, err := netip.ParseAddr(ipStr)
if err != nil {
http.Error(w, "invalid IP", http.StatusBadRequest)
@@ -415,7 +373,7 @@ func handleTraefik(w http.ResponseWriter, r *http.Request) {
}
var cats []string
for a := range blocklistURLs {
for a, _ := range blocklistURLs {
cats = append(cats, a)
}
@@ -442,34 +400,57 @@ func handleTraefik(w http.ResponseWriter, r *http.Request) {
// Check logic
func checkIP(ip netip.Addr, cats []string) ([]string, error) {
// Check the whitelist first
if wl, err := rdb.Exists(ctx, "wl:"+ip.String()).Result(); err == nil && wl > 0 {
return nil, nil
}
var matches []string
needle := net.IP(ip.AsSlice())
for _, cat := range cats {
r, err := buildCategoryRanger(cat)
wl, err := rdb.Exists(ctx, "wl:"+ip.String()).Result()
if err != nil {
return nil, err
}
ok, _ := r.Contains(needle)
if ok {
fmt.Printf("💡 MATCH: %s in %s\n", ip, cat)
if wl > 0 {
return []string{}, nil
}
matches := []string{}
for _, cat := range cats {
prefixes, err := loadCategoryPrefixes(cat)
if err != nil {
return nil, err
}
for _, pfx := range prefixes {
if pfx.Contains(ip) {
fmt.Printf("💡 MATCH: %s in %s (%s)\n", ip, cat, pfx)
matches = append(matches, cat)
break
}
}
}
return matches, nil
}
func warmUpRangers() {
for cat := range blocklistURLs {
if _, err := buildCategoryRanger(cat); err != nil {
log.Printf("warm-up error for %s: %v", cat, err)
func loadCategoryPrefixes(cat string) ([]netip.Prefix, error) {
prefixCacheMu.Lock()
defer prefixCacheMu.Unlock()
entry, ok := prefixCache[cat]
if ok && time.Now().Before(entry.expireAt) {
return entry.prefixes, nil
}
keys, err := rdb.HKeys(ctx, "bl:"+cat).Result()
if err != nil {
return nil, err
}
var prefixes []netip.Prefix
for _, k := range keys {
k = strings.TrimSpace(k)
pfx, err := netip.ParsePrefix(k)
if err == nil {
prefixes = append(prefixes, pfx)
} else {
fmt.Printf("⚠️ Ungültiger Redis-Prefix %s: %s\n", cat, k)
}
}
prefixCache[cat] = prefixCacheEntry{
prefixes: prefixes,
expireAt: time.Now().Add(10 * time.Minute),
// Changed here from 1 * time.Second
}
return prefixes, nil
}
// JSON helpers