diff --git a/main.go b/main.go
index 92a2601..c16eb86 100644
--- a/main.go
+++ b/main.go
@@ -21,6 +21,153 @@ import (
 	"github.com/redis/go-redis/v9"
 )
 
+var (
+	// Requests & Responses & Inflight & Duration
+	reqTotal = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "ipcheck_requests_total",
+			Help: "Total HTTP requests by handler",
+		},
+		[]string{"handler"},
+	)
+	respTotal = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "ipcheck_http_responses_total",
+			Help: "HTTP responses by handler and code",
+		},
+		[]string{"handler", "code"},
+	)
+	inflight = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Name: "ipcheck_requests_inflight",
+			Help: "Inflight HTTP requests",
+		},
+	)
+	reqDuration = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Name: "ipcheck_request_duration_seconds",
+			Help: "Request duration in seconds",
+			// Choose buckets similar to your manual histogram
+			Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
+		},
+		[]string{"handler"},
+	)
+
+	// Importer
+	importCycles = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "ipcheck_import_cycles_total",
+			Help: "Completed import cycles",
+		},
+	)
+	importLastSuccess = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Name: "ipcheck_import_last_success_timestamp_seconds",
+			Help: "Last successful import Unix time",
+		},
+	)
+	importErrors = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "ipcheck_import_errors_total",
+			Help: "Import errors by category",
+		},
+		[]string{"category"},
+	)
+	importDuration = prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Name: "ipcheck_import_duration_seconds",
+			Help: "Import duration by category",
+			Buckets: []float64{0.5, 1, 2, 5, 10, 30, 60, 120, 300},
+		},
+		[]string{"category"},
+	)
+
+	// Already present: blocklistHashSizes (GaugeVec)
+
+	catalogCategories = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Name: "ipcheck_catalog_categories_total",
+			Help: "Number of categories in catalog",
+		},
+	)
+
+	// The honeypot parts don't exist in the second project → omit them or add them later
+
+	whitelistTotal = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Name: "ipcheck_whitelist_total",
+			Help: "Whitelisted IPs",
+		},
+	)
+
+	traefikBlocks = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "ipcheck_traefik_blocks_total",
+			Help: "Traefik blocks due to matches",
+		},
+	)
+
+	downloads = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "ipcheck_downloads_total",
+			Help: "Downloads served by category",
+		},
+		[]string{"category"},
+	)
+
+	manualBlacklistSize = prometheus.NewGauge(
+		prometheus.GaugeOpts{
+			Name: "ipcheck_manual_blacklist_size",
+			Help: "Manual blacklist size",
+		},
+	)
+)
+
+func init() {
+	prometheus.MustRegister(
+		reqTotal, respTotal, inflight, reqDuration,
+		importCycles, importLastSuccess, importErrors, importDuration,
+		blocklistHashSizes, catalogCategories, whitelistTotal,
+		traefikBlocks, downloads, manualBlacklistSize,
+	)
+
+	// Your existing counters:
+	// checkRequests, checkBlocked, checkWhitelist are fine – they can stay.
+}
+
+type statusRecorder struct {
+	http.ResponseWriter
+	code int
+}
+
+func (w *statusRecorder) WriteHeader(code int) {
+	w.code = code
+	w.ResponseWriter.WriteHeader(code)
+}
+
+func instrumentHandler(name string, next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		inflight.Inc()
+		start := time.Now()
+		rec := &statusRecorder{ResponseWriter: w, code: 200}
+		reqTotal.WithLabelValues(name).Inc()
+
+		next.ServeHTTP(rec, r)
+
+		inflight.Dec()
+		reqDuration.WithLabelValues(name).Observe(time.Since(start).Seconds())
+		respTotal.WithLabelValues(name, fmt.Sprintf("%d", rec.code)).Inc()
+	})
+}
+
+func instrumentFunc(name string, fn http.HandlerFunc) http.Handler {
+	return instrumentHandler(name, http.HandlerFunc(fn))
+}
+
+// --------------------------------------------------
+//
+// --------------------------------------------------
+
 // Redis + Context
 var ctx = context.Background()
 
@@ -105,11 +252,11 @@ func main() {
 	}
 
 	// Server
-	http.HandleFunc("/", handleGUI)
-	http.HandleFunc("/download/", handleDownload)
-	http.HandleFunc("/whitelist", handleWhitelist)
-	http.HandleFunc("/check/", handleCheck)
-	http.HandleFunc("/traefik", handleTraefik)
+	http.Handle("/", instrumentFunc("gui", handleGUI))
+	http.Handle("/download/", instrumentFunc("download", handleDownload))
+	http.Handle("/whitelist", instrumentFunc("whitelist", handleWhitelist))
+	http.Handle("/check/", instrumentFunc("check", handleCheck))
+	http.Handle("/traefik", instrumentFunc("traefik", handleTraefik))
 	http.Handle("/metrics", promhttp.Handler())
 
 	go func() {
@@ -148,12 +295,14 @@ func clientIPFromHeaders(r *http.Request) (netip.Addr, error) {
 }
 
 func updateBlocklistMetrics() {
-	var rdb = redis.NewClient(&redis.Options{
+	rdb := redis.NewClient(&redis.Options{
 		Addr:     os.Getenv("REDIS_ADDR"),
 		DB:       0,
 		Username: os.Getenv("REDIS_USER"),
 		Password: os.Getenv("REDIS_PASS"),
 	})
+
+	// Blocklist hash sizes per category
 	for cat := range blocklistURLs {
 		key := "bl:" + cat
 		count, err := rdb.HLen(ctx, key).Result()
@@ -163,6 +312,16 @@
 		}
 		blocklistHashSizes.WithLabelValues(cat).Set(float64(count))
 	}
+
+	// Whitelist total (if whitelisted IPs are stored as "wl:*" keys)
+	if n, err := rdb.Keys(ctx, "wl:*").Result(); err == nil {
+		whitelistTotal.Set(float64(len(n)))
+	}
+
+	// Manual blacklist, if present
+	if n, err := rdb.HLen(ctx, "bl:manual").Result(); err == nil {
+		manualBlacklistSize.Set(float64(n))
+	}
 }
 
 type target struct {
@@ -203,6 +362,10 @@ func fetchAndSave(client *http.Client, t target, outDir string) error {
 
 // Import logic
 func importBlocklists() error {
+
+	startAll := time.Now()
+	importCycles.Inc()
+
 	client := &http.Client{Timeout: 60 * time.Second}
 	t := target{Name: "Catalog", URL: os.Getenv("FLOD_IMPORT_URL")}
 	if err := os.MkdirAll("/lists/", 0o755); err != nil {
@@ -217,6 +380,8 @@
 	}
 
 	blocklistURLs, _ = ImportListJSON("/lists/" + fileName)
+	catalogCategories.Set(float64(len(blocklistURLs)))
+
 	var wg sync.WaitGroup
 	errCh := make(chan error, len(blocklistURLs))
 
@@ -224,15 +389,25 @@
 		wg.Add(1)
 		go func(c, u string) {
 			defer wg.Done()
+			start := time.Now()
 			if err := importCategory(c, u); err != nil {
+				importErrors.WithLabelValues(c).Inc()
 				errCh <- fmt.Errorf("%s: %v", c, err)
 			}
+			importDuration.WithLabelValues(c).Observe(time.Since(start).Seconds())
 		}(cat, url)
 	}
 
 	wg.Wait()
 	close(errCh)
 
+	// Only set the success timestamp if there were no errors:
+	if len(errCh) == 0 {
+		importLastSuccess.Set(float64(time.Now().Unix()))
+	}
+
+	_ = startAll // (in case you still want to measure the total duration separately)
+
 	for err := range errCh {
 		fmt.Println("❌", err)
 	}
@@ -445,6 +620,7 @@ func handleTraefik(w http.ResponseWriter, r *http.Request) {
 
 	if len(matches) > 0 {
 		checkBlocked.Inc()
+		traefikBlocks.Inc()
 		errorhtml(w, r) //http.Error(w, "blocked", http.StatusTooManyRequests)
 		return
 	}
@@ -553,6 +729,7 @@ func handleDownload(w http.ResponseWriter, r *http.Request) {
 	}
 
 	// Set the download headers
+	downloads.WithLabelValues(cat).Inc()
 	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
 	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s.txt\"", cat))
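
Below is a minimal sketch (not part of the patch) of how the new wrapper could be exercised in a test, assuming the metrics and instrumentFunc above live in package main. The test name, the "check" handler label, and the use of github.com/prometheus/client_golang/prometheus/testutil are illustrative assumptions, not existing code.

package main

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/prometheus/client_golang/prometheus/testutil"
)

// TestInstrumentFuncRecordsStatus wraps a trivial handler and checks that the
// request counter and the per-status response counter are incremented.
func TestInstrumentFuncRecordsStatus(t *testing.T) {
	// The wrapped handler always answers 404 so the status-code label is easy to assert.
	h := instrumentFunc("check", func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "not found", http.StatusNotFound)
	})

	req := httptest.NewRequest(http.MethodGet, "/check/1.2.3.4", nil)
	h.ServeHTTP(httptest.NewRecorder(), req)

	// One request should have been counted for the "check" handler ...
	if got := testutil.ToFloat64(reqTotal.WithLabelValues("check")); got != 1 {
		t.Fatalf("reqTotal = %v, want 1", got)
	}
	// ... and one response with status code 404.
	if got := testutil.ToFloat64(respTotal.WithLabelValues("check", "404")); got != 1 {
		t.Fatalf("respTotal = %v, want 1", got)
	}
}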
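
One side note on updateBlocklistMetrics: rdb.Keys(ctx, "wl:*") walks the whole keyspace in a single blocking command, which is fine for a small whitelist but gets expensive as it grows. If that ever matters, the count could be gathered with SCAN instead; the sketch below is an assumption-laden alternative (the helper name countWhitelist and the page size of 500 are made up), not something the patch requires.

package main

import (
	"context"

	"github.com/redis/go-redis/v9"
)

// countWhitelist counts keys matching "wl:*" using SCAN, paging through the
// keyspace instead of issuing a single blocking KEYS call.
func countWhitelist(ctx context.Context, rdb *redis.Client) (int64, error) {
	var cursor uint64
	var total int64
	for {
		keys, next, err := rdb.Scan(ctx, cursor, "wl:*", 500).Result()
		if err != nil {
			return 0, err
		}
		total += int64(len(keys))
		cursor = next
		// A cursor of 0 means the iteration is complete.
		if cursor == 0 {
			return total, nil
		}
	}
}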