Compare commits

...

4 Commits

Author SHA1 Message Date
bbccee0754 this should work!
2025-06-12 06:48:21 +02:00
27ac907eef fine, function unclear 2025-06-12 06:43:13 +02:00
1b2a0945ad runs, but slowly 2025-06-11 23:02:40 +02:00
2b9f59da02 This works, but makes the RAM explode. 2025-06-11 20:45:50 +02:00
4 changed files with 1144 additions and 407 deletions

740
_main.go Normal file

@@ -0,0 +1,740 @@
package main
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net"
"net/http"
"net/netip"
"os"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/redis/go-redis/v9"
)
// -----------------------------------------------------------------------------
// CONFIGURATION & ENV
// -----------------------------------------------------------------------------
type Source struct {
Category string
URL []string
}
type Config struct {
RedisAddr string
Sources []Source
TTLHours int
IsWorker bool // true ⇒ loads blocklists & writes them to Redis
}
func loadConfig() Config {
// default Blocklist source
srcs := []Source{{
Category: "generic",
URL: []string{
"https://raw.githubusercontent.com/firehol/blocklist-ipsets/master/firehol_level1.netset",
"https://raw.githubusercontent.com/bitwire-it/ipblocklist/refs/heads/main/ip-list.txt",
"",
},
}}
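// BLOCKLIST_SOURCES overrides these defaults. Based on the parsing below, the
// expected shape is comma-separated "<category>:<url>[|<url>...]" entries,
// with '|' or ';' between URLs, e.g. (illustrative values):
//   BLOCKLIST_SOURCES="spam:https://example.org/a.txt|https://example.org/b.txt,tor:https://example.org/tor.txt"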
if env := os.Getenv("BLOCKLIST_SOURCES"); env != "" {
srcs = nil
for _, spec := range strings.Split(env, ",") {
spec = strings.TrimSpace(spec)
if spec == "" {
continue
}
parts := strings.SplitN(spec, ":", 2)
if len(parts) != 2 {
continue
}
cat := strings.TrimSpace(parts[0])
raw := strings.FieldsFunc(parts[1], func(r rune) bool { return r == '|' || r == ';' })
var urls []string
for _, u := range raw {
if u = strings.TrimSpace(u); u != "" {
urls = append(urls, u)
}
}
if len(urls) > 0 {
srcs = append(srcs, Source{Category: cat, URL: urls})
}
}
}
ttl := 720
if env := os.Getenv("TTL_HOURS"); env != "" {
fmt.Sscanf(env, "%d", &ttl)
}
isWorker := strings.ToLower(os.Getenv("ROLE")) == "worker"
return Config{
//RedisAddr: getenv("REDIS_ADDR", "redis:6379"),
RedisAddr: getenv("REDIS_ADDR", "10.10.5.249:6379"),
Sources: srcs,
TTLHours: ttl,
IsWorker: isWorker,
}
}
func getenv(k, def string) string {
if v := os.Getenv(k); v != "" {
return v
}
return def
}
// -----------------------------------------------------------------------------
// REDIS KEYS
// -----------------------------------------------------------------------------
func keyBlock(cat string, p netip.Prefix) string { return "bl:" + cat + ":" + p.String() }
func keyWhite(a netip.Addr) string { return "wl:" + a.String() }
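// Example key shapes produced by the helpers above:
//   keyBlock("generic", netip.MustParsePrefix("10.0.0.0/8")) -> "bl:generic:10.0.0.0/8"
//   keyWhite(netip.MustParseAddr("192.0.2.1"))               -> "wl:192.0.2.1"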
// -----------------------------------------------------------------------------
// RANGER: thread-safe in-memory index
// -----------------------------------------------------------------------------
type Ranger struct {
mu sync.RWMutex
blocks map[string]map[netip.Prefix]struct{}
whites map[netip.Addr]struct{}
}
func newRanger() *Ranger {
return &Ranger{
blocks: make(map[string]map[netip.Prefix]struct{}),
whites: make(map[netip.Addr]struct{}),
}
}
func (r *Ranger) resetBlocks(m map[string]map[netip.Prefix]struct{}) {
r.mu.Lock()
r.blocks = m
r.mu.Unlock()
}
func (r *Ranger) resetWhites(set map[netip.Addr]struct{}) {
r.mu.Lock()
r.whites = set
r.mu.Unlock()
}
func (r *Ranger) addBlock(cat string, p netip.Prefix) {
r.mu.Lock()
if _, ok := r.blocks[cat]; !ok {
r.blocks[cat] = make(map[netip.Prefix]struct{})
}
r.blocks[cat][p] = struct{}{}
r.mu.Unlock()
}
func (r *Ranger) removeBlock(cat string, p netip.Prefix) {
r.mu.Lock()
if m, ok := r.blocks[cat]; ok {
delete(m, p)
}
r.mu.Unlock()
}
func (r *Ranger) addWhite(a netip.Addr) {
r.mu.Lock()
r.whites[a] = struct{}{}
r.mu.Unlock()
}
func (r *Ranger) removeWhite(a netip.Addr) {
r.mu.Lock()
delete(r.whites, a)
r.mu.Unlock()
}
func (r *Ranger) blockedInCats(a netip.Addr, cats []string) []string {
r.mu.RLock()
defer r.mu.RUnlock()
if _, ok := r.whites[a]; ok {
return nil
}
if len(cats) == 0 {
for c := range r.blocks {
cats = append(cats, c)
}
}
var res []string
for _, cat := range cats {
if m, ok := r.blocks[cat]; ok {
for p := range m {
if p.Contains(a) {
res = append(res, cat)
break
}
}
}
}
sort.Strings(res)
return res
}
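// Illustrative lookup: with "bl:generic:10.0.0.0/8" loaded, blockedInCats for
// 10.1.2.3 returns ["generic"], unless "wl:10.1.2.3" exists, in which case the
// whitelist wins and nil is returned. An empty cats slice checks every loaded category.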
// -----------------------------------------------------------------------------
// INITIAL LOAD FROM REDIS (baseline before keyspace events)
// -----------------------------------------------------------------------------
func loadFromRedis(ctx context.Context, rdb *redis.Client, r *Ranger) error {
// 1) Blocks
blocks := make(map[string]map[netip.Prefix]struct{})
iter := rdb.Scan(ctx, 0, "bl:*", 0).Iterator()
for iter.Next(ctx) {
key := iter.Val() // bl:<cat>:<cidr>
parts := strings.SplitN(key, ":", 3)
if len(parts) != 3 {
continue
}
cat, cidr := parts[1], parts[2]
p, err := netip.ParsePrefix(cidr)
if err != nil {
continue
}
if _, ok := blocks[cat]; !ok {
blocks[cat] = map[netip.Prefix]struct{}{}
}
blocks[cat][p] = struct{}{}
}
if err := iter.Err(); err != nil {
return err
}
r.resetBlocks(blocks)
// 2) Whites
whites := make(map[netip.Addr]struct{})
wIter := rdb.Scan(ctx, 0, "wl:*", 0).Iterator()
for wIter.Next(ctx) {
ip := strings.TrimPrefix(wIter.Val(), "wl:")
if a, err := netip.ParseAddr(ip); err == nil {
whites[a] = struct{}{}
}
}
if err := wIter.Err(); err != nil {
return err
}
r.resetWhites(whites)
return nil
}
// -----------------------------------------------------------------------------
// SYNC WORKER (only on ROLE=worker)
// -----------------------------------------------------------------------------
func syncLoop(ctx context.Context, cfg Config, rdb *redis.Client, ranger *Ranger) {
if err := syncOnce(ctx, cfg, rdb, ranger); err != nil {
log.Println("initial sync:", err)
}
ticker := time.NewTicker(6 * time.Hour)
for {
select {
case <-ticker.C:
if err := syncOnce(ctx, cfg, rdb, ranger); err != nil {
log.Println("sync loop:", err)
}
case <-ctx.Done():
ticker.Stop()
return
}
}
}
func syncOnce(ctx context.Context, cfg Config, rdb *redis.Client, ranger *Ranger) error {
expiry := time.Duration(cfg.TTLHours) * time.Hour
newBlocks := make(map[string]map[netip.Prefix]struct{})
for _, src := range cfg.Sources {
for _, url := range src.URL {
if err := fetchList(ctx, url, func(p netip.Prefix) {
if _, ok := newBlocks[src.Category]; !ok {
newBlocks[src.Category] = map[netip.Prefix]struct{}{}
}
newBlocks[src.Category][p] = struct{}{}
_ = rdb.Set(ctx, keyBlock(src.Category, p), "1", expiry).Err()
}); err != nil {
return err
}
}
}
ranger.resetBlocks(newBlocks)
return nil
}
func fetchList(ctx context.Context, url string, cb func(netip.Prefix)) error {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
return err
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s -> %s", url, resp.Status)
}
return parseStream(resp.Body, cb)
}
func parseStream(r io.Reader, cb func(netip.Prefix)) error {
s := bufio.NewScanner(r)
for s.Scan() {
line := strings.TrimSpace(s.Text())
if line == "" || strings.HasPrefix(line, "#") {
continue
}
if p, err := netip.ParsePrefix(line); err == nil {
cb(p)
continue
}
if addr, err := netip.ParseAddr(line); err == nil {
plen := 32
if addr.Is6() {
plen = 128
}
cb(netip.PrefixFrom(addr, plen))
}
}
return s.Err()
}
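// parseStream accepts one entry per line: '#' comments and blank lines are skipped,
// CIDR entries ("203.0.113.0/24") are taken as-is, and bare addresses ("203.0.113.7",
// "2001:db8::1") are widened to /32 or /128 single-host prefixes.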
// -----------------------------------------------------------------------------
// KEYSPACE SUBSCRIBER: instant propagation
// -----------------------------------------------------------------------------
func subscribeKeyspace(ctx context.Context, rdb *redis.Client, ranger *Ranger) {
// listen to keyevent channels (not keyspace) so msg.Payload == actual key
patterns := []string{
"__keyevent@0__:set",
"__keyevent@0__:del",
"__keyevent@0__:expired",
}
pubsub := rdb.PSubscribe(ctx, patterns...)
go func() {
for msg := range pubsub.Channel() {
key := msg.Payload // full redis key e.g. "wl:1.2.3.4" or "bl:spam:10.0.0.0/8"
switch {
case strings.HasPrefix(key, "wl:"):
ipStr := strings.TrimPrefix(key, "wl:")
addr, err := netip.ParseAddr(ipStr)
if err != nil {
continue
}
switch msg.Channel {
case "__keyevent@0__:set":
ranger.addWhite(addr)
case "__keyevent@0__:del", "__keyevent@0__:expired":
ranger.removeWhite(addr)
}
case strings.HasPrefix(key, "bl:"):
parts := strings.SplitN(key, ":", 3)
if len(parts) != 3 {
continue
}
cat, cidr := parts[1], parts[2]
p, err := netip.ParsePrefix(cidr)
if err != nil {
continue
}
switch msg.Channel {
case "__keyevent@0__:set":
ranger.addBlock(cat, p)
case "__keyevent@0__:del", "__keyevent@0__:expired":
ranger.removeBlock(cat, p)
}
}
}
}()
}
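// Illustrative event flow: running `SET wl:192.0.2.1 1` against Redis publishes
// "__keyevent@0__:set" with payload "wl:192.0.2.1", which the goroutine above
// turns into ranger.addWhite(...) on every running instance, without a restart.
// This requires notify-keyspace-events to include keyevent notifications (see main).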
// -----------------------------------------------------------------------------
// HTTP SERVER
// -----------------------------------------------------------------------------
type Server struct {
ranger *Ranger
rdb *redis.Client
}
func (s *Server) routes() http.Handler {
mux := http.NewServeMux()
mux.HandleFunc("/check/", s.handleCheck)
mux.HandleFunc("/whitelist", s.handleAddWhite)
mux.HandleFunc("/categories", s.handleCats)
return mux
}
func (s *Server) handleCheck(w http.ResponseWriter, r *http.Request) {
ipStr := strings.TrimPrefix(r.URL.Path, "/check/")
if ipStr == "" {
http.Error(w, "missing IP", http.StatusBadRequest)
return
}
addr, err := netip.ParseAddr(ipStr)
if err != nil {
http.Error(w, "invalid IP", http.StatusBadRequest)
return
}
catsParam := strings.TrimSpace(r.URL.Query().Get("cats"))
var cats []string
if catsParam != "" {
cats = strings.Split(catsParam, ",")
}
blocked := s.ranger.blockedInCats(addr, cats)
writeJSON(w, map[string]any{
"ip": ipStr,
"blocked": len(blocked) > 0,
"categories": blocked,
})
}
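// Illustrative request/response for the handler above (values made up):
//   GET /check/203.0.113.7?cats=generic,de
//   -> {"ip":"203.0.113.7","blocked":true,"categories":["generic"]}
// Omitting ?cats= checks all loaded categories.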
// POST {"ip":"1.2.3.4"}
func (s *Server) handleAddWhite(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
return
}
var body struct {
IP string `json:"ip"`
}
if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
http.Error(w, "bad json", http.StatusBadRequest)
return
}
addr, err := netip.ParseAddr(strings.TrimSpace(body.IP))
if err != nil {
http.Error(w, "invalid IP", http.StatusBadRequest)
return
}
if err := s.rdb.Set(r.Context(), keyWhite(addr), "1", 0).Err(); err != nil {
http.Error(w, "redis", http.StatusInternalServerError)
return
}
s.ranger.addWhite(addr) // immediate local effect
writeJSON(w, map[string]string{"status": "whitelisted"})
}
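// Illustrative call: curl -X POST localhost:8080/whitelist -d '{"ip":"203.0.113.7"}'
// stores "wl:203.0.113.7" without a TTL and answers {"status":"whitelisted"};
// other instances pick the key up via the keyevent subscription.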
func (s *Server) handleCats(w http.ResponseWriter, _ *http.Request) {
s.ranger.mu.RLock()
cats := make([]string, 0, len(s.ranger.blocks))
for c := range s.ranger.blocks {
cats = append(cats, c)
}
s.ranger.mu.RUnlock()
sort.Strings(cats)
writeJSON(w, map[string]any{"categories": cats})
}
func writeJSON(w http.ResponseWriter, v any) {
w.Header().Set("Content-Type", "application/json")
_ = json.NewEncoder(w).Encode(v)
}
// -----------------------------------------------------------------------------
// COUNTRY DOWNLOAD FROM RIRs
// -----------------------------------------------------------------------------
// List of the delegated files of all five RIRs
var rirFiles = []string{
"https://ftp.ripe.net/pub/stats/ripencc/delegated-ripencc-latest",
"https://ftp.apnic.net/stats/apnic/delegated-apnic-latest",
"https://ftp.arin.net/pub/stats/arin/delegated-arin-extended-latest",
"https://ftp.lacnic.net/pub/stats/lacnic/delegated-lacnic-latest",
"https://ftp.afrinic.net/pub/stats/afrinic/delegated-afrinic-latest",
}
// Main function: returns all IPv4 ranges (as CIDRs) of a country across all RIRs
func GetIPRangesByCountry(countryCode string) ([]string, error) {
var allCIDRs []string
upperCode := strings.ToUpper(countryCode)
for _, url := range rirFiles {
resp, err := http.Get(url)
if err != nil {
return nil, fmt.Errorf("fehler beim abrufen von %s: %w", url, err)
}
defer resp.Body.Close()
scanner := bufio.NewScanner(resp.Body)
for scanner.Scan() {
line := scanner.Text()
if strings.HasPrefix(line, "2") || strings.HasPrefix(line, "#") {
continue // comment or header line
}
if strings.Contains(line, "|"+upperCode+"|ipv4|") {
fields := strings.Split(line, "|")
if len(fields) < 5 {
continue
}
ipStart := fields[3]
count, _ := strconv.Atoi(fields[4])
cidrs := summarizeCIDR(ipStart, count)
allCIDRs = append(allCIDRs, cidrs...)
}
}
}
return allCIDRs, nil
}
// Helper: start IP + count → []CIDR
func summarizeCIDR(start string, count int) []string {
var cidrs []string
ip := net.ParseIP(start).To4()
if ip == nil {
return cidrs
}
startInt := ipToInt(ip)
for count > 0 {
// pick the largest block that is aligned to startInt and still fits into count
maxSize := 32
for maxSize > 0 {
blockSize := uint64(1) << uint(32-(maxSize-1))
if uint64(startInt)%blockSize != 0 || blockSize > uint64(count) {
break
}
maxSize--
}
cidr := fmt.Sprintf("%s/%d", intToIP(startInt), maxSize)
cidrs = append(cidrs, cidr)
count -= 1 << uint(32-maxSize)
startInt += uint32(1 << uint(32-maxSize))
}
return cidrs
}
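// Worked example (illustrative): summarizeCIDR("10.0.0.4", 8) yields
// ["10.0.0.4/30", "10.0.0.8/30"]; the first block is limited by alignment,
// the second by the remaining address count.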
func ipToInt(ip net.IP) uint32 {
return uint32(ip[0])<<24 + uint32(ip[1])<<16 + uint32(ip[2])<<8 + uint32(ip[3])
}
func intToIP(i uint32) net.IP {
return net.IPv4(byte(i>>24), byte(i>>16), byte(i>>8), byte(i))
}
// All valid ISO 3166-1 alpha-2 country codes (abridged; the real list is longer)
var allCountryCodes = []string{
"AD", "AE", "AF", "AG", "AI", "AL", "AM", "AO", "AR", "AT", "AU", "AZ",
"BA", "BB", "BD", "BE", "BF", "BG", "BH", "BI", "BJ", "BN", "BO", "BR", "BS",
"BT", "BW", "BY", "BZ", "CA", "CD", "CF", "CG", "CH", "CI", "CL", "CM", "CN",
"CO", "CR", "CU", "CV", "CY", "CZ", "DE", "DJ", "DK", "DM", "DO", "DZ", "EC",
"EE", "EG", "ER", "ES", "ET", "FI", "FJ", "FM", "FR", "GA", "GB", "GD", "GE",
"GH", "GM", "GN", "GQ", "GR", "GT", "GW", "GY", "HK", "HN", "HR", "HT", "HU",
"ID", "IE", "IL", "IN", "IQ", "IR", "IS", "IT", "JM", "JO", "JP", "KE", "KG",
"KH", "KI", "KM", "KN", "KP", "KR", "KW", "KZ", "LA", "LB", "LC", "LI", "LK",
"LR", "LS", "LT", "LU", "LV", "LY", "MA", "MC", "MD", "ME", "MG", "MH", "MK",
"ML", "MM", "MN", "MR", "MT", "MU", "MV", "MW", "MX", "MY", "MZ", "NA", "NE",
"NG", "NI", "NL", "NO", "NP", "NR", "NZ", "OM", "PA", "PE", "PG", "PH", "PK",
"PL", "PT", "PW", "PY", "QA", "RO", "RS", "RU", "RW", "SA", "SB", "SC", "SD",
"SE", "SG", "SI", "SK", "SL", "SM", "SN", "SO", "SR", "ST", "SV", "SY", "SZ",
"TD", "TG", "TH", "TJ", "TL", "TM", "TN", "TO", "TR", "TT", "TV", "TZ", "UA",
"UG", "US", "UY", "UZ", "VC", "VE", "VN", "VU", "WS", "YE", "ZA", "ZM", "ZW",
}
// Calls GetIPRangesByCountry for all country codes
func GetAllCountryIPRanges() (map[string][]string, error) {
allResults := make(map[string][]string)
for _, code := range allCountryCodes {
fmt.Printf("Verarbeite %s...\n", code)
cidrs, err := GetIPRangesByCountry(code)
if err != nil {
fmt.Printf("Fehler bei %s: %v\n", code, err)
continue
}
if len(cidrs) > 0 {
allResults[code] = cidrs
}
}
return allResults, nil
}
// Better, optimized routine
func GetAllCountryPrefixes() (map[string][]netip.Prefix, error) {
var (
resultMu sync.Mutex
results = make(map[string][]netip.Prefix)
wg sync.WaitGroup
sem = make(chan struct{}, 10) // at most 10 countries processed concurrently
)
for _, code := range allCountryCodes {
wg.Add(1)
sem <- struct{}{} // blocks once 10 goroutines are running
go func(countryCode string) {
defer wg.Done()
defer func() { <-sem }() // release the slot
fmt.Printf("Processing %s...\n", countryCode)
cidrs, err := GetIPRangesByCountry(countryCode)
if err != nil {
log.Printf("Fehler bei %s: %v", countryCode, err)
return
}
var validPrefixes []netip.Prefix
for _, c := range cidrs {
prefix, err := netip.ParsePrefix(c)
if err != nil {
log.Printf("CIDR-Fehler [%s]: %v", c, err)
continue
}
validPrefixes = append(validPrefixes, prefix)
}
if len(validPrefixes) > 0 {
resultMu.Lock()
results[countryCode] = validPrefixes
resultMu.Unlock()
}
}(code)
}
wg.Wait()
return results, nil
}
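// Note on cost: every GetIPRangesByCountry call downloads all five delegated
// files again, so the loop above fans out to roughly 5 * len(allCountryCodes)
// HTTP downloads, throttled to 10 countries at a time by the semaphore.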
// Option 2
func LoadAllCountryPrefixesIntoRedisAndRanger(
ctx context.Context,
rdb *redis.Client,
ranger *Ranger,
ttlHours int,
) error {
var (
resultMu sync.Mutex
wg sync.WaitGroup
sem = make(chan struct{}, 10) // at most 10 concurrent downloads
)
expiry := time.Duration(ttlHours) * time.Hour
results := make(map[string][]netip.Prefix)
for _, code := range allCountryCodes {
wg.Add(1)
sem <- struct{}{} // acquire a slot
go func(countryCode string) {
defer wg.Done()
defer func() { <-sem }() // release the slot
fmt.Printf("Loading %s...\n", countryCode)
cidrs, err := GetIPRangesByCountry(countryCode)
if err != nil {
log.Printf("Fehler bei %s: %v", countryCode, err)
return
}
var validPrefixes []netip.Prefix
for _, c := range cidrs {
prefix, err := netip.ParsePrefix(c)
if err != nil {
log.Printf("CIDR ungültig [%s]: %v", c, err)
continue
}
validPrefixes = append(validPrefixes, prefix)
}
if len(validPrefixes) > 0 {
resultMu.Lock()
results[countryCode] = validPrefixes
resultMu.Unlock()
}
}(code)
}
wg.Wait()
// After processing: write everything into the Ranger and Redis
for code, prefixes := range results {
for _, p := range prefixes {
ranger.addBlock(code, p)
key := keyBlock(code, p)
if err := rdb.Set(ctx, key, "1", expiry).Err(); err != nil {
log.Printf("Redis-Fehler bei %s: %v", key, err)
}
}
}
return nil
}
// -----------------------------------------------------------------------------
// MAIN
// -----------------------------------------------------------------------------
func main() {
cfg := loadConfig()
ctx := context.Background()
rdb := redis.NewClient(&redis.Options{Addr: cfg.RedisAddr})
if err := rdb.Ping(ctx).Err(); err != nil {
log.Fatalf("redis: %v", err)
}
// enable keyspace events (if not already set in redis.conf);
// "Eg$x" enables keyevent notifications for generic commands (DEL), string
// commands (SET) and expirations, which is what subscribeKeyspace listens for
_ = rdb.ConfigSet(ctx, "notify-keyspace-events", "Eg$x").Err()
ranger := newRanger()
if err := loadFromRedis(ctx, rdb, ranger); err != nil {
log.Println("initial load error:", err)
}
subscribeKeyspace(ctx, rdb, ranger)
/*a, _ := GetAllCountryIPRanges()
for _, code := range allCountryCodes {
for _, b := range a[code] {
prefix, err := netip.ParsePrefix(b)
if err != nil {
log.Printf("ungültiger Prefix '%s': %v", b, err)
continue
}
ranger.addBlock(code, prefix)
}
}*/
/*allPrefixes, err := GetAllCountryPrefixes()
if err != nil {
log.Fatalf("Fehler beim Laden: %v", err)
}
// In den Ranger einfügen
for code, prefixes := range allPrefixes {
for _, p := range prefixes {
ranger.addBlock(code, p)
}
}*/
if err := LoadAllCountryPrefixesIntoRedisAndRanger(ctx, rdb, ranger, cfg.TTLHours); err != nil {
log.Fatalf("Fehler beim Laden aller Länderranges: %v", err)
}
if cfg.IsWorker {
go syncLoop(ctx, cfg, rdb, ranger)
}
srv := &Server{ranger: ranger, rdb: rdb}
log.Println("listening on :8080 (worker:", cfg.IsWorker, ")")
if err := http.ListenAndServe(":8080", srv.routes()); err != nil && !errors.Is(err, http.ErrServerClosed) {
log.Fatal(err)
}
}

1
go.mod

@@ -5,5 +5,6 @@ go 1.24.3
require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/redis/go-redis/v9 v9.10.0 // indirect
)

2
go.sum

@@ -2,5 +2,7 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/redis/go-redis/v9 v9.10.0 h1:FxwK3eV8p/CQa0Ch276C7u2d0eNC9kCmAYQ7mCXCzVs=
github.com/redis/go-redis/v9 v9.10.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=

808
main.go

@@ -3,473 +3,467 @@ package main
import (
"bufio"
"context"
"encoding/binary"
"encoding/json"
"errors"
"expvar"
"fmt"
"io"
"log"
"math/big"
"math/bits"
"net"
"net/http"
"net/netip"
"os"
"sort"
"strconv"
"strings"
"sync"
"time"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/redis/go-redis/v9"
)
// -----------------------------------------------------------------------------
// CONFIGURATION & ENV
// -----------------------------------------------------------------------------
var (
ctx = context.Background()
redisAddr = getenv("REDIS_ADDR", "10.10.5.249:6379")
//redisAddr = getenv("REDIS_ADDR", "localhost:6379")
redisTTL = time.Hour * 24
cacheSize = 100_000
blocklistCats = []string{"generic"}
rdb *redis.Client
ipCache *lru.Cache[string, []string]
type Source struct {
Category string
URL []string
}
// Metrics
hits = expvar.NewInt("cache_hits")
misses = expvar.NewInt("cache_misses")
queries = expvar.NewInt("ip_queries")
)
type Config struct {
RedisAddr string
Sources []Source
TTLHours int
IsWorker bool // true ⇒ loads blocklists & writes them to Redis
}
var (
totalBlockedIPs = expvar.NewInt("total_blocked_ips")
totalWhitelistEntries = expvar.NewInt("total_whitelist_entries")
)
func loadConfig() Config {
// default Blocklist source
srcs := []Source{{
Category: "generic",
URL: []string{"https://raw.githubusercontent.com/firehol/blocklist-ipsets/master/firehol_level1.netset"},
}}
if env := os.Getenv("BLOCKLIST_SOURCES"); env != "" {
srcs = nil
for _, spec := range strings.Split(env, ",") {
spec = strings.TrimSpace(spec)
if spec == "" {
continue
}
parts := strings.SplitN(spec, ":", 2)
if len(parts) != 2 {
continue
}
cat := strings.TrimSpace(parts[0])
raw := strings.FieldsFunc(parts[1], func(r rune) bool { return r == '|' || r == ';' })
var urls []string
for _, u := range raw {
if u = strings.TrimSpace(u); u != "" {
urls = append(urls, u)
}
}
if len(urls) > 0 {
srcs = append(srcs, Source{Category: cat, URL: urls})
}
}
}
ttl := 720
if env := os.Getenv("TTL_HOURS"); env != "" {
fmt.Sscanf(env, "%d", &ttl)
}
isWorker := strings.ToLower(os.Getenv("ROLE")) == "worker"
return Config{
RedisAddr: getenv("REDIS_ADDR", "redis:6379"),
Sources: srcs,
TTLHours: ttl,
IsWorker: isWorker,
}
}
func getenv(k, def string) string {
if v := os.Getenv(k); v != "" {
return v
}
return def
}
// -----------------------------------------------------------------------------
// REDIS KEYS
// -----------------------------------------------------------------------------
func keyBlock(cat string, p netip.Prefix) string { return "bl:" + cat + ":" + p.String() }
func keyWhite(a netip.Addr) string { return "wl:" + a.String() }
// -----------------------------------------------------------------------------
// RANGER: thread-safe in-memory index
// -----------------------------------------------------------------------------
type Ranger struct {
mu sync.RWMutex
blocks map[string]map[netip.Prefix]struct{}
whites map[netip.Addr]struct{}
}
func newRanger() *Ranger {
return &Ranger{
blocks: make(map[string]map[netip.Prefix]struct{}),
whites: make(map[netip.Addr]struct{}),
}
}
func (r *Ranger) resetBlocks(m map[string]map[netip.Prefix]struct{}) {
r.mu.Lock()
r.blocks = m
r.mu.Unlock()
}
func (r *Ranger) resetWhites(set map[netip.Addr]struct{}) {
r.mu.Lock()
r.whites = set
r.mu.Unlock()
}
func (r *Ranger) addBlock(cat string, p netip.Prefix) {
r.mu.Lock()
if _, ok := r.blocks[cat]; !ok {
r.blocks[cat] = make(map[netip.Prefix]struct{})
}
r.blocks[cat][p] = struct{}{}
r.mu.Unlock()
}
func (r *Ranger) removeBlock(cat string, p netip.Prefix) {
r.mu.Lock()
if m, ok := r.blocks[cat]; ok {
delete(m, p)
}
r.mu.Unlock()
}
func (r *Ranger) addWhite(a netip.Addr) {
r.mu.Lock()
r.whites[a] = struct{}{}
r.mu.Unlock()
}
func (r *Ranger) removeWhite(a netip.Addr) {
r.mu.Lock()
delete(r.whites, a)
r.mu.Unlock()
}
func (r *Ranger) blockedInCats(a netip.Addr, cats []string) []string {
r.mu.RLock()
defer r.mu.RUnlock()
if _, ok := r.whites[a]; ok {
return nil
}
if len(cats) == 0 {
for c := range r.blocks {
cats = append(cats, c)
}
}
var res []string
for _, cat := range cats {
if m, ok := r.blocks[cat]; ok {
for p := range m {
if p.Contains(a) {
res = append(res, cat)
break
}
}
}
}
sort.Strings(res)
return res
}
// -----------------------------------------------------------------------------
// INITIAL LOAD FROM REDIS (baseline before keyspace events)
// -----------------------------------------------------------------------------
func loadFromRedis(ctx context.Context, rdb *redis.Client, r *Ranger) error {
// 1) Blocks
blocks := make(map[string]map[netip.Prefix]struct{})
iter := rdb.Scan(ctx, 0, "bl:*", 0).Iterator()
for iter.Next(ctx) {
key := iter.Val() // bl:<cat>:<cidr>
parts := strings.SplitN(key, ":", 3)
if len(parts) != 3 {
continue
}
cat, cidr := parts[1], parts[2]
p, err := netip.ParsePrefix(cidr)
if err != nil {
continue
}
if _, ok := blocks[cat]; !ok {
blocks[cat] = map[netip.Prefix]struct{}{}
}
blocks[cat][p] = struct{}{}
}
if err := iter.Err(); err != nil {
return err
}
r.resetBlocks(blocks)
// 2) Whites
whites := make(map[netip.Addr]struct{})
wIter := rdb.Scan(ctx, 0, "wl:*", 0).Iterator()
for wIter.Next(ctx) {
ip := strings.TrimPrefix(wIter.Val(), "wl:")
if a, err := netip.ParseAddr(ip); err == nil {
whites[a] = struct{}{}
}
}
if err := wIter.Err(); err != nil {
return err
}
r.resetWhites(whites)
return nil
}
// -----------------------------------------------------------------------------
// SYNC WORKER (only on ROLE=worker)
// -----------------------------------------------------------------------------
func syncLoop(ctx context.Context, cfg Config, rdb *redis.Client, ranger *Ranger) {
if err := syncOnce(ctx, cfg, rdb, ranger); err != nil {
log.Println("initial sync:", err)
}
ticker := time.NewTicker(6 * time.Hour)
for {
select {
case <-ticker.C:
if err := syncOnce(ctx, cfg, rdb, ranger); err != nil {
log.Println("sync loop:", err)
}
case <-ctx.Done():
ticker.Stop()
return
}
}
}
func syncOnce(ctx context.Context, cfg Config, rdb *redis.Client, ranger *Ranger) error {
expiry := time.Duration(cfg.TTLHours) * time.Hour
newBlocks := make(map[string]map[netip.Prefix]struct{})
for _, src := range cfg.Sources {
for _, url := range src.URL {
if err := fetchList(ctx, url, func(p netip.Prefix) {
if _, ok := newBlocks[src.Category]; !ok {
newBlocks[src.Category] = map[netip.Prefix]struct{}{}
}
newBlocks[src.Category][p] = struct{}{}
_ = rdb.Set(ctx, keyBlock(src.Category, p), "1", expiry).Err()
}); err != nil {
return err
}
}
}
ranger.resetBlocks(newBlocks)
return nil
}
func fetchList(ctx context.Context, url string, cb func(netip.Prefix)) error {
req, _ := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("%s -> %s", url, resp.Status)
}
return parseStream(resp.Body, cb)
}
func parseStream(r io.Reader, cb func(netip.Prefix)) error {
s := bufio.NewScanner(r)
for s.Scan() {
line := strings.TrimSpace(s.Text())
if line == "" || strings.HasPrefix(line, "#") {
continue
}
if p, err := netip.ParsePrefix(line); err == nil {
cb(p)
continue
}
if addr, err := netip.ParseAddr(line); err == nil {
plen := 32
if addr.Is6() {
plen = 128
}
cb(netip.PrefixFrom(addr, plen))
}
}
return s.Err()
}
// -----------------------------------------------------------------------------
// KEYSPACE SUBSCRIBER: instant propagation
// -----------------------------------------------------------------------------
func subscribeKeyspace(ctx context.Context, rdb *redis.Client, ranger *Ranger) {
// listen to keyevent channels (not keyspace) so msg.Payload == actual key
patterns := []string{
"__keyevent@0__:set",
"__keyevent@0__:del",
"__keyevent@0__:expired",
}
pubsub := rdb.PSubscribe(ctx, patterns...)
func updateTotalsFromRedis() {
go func() {
for msg := range pubsub.Channel() {
key := msg.Payload // full redis key e.g. "wl:1.2.3.4" or "bl:spam:10.0.0.0/8"
switch {
case strings.HasPrefix(key, "wl:"):
ipStr := strings.TrimPrefix(key, "wl:")
addr, err := netip.ParseAddr(ipStr)
if err != nil {
continue
}
switch msg.Channel {
case "__keyevent@0__:set":
ranger.addWhite(addr)
case "__keyevent@0__:del", "__keyevent@0__:expired":
ranger.removeWhite(addr)
}
case strings.HasPrefix(key, "bl:"):
parts := strings.SplitN(key, ":", 3)
if len(parts) != 3 {
continue
}
cat, cidr := parts[1], parts[2]
p, err := netip.ParsePrefix(cidr)
if err != nil {
continue
}
switch msg.Channel {
case "__keyevent@0__:set":
ranger.addBlock(cat, p)
case "__keyevent@0__:del", "__keyevent@0__:expired":
ranger.removeBlock(cat, p)
}
}
blockCount := 0
iter := rdb.Scan(ctx, 0, "bl:*", 0).Iterator()
for iter.Next(ctx) {
blockCount++
}
totalBlockedIPs.Set(int64(blockCount))
whiteCount := 0
iter = rdb.Scan(ctx, 0, "wl:*", 0).Iterator()
for iter.Next(ctx) {
whiteCount++
}
totalWhitelistEntries.Set(int64(whiteCount))
}()
}
func startMetricUpdater() {
ticker := time.NewTicker(10 * time.Second)
go func() {
for {
updateTotalsFromRedis()
<-ticker.C
}
}()
}
// -----------------------------------------------------------------------------
// HTTP SERVER
// -----------------------------------------------------------------------------
// --------------------------------------------
// INIT + MAIN
// --------------------------------------------
type Server struct {
ranger *Ranger
rdb *redis.Client
}
func main() {
var err error
func (s *Server) routes() http.Handler {
mux := http.NewServeMux()
mux.HandleFunc("/check/", s.handleCheck)
mux.HandleFunc("/whitelist", s.handleAddWhite)
mux.HandleFunc("/categories", s.handleCats)
return mux
}
func (s *Server) handleCheck(w http.ResponseWriter, r *http.Request) {
ipStr := strings.TrimPrefix(r.URL.Path, "/check/")
if ipStr == "" {
http.Error(w, "missing IP", http.StatusBadRequest)
return
// Redis client
rdb = redis.NewClient(&redis.Options{Addr: redisAddr})
if err := rdb.Ping(ctx).Err(); err != nil {
log.Fatalf("redis: %v", err)
}
// LRU cache
ipCache, err = lru.New[string, []string](cacheSize)
if err != nil {
log.Fatalf("cache init: %v", err)
}
startMetricUpdater()
// Admin load all blocklists (on demand or scheduled)
go func() {
if getenv("IMPORT_RIRS", "0") == "1" {
log.Println("Lade IP-Ranges aus RIRs...")
if err := importRIRDataToRedis(); err != nil {
log.Fatalf("import error: %v", err)
}
log.Println("✅ Import abgeschlossen.")
}
}()
// Routes
http.HandleFunc("/check/", handleCheck)
http.HandleFunc("/whitelist", handleWhitelist)
http.HandleFunc("/info", handleInfo)
http.Handle("/debug/vars", http.DefaultServeMux)
log.Println("🚀 Server läuft auf :8080")
log.Fatal(http.ListenAndServe(":8080", nil))
}
func getenv(k, fallback string) string {
if v := os.Getenv(k); v != "" {
return v
}
return fallback
}
// --------------------------------------------
// IP CHECK API
// --------------------------------------------
func handleCheck(w http.ResponseWriter, r *http.Request) {
ipStr := strings.TrimPrefix(r.URL.Path, "/check/")
addr, err := netip.ParseAddr(ipStr)
if err != nil {
http.Error(w, "invalid IP", http.StatusBadRequest)
http.Error(w, "invalid IP", 400)
return
}
catsParam := strings.TrimSpace(r.URL.Query().Get("cats"))
var cats []string
if catsParam != "" {
cats = strings.Split(catsParam, ",")
cats := blocklistCats
if q := r.URL.Query().Get("cats"); q != "" {
cats = strings.Split(q, ",")
}
queries.Add(1)
blockedCats, err := checkIP(addr, cats)
if err != nil {
http.Error(w, "lookup error", 500)
return
}
blocked := s.ranger.blockedInCats(addr, cats)
writeJSON(w, map[string]any{
"ip": ipStr,
"blocked": len(blocked) > 0,
"categories": blocked,
"blocked": len(blockedCats) > 0,
"categories": blockedCats,
})
}
// POST {"ip":"1.2.3.4"}
func (s *Server) handleAddWhite(w http.ResponseWriter, r *http.Request) {
// returns every possible prefix of this IP, starting with the longest (/32 or /128)
func supernets(ip netip.Addr) []string {
if ip.Is4() {
a := ip.As4() // make an addressable copy
u := binary.BigEndian.Uint32(a[:]) // now it can be sliced
supers := make([]string, 33) // /32 … /0
for bits := 32; bits >= 0; bits-- {
mask := uint32(0xffffffff) << (32 - bits)
n := u & mask
addr := netip.AddrFrom4([4]byte{
byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n),
})
supers[32-bits] = fmt.Sprintf("%s/%d", addr, bits)
}
return supers
}
a := ip.As16() // addressable copy
supers := make([]string, 129) // /128 … /0
for bits := 128; bits >= 0; bits-- {
b := a // value copy that we can modify
// zero the complete low-order bytes
full := (128 - bits) / 8
for i := 0; i < full; i++ {
b[15-i] = 0
}
// mask the remaining bits
rem := (128 - bits) % 8
if rem != 0 {
b[15-full] &= 0xFF << rem
}
addr := netip.AddrFrom16(b)
supers[128-bits] = fmt.Sprintf("%s/%d", addr, bits)
}
return supers
}
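// Illustrative output: supernets(netip.MustParseAddr("10.1.2.3")) starts with
// "10.1.2.3/32", "10.1.2.2/31", "10.1.2.0/30", ... and ends with "0.0.0.0/0",
// i.e. every prefix that could contain the address.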
func checkIP(ip netip.Addr, cats []string) ([]string, error) {
// 1) cache hit?
if res, ok := ipCache.Get(ip.String()); ok {
hits.Add(1)
return res, nil
}
// 2) all supernets of the IP (33 or 129 entries)
supers := supernets(ip)
// 3) pipeline exactly *one* EXISTS query per category
pipe := rdb.Pipeline()
existsCmds := make([]*redis.IntCmd, len(cats))
for i, cat := range cats {
keys := make([]string, len(supers))
for j, pfx := range supers {
keys[j] = "bl:" + cat + ":" + pfx
}
existsCmds[i] = pipe.Exists(ctx, keys...)
}
if _, err := pipe.Exec(ctx); err != nil && err != redis.Nil {
return nil, err
}
// 4) evaluate the results
matches := make([]string, 0, len(cats))
for i, cat := range cats {
if existsCmds[i].Val() > 0 {
matches = append(matches, cat)
}
}
// 5) populate the cache and return
misses.Add(1)
ipCache.Add(ip.String(), matches)
return matches, nil
}
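// Design note: blocked ranges are stored as exact keys ("bl:<cat>:<cidr>"), so
// checkIP tests membership by asking EXISTS for every supernet of the address,
// batched into one pipelined round trip per category, and caches the verdict
// in the LRU so repeated lookups skip Redis entirely.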
// --------------------------------------------
// WHITELIST API (optional extension)
// --------------------------------------------
func handleWhitelist(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
http.Error(w, "method not allowed", 405)
return
}
var body struct {
IP string `json:"ip"`
}
if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
http.Error(w, "bad json", http.StatusBadRequest)
http.Error(w, "bad request", 400)
return
}
addr, err := netip.ParseAddr(strings.TrimSpace(body.IP))
addr, err := netip.ParseAddr(body.IP)
if err != nil {
http.Error(w, "invalid IP", http.StatusBadRequest)
http.Error(w, "invalid IP", 400)
return
}
if err := s.rdb.Set(r.Context(), keyWhite(addr), "1", 0).Err(); err != nil {
http.Error(w, "redis", http.StatusInternalServerError)
// Add to whitelist (Redis key like wl:<ip>)
if err := rdb.Set(ctx, "wl:"+addr.String(), "1", 0).Err(); err != nil {
http.Error(w, "redis error", 500)
return
}
s.ranger.addWhite(addr) // immediate local effect
ipCache.Add(addr.String(), nil)
writeJSON(w, map[string]string{"status": "whitelisted"})
}
func (s *Server) handleCats(w http.ResponseWriter, _ *http.Request) {
s.ranger.mu.RLock()
cats := make([]string, 0, len(s.ranger.blocks))
for c := range s.ranger.blocks {
cats = append(cats, c)
// --------------------------------------------
// ADMIN INFO
// --------------------------------------------
func handleInfo(w http.ResponseWriter, _ *http.Request) {
stats := map[string]any{
"cache_size": ipCache.Len(),
"ttl_hours": redisTTL.Hours(),
"redis": redisAddr,
}
s.ranger.mu.RUnlock()
sort.Strings(cats)
writeJSON(w, map[string]any{"categories": cats})
writeJSON(w, stats)
}
// --------------------------------------------
// UTIL
// --------------------------------------------
func writeJSON(w http.ResponseWriter, v any) {
w.Header().Set("Content-Type", "application/json")
_ = json.NewEncoder(w).Encode(v)
}
// -----------------------------------------------------------------------------
// MAIN
// -----------------------------------------------------------------------------
// --------------------------------------------
// RIR DATA IMPORT (ALL COUNTRIES)
// --------------------------------------------
func main() {
cfg := loadConfig()
ctx := context.Background()
rdb := redis.NewClient(&redis.Options{Addr: cfg.RedisAddr})
if err := rdb.Ping(ctx).Err(); err != nil {
log.Fatalf("redis: %v", err)
}
// enable keyspace events (if not already set in redis.conf)
_ = rdb.ConfigSet(ctx, "notify-keyspace-events", "KEx").Err()
ranger := newRanger()
if err := loadFromRedis(ctx, rdb, ranger); err != nil {
log.Println("initial load error:", err)
}
subscribeKeyspace(ctx, rdb, ranger)
if cfg.IsWorker {
go syncLoop(ctx, cfg, rdb, ranger)
}
srv := &Server{ranger: ranger, rdb: rdb}
log.Println("listening on :8080 (worker:", cfg.IsWorker, ")")
if err := http.ListenAndServe(":8080", srv.routes()); err != nil && !errors.Is(err, http.ErrServerClosed) {
log.Fatal(err)
}
var rirFiles = []string{
"https://ftp.ripe.net/pub/stats/ripencc/delegated-ripencc-latest",
"https://ftp.apnic.net/stats/apnic/delegated-apnic-latest",
"https://ftp.arin.net/pub/stats/arin/delegated-arin-extended-latest",
"https://ftp.lacnic.net/pub/stats/lacnic/delegated-lacnic-latest",
"https://ftp.afrinic.net/pub/stats/afrinic/delegated-afrinic-extended-latest",
}
func importRIRDataToRedis() error {
wg := sync.WaitGroup{}
sem := make(chan struct{}, 5)
for _, url := range rirFiles {
wg.Add(1)
sem <- struct{}{}
go func(url string) {
defer wg.Done()
defer func() { <-sem }()
fmt.Println("Start: ", url)
if err := fetchAndStore(url); err != nil {
log.Printf("❌ Fehler bei %s: %v", url, err)
}
fmt.Println("Done: ", url)
}(url)
}
wg.Wait()
return nil
}
func fetchAndStore(url string) error {
resp, err := http.Get(url)
if err != nil {
return err
}
defer resp.Body.Close()
scanner := bufio.NewScanner(resp.Body)
for scanner.Scan() {
line := scanner.Text()
if strings.HasPrefix(line, "#") || !strings.Contains(line, "|ipv") {
continue
}
fields := strings.Split(line, "|")
if len(fields) < 7 {
continue
}
country := strings.ToLower(fields[1])
ipType := fields[2]
start := fields[3]
count := fields[4]
if ipType != "ipv4" && ipType != "ipv6" {
continue
}
if start == "24.152.36.0" {
fmt.Printf("💡 Testing summarizeIPv4CIDRs(%s, %s)\n", start, count)
num, _ := strconv.ParseUint(count, 10, 64)
for _, cidr := range summarizeCIDRs(start, num) {
fmt.Println(" →", cidr)
}
}
//cidrList := summarizeToCIDRs(start, count, ipType)
numIPs, _ := strconv.ParseUint(count, 10, 64)
cidrList := summarizeCIDRs(start, numIPs)
//log.Printf("[%s] %s/%s (%s) → %d Netze", strings.ToUpper(country), start, count, ipType, len(cidrList))
for _, cidr := range cidrList {
prefix, err := netip.ParsePrefix(cidr)
if err != nil {
continue
}
key := "bl:" + country + ":" + prefix.String()
//fmt.Println(key)
_ = rdb.Set(ctx, key, "1", redisTTL).Err()
}
}
return scanner.Err()
}
// --------------------------------------------
// IP RANGE SUMMARIZER
// --------------------------------------------
func summarizeCIDRs(startIP string, count uint64) []string {
var result []string
if count == 0 {
return result
}
ip := net.ParseIP(startIP)
if ip == nil {
return result
}
// IPv4 path ---------------------------------------------------------------
if v4 := ip.To4(); v4 != nil {
start := ip4ToUint(v4)
end := start + uint32(count) - 1
for start <= end {
prefix := 32 - uint32(bits.TrailingZeros32(start))
for (start + (1 << (32 - prefix)) - 1) > end {
prefix++
}
result = append(result,
fmt.Sprintf("%s/%d", uintToIP4(start), prefix))
start += 1 << (32 - prefix)
}
return result
}
// IPv6 path ---------------------------------------------------------------
startBig := ip6ToBig(ip) // start address
endBig := new(big.Int).Add(startBig, // end address
new(big.Int).Sub(new(big.Int).SetUint64(count), big.NewInt(1)))
for startBig.Cmp(endBig) <= 0 {
// largest block that is aligned at the current start
prefix := 128 - trailingZeros128(bigToIP6(startBig))
// shrink until the block fits into the remaining window
for {
blockSize := new(big.Int).Lsh(big.NewInt(1), uint(128-prefix))
blockEnd := new(big.Int).Add(startBig,
new(big.Int).Sub(blockSize, big.NewInt(1)))
if blockEnd.Cmp(endBig) <= 0 {
break
}
prefix++
}
result = append(result,
fmt.Sprintf("%s/%d", bigToIP6(startBig), prefix))
// advance to the next subnet
step := new(big.Int).Lsh(big.NewInt(1), uint(128-prefix))
startBig = new(big.Int).Add(startBig, step)
}
return result
}
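// Worked example (illustrative): summarizeCIDRs("192.0.2.0", 8) -> ["192.0.2.0/29"],
// while summarizeCIDRs("192.0.2.4", 8) -> ["192.0.2.4/30", "192.0.2.8/30"],
// since the second start address is only aligned to a /30 boundary.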
/* ---------- IPv4 helpers ---------- */
func ip4ToUint(ip net.IP) uint32 {
return uint32(ip[0])<<24 | uint32(ip[1])<<16 | uint32(ip[2])<<8 | uint32(ip[3])
}
func uintToIP4(v uint32) net.IP {
return net.IPv4(byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
}
/* ---------- IPv6 helpers ---------- */
func ip6ToBig(ip net.IP) *big.Int {
return new(big.Int).SetBytes(ip.To16()) // guaranteed 16 bytes
}
func bigToIP6(v *big.Int) net.IP {
b := v.Bytes()
if len(b) < 16 { // left-pad to 16 bytes
pad := make([]byte, 16-len(b))
b = append(pad, b...)
}
return net.IP(b)
}
// number of zero bits at the least-significant end (LSB) of an IPv6 value
func trailingZeros128(ip net.IP) int {
b := ip.To16()
tz := 0
for i := 15; i >= 0; i-- { // last byte first (LSB)
if b[i] == 0 {
tz += 8
} else {
tz += bits.TrailingZeros8(b[i])
break
}
}
return tz
}