// Release Hub — central manifest aggregator for patch DB agents
//
// Goals
// - Central management of multiple agent endpoints (manifest URLs)
// - Periodic pulling/parsing of the manifests
// - Unified release list with filter/search UI (filtered server-side)
// - Pure Go (standard library only), no external cloud/CDN
// - Persistence in local JSON files (agents + cache)
// - REST JSON API (/api/releases, /api/agents)
//
// Run
//   go run .
//
// Note on the expected manifest format
// - Preferred:
//   { "releases": [ { "version": "1.2.3", "released_at": "2025-10-01T10:00:00Z", ... } ] }
// - Alternatively: a plain array [ { ... }, { ... } ]
// - Also supported: nested JSON structures. In particular, the schema
//   releases → branch → channel → arch → bitness → os → { release-obj }
//   is recognized and flattened into plain releases. The hub additionally accepts
//   common alias fields (e.g. releasedAt/release_date/published_at → released_at,
//   size → size_bytes, sha256sum → sha256, ...).
// - Date format: RFC3339, YYYY-MM-DD, or seconds since epoch.
// - Known fields: version, build, released_at, branch, channel, os, arch, notes, assets[]
// - assets: { url, sha256, size_bytes, signature_url, content_type }
package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"html/template"
	"io"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"
	"unicode"
)

// ---- Data model --------------------------------------------------------------

type Asset struct {
	URL          string `json:"url"`
	SHA256       string `json:"sha256"`
	Size         int64  `json:"size_bytes,omitempty"`
	SignatureURL string `json:"signature_url,omitempty"`
	ContentType  string `json:"content_type,omitempty"`
}

// FlexTime accepts RFC3339, date-only (YYYY-MM-DD) and epoch-seconds timestamps.
type FlexTime struct{ time.Time }

func (ft *FlexTime) UnmarshalJSON(b []byte) error {
	s := strings.Trim(string(b), "\"")
	if s == "" || s == "null" {
		return nil
	}
	// Try RFC3339, then date-only.
	if t, err := time.Parse(time.RFC3339, s); err == nil {
		ft.Time = t
		return nil
	}
	if t, err := time.Parse("2006-01-02", s); err == nil {
		ft.Time = t
		return nil
	}
	// Fallback: try seconds since epoch.
	if sec, err := strconv.ParseInt(s, 10, 64); err == nil {
		ft.Time = time.Unix(sec, 0).UTC()
		return nil
	}
	return fmt.Errorf("unsupported time format: %q", s)
}

// Release is the unified view of agent releases.
// The fields are chosen conservatively; more can be added as needed.
type Release struct {
	Version    string   `json:"version"`
	Build      string   `json:"build,omitempty"`
	ReleasedAt FlexTime `json:"released_at"`
	Branch     string   `json:"branch,omitempty"`
	Channel    string   `json:"channel,omitempty"`
	OS         string   `json:"os,omitempty"`
	Arch       string   `json:"arch,omitempty"`
	Notes      string   `json:"notes,omitempty"`
	Vendor     string   `json:"vendor,omitempty"`
	Product    string   `json:"product,omitempty"`
	Assets     []Asset  `json:"assets,omitempty"`

	// Source (set by the hub)
	AgentID   string `json:"agent_id"`
	AgentName string `json:"agent_name"`
}

// ManifestEnvelope allows decoding both {"releases": [...]} and a plain array.
type ManifestEnvelope struct {
	Releases []Release `json:"releases"`
}
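// Illustrative manifest in the envelope form documented in the header comment
// above (field names and date formats as accepted by FlexTime and the alias
// mapping further below). This is a sketch with made-up values, not a
// normative schema:
//
//	{
//	  "releases": [
//	    {
//	      "version": "1.2.3",
//	      "build": "4711",
//	      "released_at": "2025-10-01T10:00:00Z",
//	      "branch": "main",
//	      "channel": "stable",
//	      "os": "linux",
//	      "arch": "amd64",
//	      "notes": "Bugfix release",
//	      "assets": [
//	        {
//	          "url": "https://example.invalid/pkg-1.2.3-linux-amd64.tar.gz",
//	          "sha256": "…",
//	          "size_bytes": 1048576,
//	          "content_type": "application/gzip"
//	        }
//	      ]
//	    }
//	  ]
//	}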
// Agent describes a registered manifest endpoint.
type Agent struct {
	ID           string    `json:"id"`
	Name         string    `json:"name"`
	EndpointURL  string    `json:"endpoint_url"`
	Enabled      bool      `json:"enabled"`
	LastOK       time.Time `json:"last_ok,omitempty"`
	LastError    string    `json:"last_error,omitempty"`
	ETag         string    `json:"etag,omitempty"`
	LastModified string    `json:"last_modified,omitempty"`
}

// ---- Persistence -------------------------------------------------------------

type Store struct {
	mu       sync.RWMutex
	agents   map[string]*Agent
	releases []Release // merged result of the last successful pulls
	dataDir  string
}

func NewStore(dataDir string) *Store {
	return &Store{
		agents:  make(map[string]*Agent),
		dataDir: dataDir,
	}
}

func (s *Store) load() error {
	if err := os.MkdirAll(s.dataDir, 0o755); err != nil {
		return err
	}
	// Agents
	b, err := os.ReadFile(filepath.Join(s.dataDir, "agents.json"))
	if err == nil {
		var list []*Agent
		if err := json.Unmarshal(b, &list); err != nil {
			return err
		}
		for _, a := range list {
			s.agents[a.ID] = a
		}
	}
	// Releases cache
	b, err = os.ReadFile(filepath.Join(s.dataDir, "releases-cache.json"))
	if err == nil {
		var rr []Release
		if err := json.Unmarshal(b, &rr); err != nil {
			return err
		}
		s.releases = rr
	}
	return nil
}

func (s *Store) saveAgents() error {
	s.mu.RLock()
	defer s.mu.RUnlock()
	list := make([]*Agent, 0, len(s.agents))
	for _, a := range s.agents {
		list = append(list, a)
	}
	sort.Slice(list, func(i, j int) bool { return list[i].Name < list[j].Name })
	b, _ := json.MarshalIndent(list, "", "  ")
	return os.WriteFile(filepath.Join(s.dataDir, "agents.json"), b, 0o644)
}

func (s *Store) saveReleases() error {
	s.mu.RLock()
	defer s.mu.RUnlock()
	b, _ := json.MarshalIndent(s.releases, "", "  ")
	return os.WriteFile(filepath.Join(s.dataDir, "releases-cache.json"), b, 0o644)
}

// ---- Fetching ----------------------------------------------------------------

type Fetcher struct {
	client   *http.Client
	store    *Store
	interval time.Duration
	stopCh   chan struct{}
}

func NewFetcher(store *Store, interval time.Duration) *Fetcher {
	return &Fetcher{
		client:   &http.Client{Timeout: 20 * time.Second},
		store:    store,
		interval: interval,
		stopCh:   make(chan struct{}),
	}
}

func (f *Fetcher) Start() {
	go func() {
		// Immediate initial run
		f.FetchOnce()
		ticker := time.NewTicker(f.interval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				f.FetchOnce()
			case <-f.stopCh:
				return
			}
		}
	}()
}

func (f *Fetcher) Stop() { close(f.stopCh) }
func (f *Fetcher) FetchOnce() {
	f.store.mu.RLock()
	agents := make([]*Agent, 0, len(f.store.agents))
	for _, a := range f.store.agents {
		if a.Enabled {
			agents = append(agents, a)
		}
	}
	f.store.mu.RUnlock()

	var all []Release
	var mu sync.Mutex
	var wg sync.WaitGroup
	for _, a := range agents {
		a := a
		wg.Add(1)
		go func() {
			defer wg.Done()
			rels, err := f.fetchAgent(a)
			if err != nil {
				log.Printf("fetch agent %s: %v", a.Name, err)
				f.store.mu.Lock()
				a.LastError = err.Error()
				f.store.mu.Unlock()
				return
			}
			for i := range rels {
				rels[i].AgentID = a.ID
				rels[i].AgentName = a.Name
			}
			mu.Lock()
			all = append(all, rels...)
			mu.Unlock()
			f.store.mu.Lock()
			a.LastError = ""
			a.LastOK = time.Now().UTC()
			f.store.mu.Unlock()
		}()
	}
	wg.Wait()

	// Unified ordering: newest first
	sort.Slice(all, func(i, j int) bool {
		return all[i].ReleasedAt.Time.After(all[j].ReleasedAt.Time)
	})

	f.store.mu.Lock()
	f.store.releases = all
	f.store.mu.Unlock()
	_ = f.store.saveReleases()
}

func (f *Fetcher) fetchAgent(a *Agent) ([]Release, error) {
	req, err := http.NewRequest(http.MethodGet, a.EndpointURL, nil)
	if err != nil {
		return nil, err
	}
	if a.ETag != "" {
		req.Header.Set("If-None-Match", a.ETag)
	}
	if a.LastModified != "" {
		req.Header.Set("If-Modified-Since", a.LastModified)
	}
	resp, err := f.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotModified {
		// Nothing changed: reuse the cached releases of this agent.
		f.store.mu.RLock()
		var cached []Release
		for _, r := range f.store.releases {
			if r.AgentID == a.ID {
				cached = append(cached, r)
			}
		}
		f.store.mu.RUnlock()
		return cached, nil
	}
	if resp.StatusCode != http.StatusOK {
		b, _ := io.ReadAll(io.LimitReader(resp.Body, 2048))
		return nil, fmt.Errorf("unexpected status %s: %s", resp.Status, string(b))
	}
	// Remember validators for conditional requests; guard with the store lock
	// because agents are also read concurrently by the HTTP handlers.
	f.store.mu.Lock()
	if et := resp.Header.Get("ETag"); et != "" {
		a.ETag = et
	}
	if lm := resp.Header.Get("Last-Modified"); lm != "" {
		a.LastModified = lm
	}
	f.store.mu.Unlock()

	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	// 1) {"releases": [...]}
	var env ManifestEnvelope
	if err := json.Unmarshal(raw, &env); err == nil && len(env.Releases) > 0 {
		return env.Releases, nil
	}
	// 2) Plain array
	var list []Release
	if err := json.Unmarshal(raw, &list); err == nil && len(list) > 0 {
		return list, nil
	}
	// 3) Generic scan (arbitrarily nested, normalize aliases)
	if rr, err := parseAnyReleases(raw); err == nil && len(rr) > 0 {
		return cleanReleases(rr), nil
	}
	return nil, errors.New("unrecognized manifest format: expected 'releases' or array")
}

// ---- Filtering & query -------------------------------------------------------

type ReleaseFilter struct {
	Q       string
	Channel string
	Branch  string
	OS      string
	Arch    string
	Vendor  string
	Product string
	From    time.Time
	To      time.Time
}

func (f ReleaseFilter) match(r Release) bool {
	if f.Q != "" {
		q := strings.ToLower(f.Q)
		// Join with a space so the query cannot accidentally match across
		// field boundaries.
		hay := strings.ToLower(strings.Join([]string{
			r.Version, r.Build, r.Branch, r.Channel, r.OS, r.Arch,
			r.Notes, r.Vendor, r.Product, r.AgentName,
		}, " "))
		if !strings.Contains(hay, q) {
			return false
		}
	}
	if f.Channel != "" && !equalFoldOrDash(r.Channel, f.Channel) {
		return false
	}
	if f.Branch != "" && !equalFoldOrDash(r.Branch, f.Branch) {
		return false
	}
	if f.OS != "" && !equalFoldOrDash(r.OS, f.OS) {
		return false
	}
	if f.Arch != "" && !equalFoldOrDash(r.Arch, f.Arch) {
		return false
	}
	if f.Vendor != "" && !equalFoldOrDash(r.Vendor, f.Vendor) {
		return false
	}
	if f.Product != "" && !equalFoldOrDash(r.Product, f.Product) {
		return false
	}
	if !f.From.IsZero() && r.ReleasedAt.Time.Before(f.From) {
		return false
	}
	if !f.To.IsZero() && r.ReleasedAt.Time.After(f.To) {
		return false
	}
	return true
}

func equalFoldOrDash(a, b string) bool {
	a = strings.TrimSpace(a)
	b = strings.TrimSpace(b)
	if a == "" && (b == "-" || b == "") {
		return true
	}
	if b == "" && (a == "-" || a == "") {
		return true
	}
	return strings.EqualFold(a, b)
}

// ---- HTTP/UI -----------------------------------------------------------------

type Server struct {
	store   *Store
	fetcher *Fetcher
	tz      *time.Location
}

func NewServer(store *Store, fetcher *Fetcher) *Server {
	tz, _ := time.LoadLocation("Europe/Berlin")
	return &Server{store: store, fetcher: fetcher, tz: tz}
}
func (s *Server) routesAdmin(mux *http.ServeMux) {
	mux.HandleFunc("/", s.handlerList(false))
	mux.HandleFunc("/agents", s.handleAgents)
	mux.HandleFunc("/agents/add", s.handleAgentAdd)
	mux.HandleFunc("/agents/toggle", s.handleAgentToggle)
	mux.HandleFunc("/agents/delete", s.handleAgentDelete)
	mux.HandleFunc("/refresh", s.handleRefresh)
	mux.HandleFunc("/api/releases", s.handleAPIReleases)
	mux.HandleFunc("/api/agents", s.handleAPIAgents)
	mux.HandleFunc("/healthz", s.handleHealth)
}

func (s *Server) routesPublic(mux *http.ServeMux) {
	// Read-only UI, no admin or API endpoints
	mux.HandleFunc("/", s.handlerList(true))
	mux.HandleFunc("/healthz", s.handleHealth)
}

func (s *Server) handleRefresh(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	go s.fetcher.FetchOnce()
	http.Redirect(w, r, "/", http.StatusSeeOther)
}

func (s *Server) handleAPIReleases(w http.ResponseWriter, r *http.Request) {
	s.store.mu.RLock()
	rels := append([]Release(nil), s.store.releases...)
	s.store.mu.RUnlock()

	// Optional filtering via query parameters
	rf := parseFilter(r)
	out := make([]Release, 0, len(rels))
	for _, v := range rels {
		if rf.match(v) {
			out = append(out, v)
		}
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(out)
}

func (s *Server) handleAPIAgents(w http.ResponseWriter, r *http.Request) {
	s.store.mu.RLock()
	list := make([]*Agent, 0, len(s.store.agents))
	for _, a := range s.store.agents {
		list = append(list, a)
	}
	s.store.mu.RUnlock()
	sort.Slice(list, func(i, j int) bool { return list[i].Name < list[j].Name })
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(list)
}

func parseFilter(r *http.Request) ReleaseFilter {
	q := strings.TrimSpace(r.URL.Query().Get("q"))
	ch := strings.TrimSpace(r.URL.Query().Get("channel"))
	br := strings.TrimSpace(r.URL.Query().Get("branch"))
	osv := strings.TrimSpace(r.URL.Query().Get("os"))
	ar := strings.TrimSpace(r.URL.Query().Get("arch"))
	vndr := strings.TrimSpace(r.URL.Query().Get("vendor"))
	prd := strings.TrimSpace(r.URL.Query().Get("product"))
	var from, to time.Time
	if v := strings.TrimSpace(r.URL.Query().Get("from")); v != "" {
		from, _ = time.Parse("2006-01-02", v)
	}
	if v := strings.TrimSpace(r.URL.Query().Get("to")); v != "" {
		to, _ = time.Parse("2006-01-02", v)
		if !to.IsZero() {
			// Make "to" inclusive of the whole day.
			to = to.Add(24 * time.Hour).Add(-time.Nanosecond)
		}
	}
	return ReleaseFilter{Q: q, Channel: ch, Branch: br, OS: osv, Arch: ar, Vendor: vndr, Product: prd, From: from, To: to}
}
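// Example queries against the JSON API registered in routesAdmin above (the
// :9090 address is the HTTP_ADMIN default from main; adjust as needed):
//
//	curl 'http://localhost:9090/api/releases'
//	curl 'http://localhost:9090/api/releases?channel=stable&os=linux&arch=amd64'
//	curl 'http://localhost:9090/api/releases?q=1.2&from=2025-01-01&to=2025-12-31'
//	curl 'http://localhost:9090/api/agents'
//
// All filter parameters correspond to parseFilter; dates use YYYY-MM-DD and
// "to" is interpreted inclusively.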
func (s *Server) handlerList(readOnly bool) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		rf := parseFilter(r)
		s.store.mu.RLock()
		rels := append([]Release(nil), s.store.releases...)
		s.store.mu.RUnlock()

		filtered := make([]Release, 0, len(rels))
		for _, v := range rels {
			if rf.match(v) {
				filtered = append(filtered, v)
			}
		}

		data := struct {
			Releases                                      []Release
			CountTotal                                    int
			CountShown                                    int
			Q, Channel, Branch, OS, Arch, Vendor, Product string
			From, To                                      string
			Now                                           string
			BaseStyle                                     template.CSS
			ReadOnly                                      bool
		}{
			Releases:   filtered,
			CountTotal: len(rels),
			CountShown: len(filtered),
			Q:          rf.Q,
			Channel:    rf.Channel,
			Branch:     rf.Branch,
			OS:         rf.OS,
			Arch:       rf.Arch,
			Vendor:     rf.Vendor,
			Product:    rf.Product,
			From:       dateStr(rf.From),
			To:         dateStr(rf.To),
			Now:        time.Now().In(s.tz).Format("02.01.2006 15:04 MST"),
			// template.CSS marks the inline stylesheet as trusted so that
			// html/template does not escape it inside the <style> element.
			BaseStyle: template.CSS(baseStyle),
			ReadOnly:  readOnly,
		}
		var buf bytes.Buffer
		if err := pageTmpl.Execute(&buf, data); err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
		w.Header().Set("Content-Type", "text/html; charset=utf-8")
		w.Write(buf.Bytes())
	}
}

func (s *Server) handleAgents(w http.ResponseWriter, r *http.Request) {
	s.store.mu.RLock()
	list := make([]*Agent, 0, len(s.store.agents))
	for _, a := range s.store.agents {
		list = append(list, a)
	}
	s.store.mu.RUnlock()
	sort.Slice(list, func(i, j int) bool { return list[i].Name < list[j].Name })

	var buf bytes.Buffer
	if err := agentsTmpl.Execute(&buf, struct{ Agents []*Agent }{list}); err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	w.Write(buf.Bytes())
}

func (s *Server) handleAgentAdd(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	name := strings.TrimSpace(r.FormValue("name"))
	url := strings.TrimSpace(r.FormValue("url"))
	if name == "" || url == "" {
		http.Error(w, "name and url required", 400)
		return
	}
	id := genID()
	a := &Agent{ID: id, Name: name, EndpointURL: url, Enabled: true}
	s.store.mu.Lock()
	s.store.agents[a.ID] = a
	s.store.mu.Unlock()
	_ = s.store.saveAgents()
	go s.fetcher.FetchOnce()
	http.Redirect(w, r, "/agents", http.StatusSeeOther)
}

func (s *Server) handleAgentToggle(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	id := strings.TrimSpace(r.FormValue("id"))
	s.store.mu.Lock()
	a, ok := s.store.agents[id]
	if !ok {
		s.store.mu.Unlock()
		http.Error(w, "not found", 404)
		return
	}
	a.Enabled = !a.Enabled
	s.store.mu.Unlock()
	// Unlock before calling saveAgents, otherwise we deadlock on the RWMutex
	// (saveAgents takes a read lock).
	_ = s.store.saveAgents()
	http.Redirect(w, r, "/agents", http.StatusSeeOther)
}

func (s *Server) handleAgentDelete(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	id := strings.TrimSpace(r.FormValue("id"))
	s.store.mu.Lock()
	delete(s.store.agents, id)
	s.store.mu.Unlock()
	_ = s.store.saveAgents()

	// Remove this agent's releases from the cache
	s.store.mu.Lock()
	filtered := s.store.releases[:0]
	for _, r := range s.store.releases {
		if r.AgentID != id {
			filtered = append(filtered, r)
		}
	}
	s.store.releases = filtered
	s.store.mu.Unlock()
	_ = s.store.saveReleases()

	http.Redirect(w, r, "/agents", http.StatusSeeOther)
}
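// Example of driving the admin endpoints defined above without the HTML UI
// (again assuming the HTTP_ADMIN default :9090 from main; the agent id shown
// is illustrative — list real ids via /api/agents):
//
//	# register an agent and trigger an immediate fetch
//	curl -X POST -d 'name=Agent A' -d 'url=https://agent-a.example.invalid/manifest.json' \
//	     http://localhost:9090/agents/add
//
//	# enable/disable or remove an agent by id
//	curl -X POST -d 'id=a_1700000000000000000' http://localhost:9090/agents/toggle
//	curl -X POST -d 'id=a_1700000000000000000' http://localhost:9090/agents/delete
//
//	# force a refresh of all enabled agents
//	curl -X POST http://localhost:9090/refresh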
// ---- Utils -------------------------------------------------------------------

// parseAnyReleases scans arbitrarily nested JSON structures for arrays of
// release-like objects. Found objects are normalized with respect to known
// alias fields and then decoded into []Release.
func parseAnyReleases(raw []byte) ([]Release, error) {
	var v any
	if err := json.Unmarshal(raw, &v); err != nil {
		return nil, err
	}
	// 3a) Special hierarchical SEND.NRW schema (releases→branch→channel→arch→bit→os→obj)
	if arr := parseHierarchicalFromRoot(v); len(arr) > 0 {
		b, _ := json.Marshal(arr)
		var rr []Release
		if err := json.Unmarshal(b, &rr); err == nil && len(rr) > 0 {
			return rr, nil
		}
	}
	// 3b) Generic: collect arbitrary arrays of object maps and normalize them
	var buckets [][]map[string]any
	collectObjectArrays(v, &buckets)
	var all []Release
	for _, arr := range buckets {
		norm := normalizeReleaseSlice(arr)
		b, _ := json.Marshal(norm)
		var rr []Release
		if err := json.Unmarshal(b, &rr); err == nil {
			keep := 0
			for _, r := range rr {
				if r.Version != "" || len(r.Assets) > 0 {
					keep++
				}
			}
			if keep > 0 {
				all = append(all, rr...)
			}
		}
	}
	return all, nil
}

// parseHierarchicalFromRoot detects structures of the form
//
//	releases → branch → channel → arch → bitness → os → { release-obj }
//
// and returns normalized release maps.
func parseHierarchicalFromRoot(root any) []map[string]any {
	m, ok := root.(map[string]any)
	if !ok {
		return nil
	}
	var defBranch, defChannel, defVendor, defProduct string
	if s, ok := m["default_branch"].(string); ok {
		defBranch = s
	}
	if s, ok := m["default_channel"].(string); ok {
		defChannel = s
	}
	if s, ok := m["vendor"].(string); ok {
		defVendor = s
	}
	if s, ok := m["product"].(string); ok {
		defProduct = s
	}
	relRoot, ok := m["releases"].(map[string]any)
	if !ok {
		return nil
	}
	out := make([]map[string]any, 0)
	for branchKey, channelsV := range relRoot {
		channels, ok := channelsV.(map[string]any)
		if !ok {
			continue
		}
		bk := branchKey
		if bk == "" {
			bk = defBranch
		}
		for channelKey, archsV := range channels {
			archs, ok := archsV.(map[string]any)
			if !ok {
				continue
			}
			ck := channelKey
			if ck == "" {
				ck = defChannel
			}
			for archKey, bitsV := range archs {
				bits, ok := bitsV.(map[string]any)
				if !ok {
					continue
				}
				ak := archKey
				for _, osesV := range bits {
					oses, ok := osesV.(map[string]any)
					if !ok {
						continue
					}
					for osKey, leafV := range oses {
						leaf, ok := leafV.(map[string]any)
						if !ok {
							continue
						}
						nm := normalizeReleaseMap(leaf)
						if _, ok := nm["branch"]; !ok && bk != "" {
							nm["branch"] = bk
						}
						if _, ok := nm["channel"]; !ok && ck != "" {
							nm["channel"] = ck
						}
						if _, ok := nm["arch"]; !ok && ak != "" {
							nm["arch"] = ak
						}
						if _, ok := nm["os"]; !ok && osKey != "" {
							nm["os"] = osKey
						}
						if _, ok := nm["vendor"]; !ok && defVendor != "" {
							nm["vendor"] = defVendor
						}
						if _, ok := nm["product"]; !ok && defProduct != "" {
							nm["product"] = defProduct
						}
						out = append(out, nm)
					}
				}
			}
		}
	}
	return out
}

func collectObjectArrays(v any, out *[][]map[string]any) {
	switch x := v.(type) {
	case []any:
		// Check whether this is an array of objects
		var objArr []map[string]any
		ok := true
		for _, e := range x {
			m, isObj := e.(map[string]any)
			if !isObj {
				ok = false
				break
			}
			objArr = append(objArr, m)
		}
		if ok && len(objArr) > 0 {
			*out = append(*out, objArr)
		}
		// Keep searching recursively
		for _, e := range x {
			collectObjectArrays(e, out)
		}
	case map[string]any:
		for _, e := range x {
			collectObjectArrays(e, out)
		}
	}
}

func normalizeReleaseSlice(arr []map[string]any) []map[string]any {
	out := make([]map[string]any, 0, len(arr))
	for _, m := range arr {
		out = append(out, normalizeReleaseMap(m))
	}
	return out
}
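// Illustration of the alias normalization performed by normalizeReleaseMap and
// normalizeAssetMap (below); the values are made up:
//
//	release in:  {"Version": "2.0.0", "release_date": "2025-09-30",
//	              "platform": "linux", "architecture": "arm64", "changelog": "…"}
//	release out: gains "version", "released_at", "os", "arch" and "notes";
//	             the original alias keys remain in the map but are ignored when
//	             the result is decoded into Release.
//
//	asset in:    {"href": "https://example.invalid/pkg.tar.gz", "sha256sum": "…",
//	              "size": 1048576, "mime": "application/gzip"}
//	asset out:   gains "url", "sha256", "size_bytes" and "content_type".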
func normalizeReleaseMap(m map[string]any) map[string]any {
	// Convert keys to snake_case & map aliases
	out := make(map[string]any, len(m))
	for k, v := range m {
		out[camelToSnake(k)] = v
	}
	alias := func(dst string, alts ...string) {
		if _, ok := out[dst]; ok {
			return
		}
		for _, a := range alts {
			if val, ok := out[a]; ok {
				out[dst] = val
				return
			}
		}
	}
	// Release fields
	alias("version", "ver", "v")
	alias("build", "build_id")
	alias("released_at", "release_date", "releasedAt", "published_at", "publish_date", "date", "time", "timestamp")
	alias("branch", "br")
	alias("channel", "chan")
	alias("os", "platform")
	alias("arch", "architecture")
	alias("notes", "changelog", "summary")

	// Normalize assets (if present)
	if raw, ok := out["assets"].([]any); ok {
		var aset []map[string]any
		for _, e := range raw {
			if am, ok := e.(map[string]any); ok {
				aset = append(aset, normalizeAssetMap(am))
			}
		}
		out["assets"] = aset
	}
	return out
}

func cleanReleases(in []Release) []Release {
	for i := range in {
		for j := range in[i].Assets {
			if in[i].Assets[j].SHA256 == "-" {
				in[i].Assets[j].SHA256 = ""
			}
			if in[i].Assets[j].SignatureURL == "-" {
				in[i].Assets[j].SignatureURL = ""
			}
		}
	}
	return in
}

func normalizeAssetMap(m map[string]any) map[string]any {
	out := make(map[string]any, len(m))
	for k, v := range m {
		out[camelToSnake(k)] = v
	}
	alias := func(dst string, alts ...string) {
		if _, ok := out[dst]; ok {
			return
		}
		for _, a := range alts {
			if val, ok := out[a]; ok {
				out[dst] = val
				return
			}
		}
	}
	alias("url", "href", "link")
	alias("sha256", "sha256sum", "checksum", "hash")
	alias("size_bytes", "size", "bytes")
	alias("signature_url", "sig", "signature", "signatureUrl")
	alias("content_type", "mime", "mimetype", "type")
	return out
}

func camelToSnake(s string) string {
	// Very simple CamelCase→snake_case converter; acronyms are split letter by
	// letter (e.g. "releasedAt" → "released_at", but "SHA" → "s_h_a").
	var b strings.Builder
	for i, r := range s {
		if unicode.IsUpper(r) {
			if i > 0 {
				b.WriteByte('_')
			}
			b.WriteRune(unicode.ToLower(r))
		} else {
			b.WriteRune(r)
		}
	}
	return b.String()
}

func genID() string {
	// Simple time-based ID, sufficient for local tooling purposes
	return fmt.Sprintf("a_%d", time.Now().UnixNano())
}

func dateStr(t time.Time) string {
	if t.IsZero() {
		return ""
	}
	return t.Format("2006-01-02")
}
// ---- Templates & styles (inline, no external CDNs) ---------------------------

var baseStyle = `
:root{--bg:#0b1324;--card:#111a30;--muted:#93a0b0;--txt:#e9eef7;--acc:#4cc9f0;--ok:#22c55e;--warn:#f59e0b;--err:#ef4444}
*{box-sizing:border-box}
body{margin:0;font-family:ui-sans-serif,system-ui,-apple-system,Segoe UI,Roboto,Ubuntu,"Helvetica Neue";background:var(--bg);color:var(--txt)}
.header{position:sticky;top:0;z-index:10;background:linear-gradient(90deg,#0b1324,#0e1a33);border-bottom:1px solid #1c2947;padding:16px}
.container{max-width:90%;margin:0 auto;padding:16px}
.card{background:var(--card);border:1px solid #1c2947;border-radius:16px;box-shadow:0 6px 18px rgba(0,0,0,.25)}
.row{display:flex;gap:12px;flex-wrap:wrap}
.btn{display:inline-block;padding:10px 14px;border-radius:12px;border:1px solid #2a3a63;background:#152448;color:var(--txt);text-decoration:none;cursor:pointer}
.btn:hover{background:#19305a}
.btn-acc{border-color:#2b9bc2}
.badge{padding:2px 8px;border-radius:999px;border:1px solid #2a3a63;color:var(--muted);font-size:12px}
.input, select{background:#0b1429;border:1px solid #2a3a63;border-radius:12px;padding:10px;color:var(--txt);width:100%}
label{font-size:12px;color:var(--muted)}
.table{width:100%;border-collapse:separate;border-spacing:0}
.table th, .table td{padding:10px 12px;border-bottom:1px solid #1c2947}
.table th{position:sticky;top:0;background:#122042;text-align:left}
.kv{display:grid;grid-template-columns:120px 1fr;gap:6px 12px}
.small{font-size:12px;color:var(--muted)}
.code{font-family:ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;font-size:12px}
hr{border:0;border-top:1px solid #1c2947;margin:12px 0}
`

var pageTmpl = template.Must(template.New("page").Funcs(template.FuncMap{
	"fmtTime": func(t FlexTime) string {
		if t.IsZero() {
			return ""
		}
		return t.Time.Format("02.01.2006 15:04")
	},
}).Parse(`<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Release Hub</title>
<style>{{.BaseStyle}}</style>
</head>
<body>
<div class="header"><div class="row">
  <div><h2>Release Hub</h2><div class="small">Unified release list · as of {{.Now}}</div></div>
  {{if .ReadOnly}}{{else}}
  <a class="btn" href="/agents">⚙ Agents</a>
  <form method="post" action="/refresh"><button class="btn btn-acc" type="submit">Refresh</button></form>
  {{end}}
</div></div>
<div class="container"><div class="card" style="padding:16px">
  <form method="get" action="/" class="row">
    <div><label>Search</label><input class="input" name="q" value="{{.Q}}"></div>
    <div><label>Channel</label><input class="input" name="channel" value="{{.Channel}}"></div>
    <div><label>Branch</label><input class="input" name="branch" value="{{.Branch}}"></div>
    <div><label>OS</label><input class="input" name="os" value="{{.OS}}"></div>
    <div><label>Arch</label><input class="input" name="arch" value="{{.Arch}}"></div>
    <div><label>Vendor</label><input class="input" name="vendor" value="{{.Vendor}}"></div>
    <div><label>Product</label><input class="input" name="product" value="{{.Product}}"></div>
    <div><label>From</label><input class="input" type="date" name="from" value="{{.From}}"></div>
    <div><label>To</label><input class="input" type="date" name="to" value="{{.To}}"></div>
    <div><label>&nbsp;</label><button class="btn btn-acc" type="submit">Filter</button></div>
  </form>
  <hr>
  <div class="small">Showing {{.CountShown}} of {{.CountTotal}} releases</div>
  <table class="table">
    <thead><tr><th>Date</th><th>Version</th><th>Branch</th><th>Channel</th><th>OS</th><th>Arch</th><th>Vendor</th><th>Product</th><th>Agent</th><th>Assets</th><th>Notes</th></tr></thead>
    <tbody>
    {{range .Releases}}
    <tr>
      <td>{{fmtTime .ReleasedAt}}</td>
      <td>{{.Version}} {{if .Build}}({{.Build}}){{end}}</td>
      <td>{{.Branch}}</td><td>{{.Channel}}</td><td>{{.OS}}</td><td>{{.Arch}}</td>
      <td>{{.Vendor}}</td><td>{{.Product}}</td><td>{{.AgentName}}</td>
      <td>
        {{if .Assets}}
        {{range .Assets}}
        <div><span class="badge">Asset</span>
          <a class="btn" href="{{.URL}}">Download</a>
          <span class="small">{{.ContentType}} {{if .Size}}· {{.Size}} B{{end}} {{if .SHA256}}· <span class="code">sha256:{{.SHA256}}</span>{{end}}</span>
        </div>
        {{end}}
        {{else}} - {{end}}
      </td>
      <td class="small">{{.Notes}}</td>
    </tr>
    {{end}}
    </tbody>
  </table>
</div></div>
</body>
</html>`))

// The agents template receives no BaseStyle field in its data struct, so the
// shared stylesheet is inlined at parse time via string concatenation.
var agentsTmpl = template.Must(template.New("agents").Parse(`<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Agents · Release Hub</title>
<style>` + baseStyle + `</style>
</head>
<body>
<div class="header"><h2>Agents</h2><div class="small">Manage manifest endpoints</div></div>
<div class="container"><div class="card" style="padding:16px">
  <a class="btn" href="/">← Overview</a>
  <hr>
  <form method="post" action="/agents/add" class="row">
    <div><label>Name</label><input class="input" name="name"></div>
    <div><label>URL</label><input class="input" name="url"></div>
    <div><label>&nbsp;</label><button class="btn btn-acc" type="submit">Add</button></div>
  </form>
  <hr>
  <table class="table">
    <thead><tr><th>Name</th><th>URL</th><th>Status</th><th>Actions</th></tr></thead>
    <tbody>
    {{range .Agents}}
    <tr>
      <td>{{.Name}}</td>
      <td class="code">{{.EndpointURL}}</td>
      <td class="small">
        {{if .Enabled}}active{{else}}inactive{{end}}
        {{if not .LastOK.IsZero}}· OK: {{.LastOK}}{{end}}
        {{if .LastError}}· Error: {{.LastError}}{{end}}
      </td>
      <td>
        <form method="post" action="/agents/toggle" style="display:inline"><input type="hidden" name="id" value="{{.ID}}"><button class="btn" type="submit">{{if .Enabled}}Disable{{else}}Enable{{end}}</button></form>
        <form method="post" action="/agents/delete" style="display:inline"><input type="hidden" name="id" value="{{.ID}}"><button class="btn" type="submit">Delete</button></form>
      </td>
    </tr>
    {{end}}
    </tbody>
  </table>
</div></div>
</body>
</html>`))
// ---- main --------------------------------------------------------------------

func getenv(k, d string) string {
	if v := os.Getenv(k); v != "" {
		return v
	}
	return d
}

func getduration(k string, d time.Duration) time.Duration {
	v, ok := os.LookupEnv(k)
	if !ok {
		return d
	}
	v = strings.TrimSpace(v)
	if v == "" {
		return d
	}
	if dur, err := time.ParseDuration(v); err == nil {
		return dur
	}
	if n, err := strconv.ParseInt(v, 10, 64); err == nil {
		return time.Duration(n) * time.Second
	}
	return d
}

func enabled(k string, def bool) bool {
	b, err := strconv.ParseBool(strings.ToLower(os.Getenv(k)))
	if err != nil {
		return def
	}
	return b
}

func main() {
	envAddr := getenv("HTTP_ADMIN", ":9090")
	envPublicAddr := getenv("HTTP_PUBLIC", ":8080")
	envDataDir := getenv("APP_DATADIR", "/data")
	envRefresh := getduration("APP_REFRESH", 10*time.Minute)

	store := NewStore(envDataDir)
	if err := store.load(); err != nil {
		log.Fatalf("load store: %v", err)
	}

	fetcher := NewFetcher(store, envRefresh)
	fetcher.Start()
	defer fetcher.Stop()

	srv := NewServer(store, fetcher)

	// Admin server
	muxAdmin := http.NewServeMux()
	srv.routesAdmin(muxAdmin)

	errCh := make(chan error, 2)
	go func() {
		log.Printf("Admin (full) listening on %s", envAddr)
		errCh <- http.ListenAndServe(envAddr, muxAdmin)
	}()

	// Public read-only server (optional)
	if envPublicAddr != "" {
		muxPublic := http.NewServeMux()
		srv.routesPublic(muxPublic)
		go func() {
			log.Printf("Public (read-only) listening on %s", envPublicAddr)
			errCh <- http.ListenAndServe(envPublicAddr, muxPublic)
		}()
	}

	log.Fatal(<-errCh)
}

// Healthcheck
func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write([]byte("ok"))
}
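// Example local run with explicit configuration (environment variables as read
// in main; the values are illustrative):
//
//	APP_DATADIR=./data HTTP_ADMIN=:9090 HTTP_PUBLIC=:8080 APP_REFRESH=5m go run .
//
// APP_REFRESH accepts a Go duration string ("5m", "1h") or a plain number of
// seconds. Note that getenv falls back to ":8080" when HTTP_PUBLIC is unset or
// empty, so with the current defaults the public read-only listener is
// effectively always started.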