diff --git a/.gitea/workflows/registry.yml b/.gitea/workflows/registry.yml
new file mode 100644
index 0000000..20912ac
--- /dev/null
+++ b/.gitea/workflows/registry.yml
@@ -0,0 +1,51 @@
+name: release-tag
+on:
+  push:
+    branches:
+      - 'main'
+jobs:
+  release-image:
+    runs-on: ubuntu-fast
+    env:
+      DOCKER_ORG: ${{ vars.DOCKER_ORG }}
+      DOCKER_LATEST: latest
+      RUNNER_TOOL_CACHE: /toolcache
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+
+      - name: Set up Docker BuildX
+        uses: docker/setup-buildx-action@v2
+        with: # replace the registry variable with your local IP if needed
+          config-inline: |
+            [registry."${{ vars.DOCKER_REGISTRY }}"]
+              http = true
+              insecure = true
+
+      - name: Login to registry
+        uses: docker/login-action@v2
+        with:
+          registry: ${{ vars.DOCKER_REGISTRY }} # replace with your local IP if needed
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+
+      - name: Get Meta
+        id: meta
+        run: |
+          echo REPO_NAME=$(echo ${GITHUB_REPOSITORY} | awk -F"/" '{print $2}') >> $GITHUB_OUTPUT
+          echo REPO_VERSION=$(git describe --tags --always | sed 's/^v//') >> $GITHUB_OUTPUT
+
+      - name: Build and push
+        uses: docker/build-push-action@v4
+        with:
+          context: .
+          file: ./Dockerfile
+          platforms: |
+            linux/amd64
+          push: true
+          tags: | # replace with your registry/IP and tags as needed
+            ${{ vars.DOCKER_REGISTRY }}/${{ env.DOCKER_ORG }}/${{ steps.meta.outputs.REPO_NAME }}:${{ steps.meta.outputs.REPO_VERSION }}
+            ${{ vars.DOCKER_REGISTRY }}/${{ env.DOCKER_ORG }}/${{ steps.meta.outputs.REPO_NAME }}:${{ env.DOCKER_LATEST }}
\ No newline at end of file
diff --git a/.gitea/workflows/release.yml b/.gitea/workflows/release.yml
new file mode 100644
index 0000000..2099c6e
--- /dev/null
+++ b/.gitea/workflows/release.yml
@@ -0,0 +1,124 @@
+# Git(tea) Actions workflow: build and publish standalone binaries **plus** bundled `static/` assets
+# ────────────────────────────────────────────────────────────────────
+# ✧ Builds the Go-based release hub for four targets **and** can package the
+#   `static` directory next to the binary so that it sits relative to the
+#   executable (important for the built-in Bootstrap assets & favicon);
+#   the copy step is currently commented out in the build step below.
+#
+#   • linux/amd64   → release-hub-linux-amd64.tar.gz
+#   • linux/arm64   → release-hub-linux-arm64.tar.gz
+#   • linux/arm/v7  → release-hub-linux-armv7.tar.gz
+#   • windows/amd64 → release-hub-windows-amd64.zip
+#
+# ✧ Artifacts are uploaded to the workflow run and, on tag pushes (vX.Y.Z),
+#   attached as release assets.
+#
+# Secrets/variables:
+#   GITEA_TOKEN – optional, in case the default token lacks release permissions.
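+#
+# Example (hypothetical tag name): pushing a tag is what actually publishes
+# a release:
+#   git tag v1.2.3 && git push origin v1.2.3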
+# ────────────────────────────────────────────────────────────────────
+
+name: build-binaries
+
+on:
+  push:
+    branches: [ "main" ]
+    tags: [ "v*" ]
+
+jobs:
+  build:
+    # Note: both jobs are gated to tag refs, so plain pushes to main trigger
+    # the workflow but skip all jobs.
+    if: startsWith(github.ref, 'refs/tags/')
+    runs-on: ubuntu-fast
+
+    strategy:
+      matrix:
+        include:
+          - goos: linux
+            goarch: amd64
+            ext: ""
+          - goos: linux
+            goarch: arm64
+            ext: ""
+          - goos: linux
+            goarch: arm
+            goarm: "7"
+            ext: ""
+          - goos: windows
+            goarch: amd64
+            ext: ".exe"
+
+    env:
+      GO_VERSION: "1.25"
+      BINARY_NAME: release-hub
+
+    steps:
+      - name: Checkout source
+        uses: actions/checkout@v3
+
+      - name: Set up Go ${{ env.GO_VERSION }}
+        uses: actions/setup-go@v4
+        with:
+          go-version: ${{ env.GO_VERSION }}
+          cache: true
+
+      - name: Build ${{ matrix.goos }}/${{ matrix.goarch }}${{ matrix.goarm && format('/v{0}', matrix.goarm) || '' }}
+        shell: bash
+        run: |
+          set -e
+          mkdir -p dist/package
+          if [ -n "${{ matrix.goarm }}" ]; then export GOARM=${{ matrix.goarm }}; fi
+          CGO_ENABLED=0 GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} go build -trimpath -ldflags "-s -w" \
+            -o "dist/package/${BINARY_NAME}${{ matrix.ext }}" .
+          # Assets: bundle the static directory (currently disabled)
+          # cp -r static dist/package/
+
+      - name: Package archive with static assets
+        shell: bash
+        run: |
+          set -e
+          cd dist
+          if [ "${{ matrix.goos }}" == "windows" ]; then
+            ZIP_NAME="${BINARY_NAME}-windows-amd64.zip"
+            (cd package && zip -r "../$ZIP_NAME" .)
+          else
+            ARCH_SUFFIX="${{ matrix.goarch }}"
+            if [ "${{ matrix.goarch }}" == "arm" ]; then ARCH_SUFFIX="armv${{ matrix.goarm }}"; fi
+            TAR_NAME="${BINARY_NAME}-${{ matrix.goos }}-${ARCH_SUFFIX}.tar.gz"
+            tar -czf "$TAR_NAME" -C package .
+          fi
+
+      - name: Upload workflow artifact
+        uses: actions/upload-artifact@v3
+        with:
+          name: ${{ matrix.goos }}-${{ matrix.goarch }}${{ matrix.goarm && format('v{0}', matrix.goarm) || '' }}
+          path: dist/*.tar.gz
+          if-no-files-found: ignore
+      - uses: actions/upload-artifact@v3
+        with:
+          name: windows-amd64
+          path: dist/*.zip
+          if-no-files-found: ignore
+
+  # Release step for tag pushes
+  release:
+    if: startsWith(github.ref, 'refs/tags/')
+    needs: build
+    runs-on: ubuntu-fast
+    permissions:
+      contents: write
+
+    steps:
+      - name: Download artifacts
+        uses: actions/download-artifact@v3
+        with:
+          path: ./dist
+
+      - name: Create / Update release
+        uses: softprops/action-gh-release@v2
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITEA_TOKEN || github.token }}
+        with:
+          name: "Release ${{ github.ref_name }}"
+          tag_name: ${{ github.ref_name }}
+          draft: false
+          prerelease: false
+          files: |
+            dist/**/release-hub-*.tar.gz
+            dist/**/release-hub-*.zip
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..30b60ef
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,19 @@
+FROM golang:1.25.3 AS build
+WORKDIR /src
+# Copy go.mod/go.sum first so the module download layer is cached across source changes
+COPY go.mod go.sum ./
+RUN go mod download
+COPY . .
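+# CGO is disabled below so the binary is fully static and runs on plain
+# alpine (or distroless) without needing libc.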
+RUN CGO_ENABLED=0 GOOS=linux go build -o /out/release-hub
+#FROM gcr.io/distroless/static:nonroot
+FROM alpine:3.22.2
+WORKDIR /
+RUN mkdir /data
+VOLUME ["/data"]
+# Only the public UI port is exposed; the admin UI listens on :9090 (HTTP_ADMIN).
+EXPOSE 8080
+ENV HTTP_ADMIN=":9090" \
+    HTTP_PUBLIC=":8080" \
+    APP_DATADIR="/data" \
+    APP_REFRESH="30m"
+
+COPY --from=build /out/release-hub /release-hub
+ENTRYPOINT ["/release-hub"]
\ No newline at end of file
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..a2c5c09
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,3 @@
+module git.send.nrw/patchping/release-hub
+
+go 1.25.3
diff --git a/main.go b/main.go
new file mode 100644
index 0000000..81e4f87
--- /dev/null
+++ b/main.go
@@ -0,0 +1,1189 @@
+// Release Hub – central manifest aggregator for Patch-DB agents
+//
+// Goals
+// - Central management of multiple agent endpoints (manifest URLs)
+// - Periodic pulling/parsing of the manifests
+// - Unified release list with filter/search UI (filtered server-side)
+// - Pure Go (standard library only), no external cloud/CDN
+// - Persistence in local JSON files (agents + cache)
+// - REST JSON API (/api/releases, /api/agents)
+//
+// Run with
+//    go run .
+//
+// Notes on the expected manifest format
+// - Preferred:
+//    { "releases": [ { "version": "1.2.3", "released_at": "2025-10-01T10:00:00Z", ... } ] }
+// - Alternatively: a plain array [ { ... }, { ... } ]
+// - Also supported: nested JSON structures. In particular, the schema
+//    releases → branch → channel → arch → bitness → os → { release obj }
+//   is recognized and flattened into plain releases. The hub additionally
+//   accepts common alias fields (e.g. releasedAt/release_date/published_at →
+//   released_at, size → size_bytes, sha256sum → sha256, ...).
+// - Date formats: RFC3339, YYYY-MM-DD, or seconds since the epoch.
+// - Known fields: version, build, released_at, branch, channel, os, arch, notes, assets[]
+// - assets: { url, sha256, size_bytes, signature_url, content_type }
+
+package main
+
+import (
+    "bytes"
+    "encoding/json"
+    "errors"
+    "fmt"
+    "html/template"
+    "io"
+    "log"
+    "net/http"
+    "os"
+    "path/filepath"
+    "sort"
+    "strconv"
+    "strings"
+    "sync"
+    "time"
+    "unicode"
+)
+
+// ---- Data model --------------------------------------------------------------
+
+type Asset struct {
+    URL          string `json:"url"`
+    SHA256       string `json:"sha256"`
+    Size         int64  `json:"size_bytes,omitempty"`
+    SignatureURL string `json:"signature_url,omitempty"`
+    ContentType  string `json:"content_type,omitempty"`
+}
+
+type FlexTime struct{ time.Time }
+
+func (ft *FlexTime) UnmarshalJSON(b []byte) error {
+    s := strings.Trim(string(b), "\"")
+    if s == "" || s == "null" {
+        return nil
+    }
+    // Try RFC3339 first, then date-only
+    if t, err := time.Parse(time.RFC3339, s); err == nil {
+        ft.Time = t
+        return nil
+    }
+    if t, err := time.Parse("2006-01-02", s); err == nil {
+        ft.Time = t
+        return nil
+    }
+    // Fallback: try seconds since the epoch
+    if sec, err := strconv.ParseInt(s, 10, 64); err == nil {
+        ft.Time = time.Unix(sec, 0).UTC()
+        return nil
+    }
+    return fmt.Errorf("unsupported time format: %q", s)
+}
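+
+// For illustration, a minimal manifest in the hierarchical shape described in
+// the header above (all values hypothetical). parseAnyReleases flattens it
+// into a single Release with branch=stable, channel=prod, arch=x86, os=windows:
+//
+//    {
+//      "vendor": "acme", "product": "agent",
+//      "releases": { "stable": { "prod": { "x86": { "64": { "windows": {
+//        "version": "2.0.1", "releaseDate": "2025-09-15"
+//      } } } } } }
+//    }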
+
+// Release is the unified view of agent releases.
+// The fields are chosen conservatively; more can be added as needed.
+type Release struct {
+    Version    string   `json:"version"`
+    Build      string   `json:"build,omitempty"`
+    ReleasedAt FlexTime `json:"released_at"`
+    Branch     string   `json:"branch,omitempty"`
+    Channel    string   `json:"channel,omitempty"`
+    OS         string   `json:"os,omitempty"`
+    Arch       string   `json:"arch,omitempty"`
+    Notes      string   `json:"notes,omitempty"`
+    Vendor     string   `json:"vendor,omitempty"`
+    Product    string   `json:"product,omitempty"`
+    Assets     []Asset  `json:"assets,omitempty"`
+
+    // Source (set by the hub)
+    AgentID   string `json:"agent_id"`
+    AgentName string `json:"agent_name"`
+}
+
+// ManifestEnvelope allows decoding both {"releases": [...]} and a plain
+// array.
+type ManifestEnvelope struct {
+    Releases []Release `json:"releases"`
+}
+
+// Agent describes a registered manifest endpoint.
+type Agent struct {
+    ID           string    `json:"id"`
+    Name         string    `json:"name"`
+    EndpointURL  string    `json:"endpoint_url"`
+    Enabled      bool      `json:"enabled"`
+    LastOK       time.Time `json:"last_ok,omitempty"`
+    LastError    string    `json:"last_error,omitempty"`
+    ETag         string    `json:"etag,omitempty"`
+    LastModified string    `json:"last_modified,omitempty"`
+}
+
+// ---- Persistence ------------------------------------------------------------
+
+type Store struct {
+    mu       sync.RWMutex
+    agents   map[string]*Agent
+    releases []Release // merged result of the last successful pulls
+
+    dataDir string
+}
+
+func NewStore(dataDir string) *Store {
+    return &Store{
+        agents:  make(map[string]*Agent),
+        dataDir: dataDir,
+    }
+}
+
+func (s *Store) load() error {
+    if err := os.MkdirAll(s.dataDir, 0o755); err != nil {
+        return err
+    }
+
+    // Agents
+    b, err := os.ReadFile(filepath.Join(s.dataDir, "agents.json"))
+    if err == nil {
+        var list []*Agent
+        if err := json.Unmarshal(b, &list); err != nil {
+            return err
+        }
+        for _, a := range list {
+            s.agents[a.ID] = a
+        }
+    }
+
+    // Releases cache
+    b, err = os.ReadFile(filepath.Join(s.dataDir, "releases-cache.json"))
+    if err == nil {
+        var rr []Release
+        if err := json.Unmarshal(b, &rr); err != nil {
+            return err
+        }
+        s.releases = rr
+    }
+    return nil
+}
+
+func (s *Store) saveAgents() error {
+    s.mu.RLock()
+    defer s.mu.RUnlock()
+    list := make([]*Agent, 0, len(s.agents))
+    for _, a := range s.agents {
+        list = append(list, a)
+    }
+    sort.Slice(list, func(i, j int) bool { return list[i].Name < list[j].Name })
+    b, _ := json.MarshalIndent(list, "", "  ")
+    return os.WriteFile(filepath.Join(s.dataDir, "agents.json"), b, 0o644)
+}
+
+func (s *Store) saveReleases() error {
+    s.mu.RLock()
+    defer s.mu.RUnlock()
+    b, _ := json.MarshalIndent(s.releases, "", "  ")
+    return os.WriteFile(filepath.Join(s.dataDir, "releases-cache.json"), b, 0o644)
+}
+
+// ---- Fetching ---------------------------------------------------------------
+
+type Fetcher struct {
+    client   *http.Client
+    store    *Store
+    interval time.Duration
+    stopCh   chan struct{}
+}
+
+func NewFetcher(store *Store, interval time.Duration) *Fetcher {
+    return &Fetcher{
+        client:   &http.Client{Timeout: 20 * time.Second},
+        store:    store,
+        interval: interval,
+        stopCh:   make(chan struct{}),
+    }
+}
+
+func (f *Fetcher) Start() {
+    go func() {
+        // Initial immediate run
+        f.FetchOnce()
+        ticker := time.NewTicker(f.interval)
+        defer ticker.Stop()
+        for {
+            select {
+            case <-ticker.C:
+                f.FetchOnce()
+            case <-f.stopCh:
+                return
+            }
+        }
+    }()
+}
+
+func (f *Fetcher) Stop() { close(f.stopCh) }
+
+func (f *Fetcher) FetchOnce() {
+    f.store.mu.RLock()
+    agents := make([]*Agent, 0, len(f.store.agents))
+    for _, a := range f.store.agents {
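+        // Only enabled agents are fetched; the fetches themselves run concurrently below.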
+        if a.Enabled {
+            agents = append(agents, a)
+        }
+    }
+    f.store.mu.RUnlock()
+
+    var all []Release
+    var mu sync.Mutex
+    var wg sync.WaitGroup
+
+    for _, a := range agents {
+        a := a
+        wg.Add(1)
+        go func() {
+            defer wg.Done()
+            rels, err := f.fetchAgent(a)
+            if err != nil {
+                log.Printf("fetch agent %s: %v", a.Name, err)
+                f.store.mu.Lock()
+                a.LastError = err.Error()
+                f.store.mu.Unlock()
+                return
+            }
+            for i := range rels {
+                rels[i].AgentID = a.ID
+                rels[i].AgentName = a.Name
+            }
+            mu.Lock()
+            all = append(all, rels...)
+            mu.Unlock()
+
+            f.store.mu.Lock()
+            a.LastError = ""
+            a.LastOK = time.Now().UTC()
+            f.store.mu.Unlock()
+        }()
+    }
+
+    wg.Wait()
+
+    // Unified sort order: newest first
+    sort.Slice(all, func(i, j int) bool {
+        return all[i].ReleasedAt.Time.After(all[j].ReleasedAt.Time)
+    })
+
+    f.store.mu.Lock()
+    f.store.releases = all
+    f.store.mu.Unlock()
+
+    _ = f.store.saveReleases()
+}
+
+func (f *Fetcher) fetchAgent(a *Agent) ([]Release, error) {
+    req, err := http.NewRequest(http.MethodGet, a.EndpointURL, nil)
+    if err != nil {
+        return nil, err
+    }
+    // Conditional GET: send validators from the previous response so an
+    // unchanged manifest comes back as 304 Not Modified.
+    if a.ETag != "" {
+        req.Header.Set("If-None-Match", a.ETag)
+    }
+    if a.LastModified != "" {
+        req.Header.Set("If-Modified-Since", a.LastModified)
+    }
+
+    resp, err := f.client.Do(req)
+    if err != nil {
+        return nil, err
+    }
+    defer resp.Body.Close()
+
+    if resp.StatusCode == http.StatusNotModified {
+        // Not modified: reuse this agent's releases from the cache.
+        f.store.mu.RLock()
+        var cached []Release
+        for _, r := range f.store.releases {
+            if r.AgentID == a.ID {
+                cached = append(cached, r)
+            }
+        }
+        f.store.mu.RUnlock()
+        return cached, nil
+    }
+
+    if resp.StatusCode != http.StatusOK {
+        b, _ := io.ReadAll(io.LimitReader(resp.Body, 2048))
+        return nil, fmt.Errorf("unexpected status %s: %s", resp.Status, string(b))
+    }
+
+    if et := resp.Header.Get("ETag"); et != "" {
+        a.ETag = et
+    }
+    if lm := resp.Header.Get("Last-Modified"); lm != "" {
+        a.LastModified = lm
+    }
+
+    raw, err := io.ReadAll(resp.Body)
+    if err != nil {
+        return nil, err
+    }
+
+    // 1) {"releases": [...]}
+    var env ManifestEnvelope
+    if err := json.Unmarshal(raw, &env); err == nil && len(env.Releases) > 0 {
+        return env.Releases, nil
+    }
+
+    // 2) Plain array
+    var list []Release
+    if err := json.Unmarshal(raw, &list); err == nil && len(list) > 0 {
+        return list, nil
+    }
+
+    // 3) Generic scan (arbitrarily nested, normalize aliases)
+    if rr, err := parseAnyReleases(raw); err == nil && len(rr) > 0 {
+        return cleanReleases(rr), nil
+    }
+
+    return nil, errors.New("unrecognized manifest format: expected 'releases' or array")
+}
+
+// ---- Filtering & query ------------------------------------------------------
+
+type ReleaseFilter struct {
+    Q       string
+    Channel string
+    Branch  string
+    OS      string
+    Arch    string
+    Vendor  string
+    Product string
+    From    time.Time
+    To      time.Time
+}
+
+func (f ReleaseFilter) match(r Release) bool {
+    if f.Q != "" {
+        q := strings.ToLower(f.Q)
+        // Join with a separator so a query cannot accidentally match across
+        // field boundaries.
+        hay := strings.ToLower(strings.Join([]string{
+            r.Version, r.Build, r.Branch, r.Channel, r.OS, r.Arch, r.Notes, r.Vendor, r.Product, r.AgentName,
+        }, " "))
+        if !strings.Contains(hay, q) {
+            return false
+        }
+    }
+    if f.Channel != "" && !equalFoldOrDash(r.Channel, f.Channel) {
+        return false
+    }
+    if f.Branch != "" && !equalFoldOrDash(r.Branch, f.Branch) {
+        return false
+    }
+    if f.OS != "" && !equalFoldOrDash(r.OS, f.OS) {
+        return false
+    }
+    if f.Arch != "" && !equalFoldOrDash(r.Arch, f.Arch) {
+        return false
+    }
+    if f.Vendor != "" && !equalFoldOrDash(r.Vendor, f.Vendor) {
+        return false
+    }
+    if f.Product != "" && !equalFoldOrDash(r.Product, f.Product) {
+        return false
+    }
+    if !f.From.IsZero() && r.ReleasedAt.Time.Before(f.From) {
+        return false
+    }
+    if !f.To.IsZero() && r.ReleasedAt.Time.After(f.To) {
+        return false
+    }
+    return true
+}
+
+func equalFoldOrDash(a, b string) bool {
+    a = strings.TrimSpace(a)
+    b = strings.TrimSpace(b)
+    if a == "" && (b == "-" || b == "") {
+        return true
+    }
+    if b == "" && (a == "-" || a == "") {
+        return true
+    }
+    return strings.EqualFold(a, b)
+}
+
+// ---- HTTP/UI ---------------------------------------------------------------
+
+type Server struct {
+    store   *Store
+    fetcher *Fetcher
+    tz      *time.Location
+}
+
+func NewServer(store *Store, fetcher *Fetcher) *Server {
+    tz, _ := time.LoadLocation("Europe/Berlin")
+    return &Server{store: store, fetcher: fetcher, tz: tz}
+}
+
+func (s *Server) routesAdmin(mux *http.ServeMux) {
+    mux.HandleFunc("/", s.handlerList(false))
+    mux.HandleFunc("/agents", s.handleAgents)
+    mux.HandleFunc("/agents/add", s.handleAgentAdd)
+    mux.HandleFunc("/agents/toggle", s.handleAgentToggle)
+    mux.HandleFunc("/agents/delete", s.handleAgentDelete)
+    mux.HandleFunc("/refresh", s.handleRefresh)
+
+    mux.HandleFunc("/api/releases", s.handleAPIReleases)
+    mux.HandleFunc("/api/agents", s.handleAPIAgents)
+    mux.HandleFunc("/healthz", s.handleHealth)
+}
+
+func (s *Server) routesPublic(mux *http.ServeMux) {
+    // Read-only UI, no admin or API endpoints
+    mux.HandleFunc("/", s.handlerList(true))
+    mux.HandleFunc("/healthz", s.handleHealth)
+}
+
+func (s *Server) handleRefresh(w http.ResponseWriter, r *http.Request) {
+    if r.Method != http.MethodPost {
+        http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+        return
+    }
+    go s.fetcher.FetchOnce()
+    http.Redirect(w, r, "/", http.StatusSeeOther)
+}
+
+func (s *Server) handleAPIReleases(w http.ResponseWriter, r *http.Request) {
+    s.store.mu.RLock()
+    rels := append([]Release(nil), s.store.releases...)
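+    // The copy above is taken under the read lock; filtering and encoding
+    // below run without holding it.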
+    s.store.mu.RUnlock()
+
+    // Optional filters via query params
+    rf := parseFilter(r)
+    out := make([]Release, 0, len(rels))
+    for _, v := range rels {
+        if rf.match(v) {
+            out = append(out, v)
+        }
+    }
+
+    w.Header().Set("Content-Type", "application/json")
+    json.NewEncoder(w).Encode(out)
+}
+
+func (s *Server) handleAPIAgents(w http.ResponseWriter, r *http.Request) {
+    s.store.mu.RLock()
+    list := make([]*Agent, 0, len(s.store.agents))
+    for _, a := range s.store.agents {
+        list = append(list, a)
+    }
+    s.store.mu.RUnlock()
+
+    sort.Slice(list, func(i, j int) bool { return list[i].Name < list[j].Name })
+
+    w.Header().Set("Content-Type", "application/json")
+    json.NewEncoder(w).Encode(list)
+}
+
+// parseFilter reads the filter criteria from the query string, e.g.
+// ?q=foo&channel=stable&os=linux&from=2025-01-01&to=2025-06-30.
+func parseFilter(r *http.Request) ReleaseFilter {
+    q := strings.TrimSpace(r.URL.Query().Get("q"))
+    ch := strings.TrimSpace(r.URL.Query().Get("channel"))
+    br := strings.TrimSpace(r.URL.Query().Get("branch"))
+    osv := strings.TrimSpace(r.URL.Query().Get("os"))
+    ar := strings.TrimSpace(r.URL.Query().Get("arch"))
+    vndr := strings.TrimSpace(r.URL.Query().Get("vendor"))
+    prd := strings.TrimSpace(r.URL.Query().Get("product"))
+
+    var from, to time.Time
+    if v := strings.TrimSpace(r.URL.Query().Get("from")); v != "" {
+        from, _ = time.Parse("2006-01-02", v)
+    }
+    if v := strings.TrimSpace(r.URL.Query().Get("to")); v != "" {
+        to, _ = time.Parse("2006-01-02", v)
+        if !to.IsZero() {
+            // Make "to" inclusive: extend it to the last instant of that day.
+            to = to.Add(24 * time.Hour).Add(-time.Nanosecond)
+        }
+    }
+
+    return ReleaseFilter{Q: q, Channel: ch, Branch: br, OS: osv, Arch: ar, Vendor: vndr, Product: prd, From: from, To: to}
+}
+
+func (s *Server) handlerList(readOnly bool) http.HandlerFunc {
+    return func(w http.ResponseWriter, r *http.Request) {
+        rf := parseFilter(r)
+
+        s.store.mu.RLock()
+        rels := append([]Release(nil), s.store.releases...)
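+        // Snapshot under the read lock; template rendering below never blocks writers.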
+        s.store.mu.RUnlock()
+
+        filtered := make([]Release, 0, len(rels))
+        for _, v := range rels {
+            if rf.match(v) {
+                filtered = append(filtered, v)
+            }
+        }
+
+        data := struct {
+            Releases                                      []Release
+            CountTotal                                    int
+            CountShown                                    int
+            Q, Channel, Branch, OS, Arch, Vendor, Product string
+            From, To                                      string
+            Now                                           string
+            BaseStyle                                     string
+            ReadOnly                                      bool
+        }{
+            Releases:   filtered,
+            CountTotal: len(rels),
+            CountShown: len(filtered),
+            Q: rf.Q, Channel: rf.Channel, Branch: rf.Branch, OS: rf.OS, Arch: rf.Arch, Vendor: rf.Vendor, Product: rf.Product,
+            From: dateStr(rf.From), To: dateStr(rf.To),
+            Now:       time.Now().In(s.tz).Format("02.01.2006 15:04 MST"),
+            BaseStyle: baseStyle,
+            ReadOnly:  readOnly,
+        }
+
+        var buf bytes.Buffer
+        if err := pageTmpl.Execute(&buf, data); err != nil {
+            http.Error(w, err.Error(), 500)
+            return
+        }
+        w.Header().Set("Content-Type", "text/html; charset=utf-8")
+        w.Write(buf.Bytes())
+    }
+}
+
+func (s *Server) handleAgents(w http.ResponseWriter, r *http.Request) {
+    s.store.mu.RLock()
+    list := make([]*Agent, 0, len(s.store.agents))
+    for _, a := range s.store.agents {
+        list = append(list, a)
+    }
+    s.store.mu.RUnlock()
+    sort.Slice(list, func(i, j int) bool { return list[i].Name < list[j].Name })
+
+    var buf bytes.Buffer
+    if err := agentsTmpl.Execute(&buf, struct{ Agents []*Agent }{list}); err != nil {
+        http.Error(w, err.Error(), 500)
+        return
+    }
+    w.Header().Set("Content-Type", "text/html; charset=utf-8")
+    w.Write(buf.Bytes())
+}
+
+func (s *Server) handleAgentAdd(w http.ResponseWriter, r *http.Request) {
+    if r.Method != http.MethodPost {
+        http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+        return
+    }
+    name := strings.TrimSpace(r.FormValue("name"))
+    url := strings.TrimSpace(r.FormValue("url"))
+    if name == "" || url == "" {
+        http.Error(w, "name and url required", 400)
+        return
+    }
+
+    id := genID()
+    a := &Agent{ID: id, Name: name, EndpointURL: url, Enabled: true}
+    s.store.mu.Lock()
+    s.store.agents[a.ID] = a
+    s.store.mu.Unlock()
+    _ = s.store.saveAgents()
+    go s.fetcher.FetchOnce()
+    http.Redirect(w, r, "/agents", http.StatusSeeOther)
+}
+
+func (s *Server) handleAgentToggle(w http.ResponseWriter, r *http.Request) {
+    if r.Method != http.MethodPost {
+        http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+        return
+    }
+    id := strings.TrimSpace(r.FormValue("id"))
+
+    s.store.mu.Lock()
+    a, ok := s.store.agents[id]
+    if !ok {
+        s.store.mu.Unlock()
+        http.Error(w, "not found", 404)
+        return
+    }
+    a.Enabled = !a.Enabled
+    s.store.mu.Unlock() // ⚠️ unlock before saveAgents, otherwise it deadlocks on the RWMutex
+
+    _ = s.store.saveAgents()
+    http.Redirect(w, r, "/agents", http.StatusSeeOther)
+}
+
+func (s *Server) handleAgentDelete(w http.ResponseWriter, r *http.Request) {
+    if r.Method != http.MethodPost {
+        http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+        return
+    }
+    id := strings.TrimSpace(r.FormValue("id"))
+    s.store.mu.Lock()
+    delete(s.store.agents, id)
+    s.store.mu.Unlock()
+    _ = s.store.saveAgents()
+
+    // Remove this agent's releases from the cache
+    s.store.mu.Lock()
+    filtered := s.store.releases[:0]
+    for _, r := range s.store.releases {
+        if r.AgentID != id {
+            filtered = append(filtered, r)
+        }
+    }
+    s.store.releases = filtered
+    s.store.mu.Unlock()
+    _ = s.store.saveReleases()
+
+    http.Redirect(w, r, "/agents", http.StatusSeeOther)
+}
+
+// ---- Utils -----------------------------------------------------------------
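+
+// For illustration (hypothetical input), the normalization below turns
+//
+//    { "Version": "1.0", "releaseDate": "2025-01-01", "Platform": "linux" }
+//
+// into
+//
+//    { "version": "1.0", "released_at": "2025-01-01", "os": "linux" }
+//
+// (asset maps get the analogous treatment, e.g. size → size_bytes).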
+
+// parseAnyReleases scans arbitrarily nested JSON structures for arrays of
+// release-like objects. Found objects are normalized with respect to the
+// known alias fields and then decoded into []Release.
+func parseAnyReleases(raw []byte) ([]Release, error) {
+    var v any
+    if err := json.Unmarshal(raw, &v); err != nil {
+        return nil, err
+    }
+
+    // 3a) Special hierarchical SEND.NRW schema (releases→branch→channel→arch→bit→os→obj)
+    if arr := parseHierarchicalFromRoot(v); len(arr) > 0 {
+        b, _ := json.Marshal(arr)
+        var rr []Release
+        if err := json.Unmarshal(b, &rr); err == nil && len(rr) > 0 {
+            return rr, nil
+        }
+    }
+
+    // 3b) Generic: collect arbitrary arrays of object maps and normalize them
+    var buckets [][]map[string]any
+    collectObjectArrays(v, &buckets)
+    var all []Release
+    for _, arr := range buckets {
+        norm := normalizeReleaseSlice(arr)
+        b, _ := json.Marshal(norm)
+        var rr []Release
+        if err := json.Unmarshal(b, &rr); err == nil {
+            keep := 0
+            for _, r := range rr {
+                if r.Version != "" || len(r.Assets) > 0 {
+                    keep++
+                }
+            }
+            if keep > 0 {
+                all = append(all, rr...)
+            }
+        }
+    }
+    return all, nil
+}
+
+// parseHierarchicalFromRoot recognizes structures of the form
+//
+//    releases → branch → channel → arch → bitness → os → { release obj }
+//
+// and returns normalized release maps.
+func parseHierarchicalFromRoot(root any) []map[string]any {
+    m, ok := root.(map[string]any)
+    if !ok {
+        return nil
+    }
+
+    var defBranch, defChannel, defVendor, defProduct string
+    if s, ok := m["default_branch"].(string); ok {
+        defBranch = s
+    }
+    if s, ok := m["default_channel"].(string); ok {
+        defChannel = s
+    }
+    if s, ok := m["vendor"].(string); ok {
+        defVendor = s
+    }
+    if s, ok := m["product"].(string); ok {
+        defProduct = s
+    }
+
+    relRoot, ok := m["releases"].(map[string]any)
+    if !ok {
+        return nil
+    }
+
+    out := make([]map[string]any, 0)
+    for branchKey, channelsV := range relRoot {
+        channels, ok := channelsV.(map[string]any)
+        if !ok {
+            continue
+        }
+        bk := branchKey
+        if bk == "" {
+            bk = defBranch
+        }
+        for channelKey, archsV := range channels {
+            archs, ok := archsV.(map[string]any)
+            if !ok {
+                continue
+            }
+            ck := channelKey
+            if ck == "" {
+                ck = defChannel
+            }
+            for archKey, bitsV := range archs {
+                bits, ok := bitsV.(map[string]any)
+                if !ok {
+                    continue
+                }
+                ak := archKey
+                for _, osesV := range bits {
+                    oses, ok := osesV.(map[string]any)
+                    if !ok {
+                        continue
+                    }
+                    for osKey, leafV := range oses {
+                        leaf, ok := leafV.(map[string]any)
+                        if !ok {
+                            continue
+                        }
+                        nm := normalizeReleaseMap(leaf)
+                        if _, ok := nm["branch"]; !ok && bk != "" {
+                            nm["branch"] = bk
+                        }
+                        if _, ok := nm["channel"]; !ok && ck != "" {
+                            nm["channel"] = ck
+                        }
+                        if _, ok := nm["arch"]; !ok && ak != "" {
+                            nm["arch"] = ak
+                        }
+                        if _, ok := nm["os"]; !ok && osKey != "" {
+                            nm["os"] = osKey
+                        }
+                        if _, ok := nm["vendor"]; !ok && defVendor != "" {
+                            nm["vendor"] = defVendor
+                        }
+                        if _, ok := nm["product"]; !ok && defProduct != "" {
+                            nm["product"] = defProduct
+                        }
+                        out = append(out, nm)
+                    }
+                }
+            }
+        }
+    }
+    return out
+}
+
+func collectObjectArrays(v any, out *[][]map[string]any) {
+    switch x := v.(type) {
+    case []any:
+        // Check whether it is an array of objects
+        var objArr []map[string]any
+        ok := true
+        for _, e := range x {
+            m, isObj := e.(map[string]any)
+            if !isObj {
+                ok = false
+                break
+            }
+            objArr = append(objArr, m)
+        }
+        if ok && len(objArr) > 0 {
+            *out = append(*out, objArr)
+        }
+        // Continue searching recursively
+        for _, e := range x {
+            collectObjectArrays(e, out)
+        }
+    case map[string]any:
+        for _, e := range x {
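+            // Recurse into every map value; candidate arrays may sit at any depth.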
+            collectObjectArrays(e, out)
+        }
+    }
+}
+
+func normalizeReleaseSlice(arr []map[string]any) []map[string]any {
+    out := make([]map[string]any, 0, len(arr))
+    for _, m := range arr {
+        out = append(out, normalizeReleaseMap(m))
+    }
+    return out
+}
+
+func normalizeReleaseMap(m map[string]any) map[string]any {
+    // Convert keys to snake_case & map aliases
+    out := make(map[string]any, len(m))
+    for k, v := range m {
+        out[camelToSnake(k)] = v
+    }
+
+    alias := func(dst string, alts ...string) {
+        if _, ok := out[dst]; ok {
+            return
+        }
+        for _, a := range alts {
+            if val, ok := out[a]; ok {
+                out[dst] = val
+                return
+            }
+        }
+    }
+
+    // Release fields
+    alias("version", "ver", "v")
+    alias("build", "build_id")
+    alias("released_at", "release_date", "releasedAt", "published_at", "publish_date", "date", "time", "timestamp")
+    alias("branch", "br")
+    alias("channel", "chan")
+    alias("os", "platform")
+    alias("arch", "architecture")
+    alias("notes", "changelog", "summary")
+
+    // Normalize assets (if present)
+    if raw, ok := out["assets"].([]any); ok {
+        var aset []map[string]any
+        for _, e := range raw {
+            if am, ok := e.(map[string]any); ok {
+                aset = append(aset, normalizeAssetMap(am))
+            }
+        }
+        out["assets"] = aset
+    }
+
+    return out
+}
+
+func cleanReleases(in []Release) []Release {
+    for i := range in {
+        for j := range in[i].Assets {
+            if in[i].Assets[j].SHA256 == "-" {
+                in[i].Assets[j].SHA256 = ""
+            }
+            if in[i].Assets[j].SignatureURL == "-" {
+                in[i].Assets[j].SignatureURL = ""
+            }
+        }
+    }
+    return in
+}
+
+func normalizeAssetMap(m map[string]any) map[string]any {
+    out := make(map[string]any, len(m))
+    for k, v := range m {
+        out[camelToSnake(k)] = v
+    }
+    alias := func(dst string, alts ...string) {
+        if _, ok := out[dst]; ok {
+            return
+        }
+        for _, a := range alts {
+            if val, ok := out[a]; ok {
+                out[dst] = val
+                return
+            }
+        }
+    }
+    alias("url", "href", "link")
+    alias("sha256", "sha256sum", "checksum", "hash")
+    alias("size_bytes", "size", "bytes")
+    alias("signature_url", "sig", "signature", "signatureUrl")
+    alias("content_type", "mime", "mimetype", "type")
+    return out
+}
+
+func camelToSnake(s string) string {
+    // very simple CamelCase→snake_case converter
+    var b strings.Builder
+    for i, r := range s {
+        if unicode.IsUpper(r) {
+            if i > 0 {
+                b.WriteByte('_')
+            }
+            b.WriteRune(unicode.ToLower(r))
+        } else {
+            b.WriteRune(r)
+        }
+    }
+    return b.String()
+}
+
+func genID() string {
+    // simple time-based ID, sufficient for local tooling purposes
+    return fmt.Sprintf("a_%d", time.Now().UnixNano())
+}
+
+func dateStr(t time.Time) string {
+    if t.IsZero() {
+        return ""
+    }
+    return t.Format("2006-01-02")
+}
+
+// ---- Templates & styles (inline, no external CDNs) --------------------------
+
+var baseStyle = `
+:root{--bg:#0b1324;--card:#111a30;--muted:#93a0b0;--txt:#e9eef7;--acc:#4cc9f0;--ok:#22c55e;--warn:#f59e0b;--err:#ef4444}
+*{box-sizing:border-box} body{margin:0;font-family:ui-sans-serif,system-ui,-apple-system,Segoe UI,Roboto,Ubuntu,"Helvetica Neue";background:var(--bg);color:var(--txt)}
+.header{position:sticky;top:0;z-index:10;background:linear-gradient(90deg,#0b1324,#0e1a33);border-bottom:1px solid #1c2947;padding:16px}
+.container{max-width:90%;margin:0 auto;padding:16px}
+.card{background:var(--card);border:1px solid #1c2947;border-radius:16px;box-shadow:0 6px 18px rgba(0,0,0,.25)}
+.row{display:flex;gap:12px;flex-wrap:wrap}
+.btn{display:inline-block;padding:10px 14px;border-radius:12px;border:1px solid #2a3a63;background:#152448;color:var(--txt);text-decoration:none;cursor:pointer}
+.btn:hover{background:#19305a}
+.btn-acc{border-color:#2b9bc2}
+.badge{padding:2px 8px;border-radius:999px;border:1px solid #2a3a63;color:var(--muted);font-size:12px}
+.input, select{background:#0b1429;border:1px solid #2a3a63;border-radius:12px;padding:10px;color:var(--txt);width:100%}
+label{font-size:12px;color:var(--muted)}
+.table{width:100%;border-collapse:separate;border-spacing:0}
+.table th, .table td{padding:10px 12px;border-bottom:1px solid #1c2947}
+.table th{position:sticky;top:0;background:#122042;text-align:left}
+.kv{display:grid;grid-template-columns:120px 1fr;gap:6px 12px}
+.small{font-size:12px;color:var(--muted)}
+.code{font-family:ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;font-size:12px}
+hr{border:0;border-top:1px solid #1c2947;margin:12px 0}
+`
+
+var pageTmpl = template.Must(template.New("page").Funcs(template.FuncMap{
+    "fmtTime": func(t FlexTime) string {
+        if t.IsZero() {
+            return ""
+        }
+        return t.Time.Format("02.01.2006 15:04")
+    },
+}).Parse(`
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Release Hub</title>
+<style>{{.BaseStyle}}</style>
+</head>
+<body>
+<div class="header">
+  <div class="row">
+    <strong>Release Hub</strong>
+    <span class="badge">{{.CountShown}} / {{.CountTotal}} releases</span>
+    <span class="small">{{.Now}}</span>
+    {{if not .ReadOnly}}
+    <a class="btn" href="/agents">Agents</a>
+    <form method="post" action="/refresh"><button class="btn btn-acc" type="submit">Refresh</button></form>
+    {{end}}
+  </div>
+  <form method="get" action="/" class="row">
+    <input class="input" name="q" value="{{.Q}}" placeholder="Search">
+    <input class="input" name="channel" value="{{.Channel}}" placeholder="Channel">
+    <input class="input" name="branch" value="{{.Branch}}" placeholder="Branch">
+    <input class="input" name="os" value="{{.OS}}" placeholder="OS">
+    <input class="input" name="arch" value="{{.Arch}}" placeholder="Arch">
+    <input class="input" name="vendor" value="{{.Vendor}}" placeholder="Vendor">
+    <input class="input" name="product" value="{{.Product}}" placeholder="Product">
+    <input class="input" type="date" name="from" value="{{.From}}">
+    <input class="input" type="date" name="to" value="{{.To}}">
+    <button class="btn" type="submit">Filter</button>
+  </form>
+</div>
+<div class="container">
+<div class="card">
+<table class="table">
+  <thead>
+    <tr>
+      <th>Date</th><th>Version</th><th>Branch</th><th>Channel</th><th>OS</th><th>Arch</th>
+      <th>Vendor</th><th>Product</th><th>Agent</th><th>Assets</th><th>Notes</th>
+    </tr>
+  </thead>
+  <tbody>
+  {{range .Releases}}
+    <tr>
+      <td>{{fmtTime .ReleasedAt}}</td>
+      <td>{{.Version}} {{if .Build}}({{.Build}}){{end}}</td>
+      <td>{{.Branch}}</td>
+      <td>{{.Channel}}</td>
+      <td>{{.OS}}</td>
+      <td>{{.Arch}}</td>
+      <td>{{.Vendor}}</td>
+      <td>{{.Product}}</td>
+      <td>{{.AgentName}}</td>
+      <td>
+        {{if .Assets}}
+          {{range .Assets}}
+          <div class="small">
+            <a class="btn" href="{{.URL}}">Download</a> {{.ContentType}}
+            {{if .Size}}· {{.Size}} B{{end}}
+            {{if .SHA256}}· <span class="code">sha256:{{.SHA256}}</span>{{end}}
+          </div>
+          {{end}}
+        {{else}}
+          <span class="small">-</span>
+        {{end}}
+      </td>
+      <td>{{.Notes}}</td>
+    </tr>
+  {{end}}
+  </tbody>
+</table>
+</div>
+</div>
+</body>
+</html>
+`))
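+
+// Example (hypothetical host/port): the JSON API accepts the same filters as
+// the UI form above:
+//
+//    curl 'http://localhost:9090/api/releases?channel=stable&os=linux&from=2025-01-01'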
+
+var agentsTmpl = template.Must(template.New("agents").Parse(`
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Release Hub – Agents</title>
+</head>
+<body>
+<div class="container">
+<table class="table">
+  <thead>
+    <tr><th>Name</th><th>URL</th><th>Status</th><th>Actions</th></tr>
+  </thead>
+  <tbody>
+  {{range .Agents}}
+    <tr>
+      <td>{{.Name}}</td>
+      <td class="code">{{.EndpointURL}}</td>
+      <td>
+        <span class="badge">{{if .Enabled}}active{{else}}inactive{{end}}</span>
+        <span class="small">{{if .LastOK}}· OK: {{.LastOK}}{{end}} {{if .LastError}}· Error: {{.LastError}}{{end}}</span>
+      </td>
+      <td>
+        <form method="post" action="/agents/toggle"><input type="hidden" name="id" value="{{.ID}}"><button class="btn" type="submit">Toggle</button></form>
+        <form method="post" action="/agents/delete"><input type="hidden" name="id" value="{{.ID}}"><button class="btn" type="submit">Delete</button></form>
+      </td>
+    </tr>
+  {{end}}