Compare commits
34 Commits
b5ad30cfbd
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| 42b0493adb | |||
| b3c1f37632 | |||
| 40ded4d4db | |||
| 291cfa33a9 | |||
| 74fef30251 | |||
| b7729e8b39 | |||
| 9122ebf2f1 | |||
| 36d1c1512a | |||
| aaa95410e4 | |||
| 0bf22b6120 | |||
| b142a0b1a5 | |||
| baedff9e9d | |||
| 92e222f648 | |||
| 43f1d01a8a | |||
| d125e3dd54 | |||
| 6a0dc578ea | |||
| 8c05ad6ffe | |||
| 7d0f4befe1 | |||
| 10d729250f | |||
| 2bb7927d2b | |||
| 53a99625fb | |||
| 61c0646cfd | |||
| f49a2b31bb | |||
| 9f983d29eb | |||
| 4acf615a65 | |||
| 26a8596329 | |||
| 52a9cf08ee | |||
| d7877046cd | |||
| c5ee51eedd | |||
| e701583410 | |||
| f06b9ce284 | |||
| f9d12564df | |||
| 6320fcd938 | |||
| de22d7c0f2 |
12
Dockerfile
12
Dockerfile
@@ -1,13 +1,13 @@
|
||||
# ---------- build ----------
|
||||
FROM golang:1.24-alpine AS build
|
||||
WORKDIR /src
|
||||
WORKDIR /src
|
||||
COPY . .
|
||||
RUN CGO_ENABLED=0 go build -trimpath -ldflags="-s -w" -o /out/filesvc ./cmd/filesvc
|
||||
RUN CGO_ENABLED=0 go build -trimpath -ldflags="-s -w" -o /out/unified ./cmd/unified
|
||||
|
||||
# ---------- run ----------
|
||||
FROM alpine:3.22
|
||||
RUN adduser -D -u 10001 filesvc && apk add --no-cache ca-certificates
|
||||
USER filesvc
|
||||
RUN adduser -D -u 10001 unified && apk add --no-cache ca-certificates
|
||||
USER unified
|
||||
WORKDIR /app
|
||||
|
||||
# Daten & Schlüssel landen unter /data (als Volume mounten)
|
||||
@@ -17,6 +17,6 @@ ENV CONFIG_DIR=/data \
|
||||
BEACON_ADDR=:9443 \
|
||||
MAX_BODY_BYTES=8388608
|
||||
|
||||
COPY --from=build /out/filesvc /app/filesvc
|
||||
COPY --from=build /out/unified /app/unified
|
||||
EXPOSE 8080 8443 9443
|
||||
ENTRYPOINT ["/app/filesvc"]
|
||||
ENTRYPOINT ["/app/unified"]
|
||||
|
||||
@@ -1,363 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/subtle"
|
||||
"embed"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"log"
|
||||
"mime"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"git.send.nrw/sendnrw/decent-webui/internal/store"
|
||||
)
|
||||
|
||||
//go:embed ui/*
var uiFS embed.FS // embedded UI assets, served at "/" and "/static/"

// Config holds the runtime configuration of the file service,
// populated from environment variables in LoadConfig.
type Config struct {
	ListenAddr string // TCP address the HTTP server binds to (FILESVC_LISTEN)
	DataDir    string // root directory for all persisted data (FILESVC_DATA)
	APIKey     string // shared secret expected in X-API-Key; empty disables auth
}

// BlobDir returns the directory holding raw file contents.
func (c Config) BlobDir() string { return filepath.Join(c.DataDir, "blobs") }

// MetaDir returns the directory holding per-file metadata records.
func (c Config) MetaDir() string { return filepath.Join(c.DataDir, "meta") }

// TempDir returns the scratch directory used for in-progress uploads.
func (c Config) TempDir() string { return filepath.Join(c.DataDir, "tmp") }
|
||||
|
||||
// getenv returns the value of environment variable k, falling back to
// d when the variable is unset or empty.
func getenv(k, d string) string {
	v := os.Getenv(k)
	if v == "" {
		return d
	}
	return v
}
|
||||
|
||||
func LoadConfig() Config {
|
||||
addr := getenv("FILESVC_LISTEN", ":8085")
|
||||
datadir := getenv("FILESVC_DATA", "/data")
|
||||
key := os.Getenv("FILESVC_API_KEY")
|
||||
if key == "" {
|
||||
log.Println("[warn] FILESVC_API_KEY is empty — set it for protection")
|
||||
}
|
||||
return Config{ListenAddr: addr, DataDir: datadir, APIKey: key}
|
||||
}
|
||||
|
||||
// App bundles the loaded configuration with the backing blob/metadata
// store; all HTTP handlers are methods on this type.
type App struct {
	cfg   Config
	store *store.Store
}
|
||||
|
||||
// main wires up the store, the API and UI routes, and runs the HTTP
// server until SIGINT/SIGTERM, then shuts down gracefully.
func main() {
	cfg := LoadConfig()
	// Ensure the whole data-directory layout exists before opening the store.
	for _, p := range []string{cfg.DataDir, cfg.BlobDir(), cfg.MetaDir(), cfg.TempDir()} {
		if err := os.MkdirAll(p, 0o755); err != nil {
			log.Fatalf("mkdir %s: %v", p, err)
		}
	}

	st, err := store.Open(cfg.BlobDir(), cfg.MetaDir(), cfg.TempDir())
	if err != nil {
		log.Fatal(err)
	}
	app := &App{cfg: cfg, store: st}

	mux := http.NewServeMux()
	// API routes — all except /healthz go through app.with (API-key auth).
	mux.HandleFunc("/healthz", app.health)
	mux.HandleFunc("/v1/files", app.with(app.files))
	mux.HandleFunc("/v1/files/", app.with(app.fileByID))   // /v1/files/{id}[ /meta]
	mux.HandleFunc("/v1/uploads", app.with(app.uploadsRoot))  // POST init
	mux.HandleFunc("/v1/uploads/", app.with(app.uploadsByID)) // parts/complete/abort

	// UI routes (embedded). "/" serves only the exact root path so that
	// unknown paths 404 instead of silently serving index.html.
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path != "/" {
			http.NotFound(w, r)
			return
		}
		http.ServeFileFS(w, r, uiFS, "ui/index.html")
	})
	mux.Handle("/static/", http.StripPrefix("/static/", http.FileServerFS(uiFS)))

	srv := &http.Server{
		Addr:              cfg.ListenAddr,
		Handler:           logMiddleware(securityHeaders(mux)),
		ReadTimeout:       60 * time.Second,
		ReadHeaderTimeout: 10 * time.Second,
		// WriteTimeout 0 = unlimited — presumably so large file downloads
		// can stream for arbitrarily long; confirm this is intentional.
		WriteTimeout: 0,
		IdleTimeout:  120 * time.Second,
	}

	// Serve in the background so the main goroutine can wait for signals.
	go func() {
		log.Printf("file-service listening on %s", cfg.ListenAddr)
		if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
			log.Fatalf("server: %v", err)
		}
	}()

	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
	<-stop
	log.Println("shutting down...")
	// Give in-flight requests up to 10s to finish; shutdown errors are
	// deliberately ignored — the process is exiting either way.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	_ = srv.Shutdown(ctx)
}
|
||||
|
||||
func (a *App) with(h func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if a.cfg.APIKey != "" {
|
||||
key := r.Header.Get("X-API-Key")
|
||||
if subtle.ConstantTimeCompare([]byte(key), []byte(a.cfg.APIKey)) != 1 {
|
||||
http.Error(w, "unauthorized", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
}
|
||||
h(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
// logMiddleware logs each request's method, path, and handling
// duration once the wrapped handler has returned.
func logMiddleware(next http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		began := time.Now()
		next.ServeHTTP(w, r)
		log.Printf("%s %s %s", r.Method, r.URL.Path, time.Since(began))
	}
	return http.HandlerFunc(fn)
}
|
||||
|
||||
// securityHeaders sets baseline hardening headers on every response,
// plus a Content-Security-Policy that is relaxed for the UI routes and
// fully locked down for the API.
func securityHeaders(next http.Handler) http.Handler {
	const (
		uiCSP  = "default-src 'self'; script-src 'self'; style-src 'self'; img-src 'self' data:; connect-src 'self'; object-src 'none'; base-uri 'self'; frame-ancestors 'none'"
		apiCSP = "default-src 'none'; object-src 'none'; base-uri 'none'; frame-ancestors 'none'"
	)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		hdr := w.Header()
		// Always-sensible security headers.
		hdr.Set("X-Content-Type-Options", "nosniff")
		hdr.Set("X-Frame-Options", "DENY")
		hdr.Set("Referrer-Policy", "no-referrer")

		// The UI (/, /static/...) may load CSS/JS and XHR from "self".
		if r.URL.Path == "/" || strings.HasPrefix(r.URL.Path, "/static/") {
			hdr.Set("Content-Security-Policy", uiCSP)
		} else {
			// Strict policy for the API.
			hdr.Set("Content-Security-Policy", apiCSP)
		}

		next.ServeHTTP(w, r)
	})
}
|
||||
|
||||
func (a *App) writeJSON(w http.ResponseWriter, status int, v any) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(status)
|
||||
_ = json.NewEncoder(w).Encode(v)
|
||||
}
|
||||
|
||||
func (a *App) health(w http.ResponseWriter, _ *http.Request) {
|
||||
w.WriteHeader(200)
|
||||
_, _ = w.Write([]byte("ok"))
|
||||
}
|
||||
|
||||
// --- Routes ---
|
||||
// /v1/files (GET list, POST upload)
|
||||
// files handles /v1/files: GET lists stored files (with optional
// search and paging), POST uploads a new file either as a multipart
// form ("file" field) or as a raw request body.
func (a *App) files(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case http.MethodGet:
		// Paging params fall back to offset 0 / limit 50 on bad input.
		q := r.URL.Query().Get("q")
		off := atoiDefault(r.URL.Query().Get("offset"), 0)
		lim := atoiDefault(r.URL.Query().Get("limit"), 50)
		items, next, err := a.store.List(r.Context(), q, off, lim)
		if err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
		a.writeJSON(w, 200, map[string]any{"items": items, "next": next})
	case http.MethodPost:
		// Hard cap on request size to protect the server.
		r.Body = http.MaxBytesReader(w, r.Body, 1<<34) // ~16GiB
		ct := r.Header.Get("Content-Type")
		name := r.Header.Get("X-Filename")
		meta := r.URL.Query().Get("meta")
		if strings.HasPrefix(ct, "multipart/") {
			// Multipart path: read the "file" form field; up to 32 MiB
			// is buffered in memory, the rest spills to disk.
			if err := r.ParseMultipartForm(32 << 20); err != nil {
				http.Error(w, err.Error(), 400)
				return
			}
			f, hdr, err := r.FormFile("file")
			if err != nil {
				http.Error(w, err.Error(), 400)
				return
			}
			defer f.Close()
			// The form filename wins over the X-Filename header.
			if hdr != nil {
				name = hdr.Filename
			}
			rec, err := a.store.Put(r.Context(), f, name, meta)
			if err != nil {
				http.Error(w, err.Error(), 500)
				return
			}
			a.writeJSON(w, 201, rec)
			return
		}
		// Raw-body path: stream the request body directly into the store.
		rec, err := a.store.Put(r.Context(), r.Body, name, meta)
		if err != nil {
			http.Error(w, err.Error(), 500)
			return
		}
		a.writeJSON(w, 201, rec)
	default:
		w.WriteHeader(http.StatusMethodNotAllowed)
	}
}
|
||||
|
||||
// /v1/files/{id} and /v1/files/{id}/meta
|
||||
func (a *App) fileByID(w http.ResponseWriter, r *http.Request) {
|
||||
// path after /v1/files/
|
||||
rest := strings.TrimPrefix(r.URL.Path, "/v1/files/")
|
||||
parts := strings.Split(rest, "/")
|
||||
if len(parts) == 0 || parts[0] == "" {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
id := parts[0]
|
||||
|
||||
if len(parts) == 2 && parts[1] == "meta" {
|
||||
switch r.Method {
|
||||
case http.MethodGet:
|
||||
rec, err := a.store.GetMeta(r.Context(), id)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 404)
|
||||
return
|
||||
}
|
||||
a.writeJSON(w, 200, rec)
|
||||
case http.MethodPut:
|
||||
var m map[string]string
|
||||
if err := json.NewDecoder(r.Body).Decode(&m); err != nil {
|
||||
http.Error(w, err.Error(), 400)
|
||||
return
|
||||
}
|
||||
rec, err := a.store.UpdateMeta(r.Context(), id, m)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
a.writeJSON(w, 200, rec)
|
||||
default:
|
||||
w.WriteHeader(http.StatusMethodNotAllowed)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// /v1/files/{id}
|
||||
switch r.Method {
|
||||
case http.MethodGet:
|
||||
f, rec, err := a.store.Open(r.Context(), id)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 404)
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
ctype := rec.ContentType
|
||||
if ctype == "" {
|
||||
ctype = mime.TypeByExtension(filepath.Ext(rec.Name))
|
||||
}
|
||||
if ctype == "" {
|
||||
ctype = "application/octet-stream"
|
||||
}
|
||||
w.Header().Set("Content-Type", ctype)
|
||||
w.Header().Set("Content-Length", strconv.FormatInt(rec.Size, 10))
|
||||
w.Header().Set("Accept-Ranges", "bytes")
|
||||
if r.URL.Query().Get("download") == "1" {
|
||||
w.Header().Set("Content-Disposition", "attachment; filename=\""+rec.SafeName()+"\"")
|
||||
}
|
||||
http.ServeContent(w, r, rec.SafeName(), rec.CreatedAt, f)
|
||||
case http.MethodDelete:
|
||||
if err := a.store.Delete(r.Context(), id); err != nil {
|
||||
http.Error(w, err.Error(), 404)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(204)
|
||||
default:
|
||||
w.WriteHeader(http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
|
||||
// /v1/uploads (POST) and /v1/uploads/{uid}/ ...
|
||||
func (a *App) uploadsRoot(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPost {
|
||||
w.WriteHeader(http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
name := r.URL.Query().Get("name")
|
||||
meta := r.URL.Query().Get("meta")
|
||||
u, err := a.store.UploadInit(r.Context(), name, meta)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
a.writeJSON(w, 201, u)
|
||||
}
|
||||
|
||||
// uploadsByID routes the chunked-upload sub-resources:
//
//	PUT    /v1/uploads/{uid}/parts/{n}  — upload part n (1-based)
//	POST   /v1/uploads/{uid}/complete   — assemble parts into a file
//	DELETE /v1/uploads/{uid}            — abort and discard the session
func (a *App) uploadsByID(w http.ResponseWriter, r *http.Request) {
	rest := strings.TrimPrefix(r.URL.Path, "/v1/uploads/")
	parts := strings.Split(rest, "/")
	if len(parts) < 1 || parts[0] == "" {
		http.NotFound(w, r)
		return
	}
	uid := parts[0]

	// PUT /v1/uploads/{uid}/parts/{n}
	if len(parts) == 3 && parts[1] == "parts" {
		// -1 sentinel makes any unparsable part number fail the n<1 check.
		n := atoiDefault(parts[2], -1)
		if r.Method != http.MethodPut || n < 1 {
			http.Error(w, "invalid part", 400)
			return
		}
		if err := a.store.UploadPart(r.Context(), uid, n, r.Body); err != nil {
			http.Error(w, err.Error(), 400)
			return
		}
		w.WriteHeader(204)
		return
	}

	// POST /v1/uploads/{uid}/complete
	if len(parts) == 2 && parts[1] == "complete" {
		if r.Method != http.MethodPost {
			w.WriteHeader(http.StatusMethodNotAllowed)
			return
		}
		rec, err := a.store.UploadComplete(r.Context(), uid)
		if err != nil {
			http.Error(w, err.Error(), 400)
			return
		}
		a.writeJSON(w, 201, rec)
		return
	}

	// DELETE /v1/uploads/{uid}
	if len(parts) == 1 && r.Method == http.MethodDelete {
		if err := a.store.UploadAbort(r.Context(), uid); err != nil {
			http.Error(w, err.Error(), 400)
			return
		}
		w.WriteHeader(204)
		return
	}

	http.NotFound(w, r)
}
|
||||
|
||||
// atoiDefault parses s as a decimal integer, returning d when s is
// empty or malformed.
func atoiDefault(s string, d int) int {
	if n, err := strconv.Atoi(s); err == nil {
		return n
	}
	return d
}
|
||||
@@ -1,132 +0,0 @@
|
||||
// File Service UI — vanilla-JS single page. Talks to the /v1 API with
// an X-API-Key header; base URL and key are persisted in localStorage.
(function() {
  const $ = sel => document.querySelector(sel);
  const $$ = sel => Array.from(document.querySelectorAll(sel));
  // Paging state for the file list.
  const state = { offset: 0, limit: 20, total: null };

  // Config persistence: {apiKey, baseUrl} in localStorage under 'cfg'.
  function loadCfg() {
    try { return JSON.parse(localStorage.getItem('cfg')) || {}; } catch { return {}; }
  }
  function saveCfg(cfg) { localStorage.setItem('cfg', JSON.stringify(cfg)); }
  const cfg = loadCfg();
  $('#apiKey').value = cfg.apiKey || '';
  $('#baseUrl').value = cfg.baseUrl || '';
  $('#saveCfg').onclick = () => {
    cfg.apiKey = $('#apiKey').value.trim();
    cfg.baseUrl = $('#baseUrl').value.trim();
    saveCfg(cfg);
    refresh();
  };

  // api: fetch wrapper that prefixes the base URL, adds the API key
  // header, rejects non-2xx, and decodes JSON when the server says so.
  function api(path, opts = {}) {
    const base = cfg.baseUrl || '';
    opts.headers = Object.assign({ 'X-API-Key': cfg.apiKey || '' }, opts.headers || {});
    return fetch(base + path, opts).then(r => {
      if (!r.ok) throw new Error(`${r.status} ${r.statusText}`);
      const ct = r.headers.get('content-type') || '';
      if (ct.includes('application/json')) return r.json();
      return r.text();
    });
  }

  // refresh: reload the file list with current search/paging state.
  async function refresh() {
    const q = encodeURIComponent($('#q').value || '');
    try {
      const data = await api(`/v1/files?limit=${state.limit}&offset=${state.offset}&q=${q}`);
      renderTable(data.items || []);
      // Server returns the next offset; 0 presumably means "no more" — confirm against API.
      const next = data.next || 0;
      state.hasNext = next > 0;
      state.nextOffset = next;
      $('#pageInfo').textContent = `offset ${state.offset}`;
    } catch (e) {
      alert('List failed: ' + e.message);
    }
  }

  // renderTable: rebuild the table body from the #rowTpl template.
  function renderTable(items) {
    const tbody = $('#files tbody');
    tbody.innerHTML = '';
    const tpl = $('#rowTpl').content;
    for (const it of items) {
      const tr = tpl.cloneNode(true);
      tr.querySelector('.id').textContent = it.id;
      tr.querySelector('.name').textContent = it.name;
      tr.querySelector('.size').textContent = human(it.size);
      tr.querySelector('.created').textContent = new Date(it.createdAt).toLocaleString();
      const act = tr.querySelector('.actions');

      // Download via a synthetic <a> click so the browser handles the stream.
      const dl = btn('Download', async () => {
        const base = cfg.baseUrl || '';
        const url = `${base}/v1/files/${it.id}?download=1`;
        const a = document.createElement('a');
        a.href = url; a.download = '';
        a.click();
      });
      const meta = btn('Meta', async () => showMeta(it.id));
      const del = btn('Delete', async () => {
        if (!confirm('Delete file?')) return;
        try { await api(`/v1/files/${it.id}`, { method:'DELETE' }); refresh(); } catch(e){ alert('Delete failed: '+e.message); }
      });
      act.append(dl, meta, del);
      tbody.appendChild(tr);
    }
  }

  // Small helpers: button factory and human-readable byte sizes.
  function btn(text, on) { const b = document.createElement('button'); b.textContent = text; b.onclick = on; return b; }
  function human(n) { if (n < 1024) return n + ' B'; const u=['KB','MB','GB','TB']; let i=-1; do { n/=1024; i++; } while(n>=1024 && i<u.length-1); return n.toFixed(1)+' '+u[i]; }

  // Toolbar and pager wiring.
  $('#refresh').onclick = () => { state.offset = 0; refresh(); };
  $('#q').addEventListener('keydown', e => { if (e.key==='Enter') { state.offset=0; refresh(); } });
  $('#prev').onclick = () => { state.offset = Math.max(0, state.offset - state.limit); refresh(); };
  $('#next').onclick = () => { if (state.hasNext) { state.offset = state.nextOffset; refresh(); } };

  // Upload form (single-request multipart upload).
  $('#uploadForm').addEventListener('submit', async (e) => {
    e.preventDefault();
    const f = $('#fileInput').files[0];
    if (!f) return alert('Pick a file');
    const meta = $('#metaInput').value.trim();
    const fd = new FormData();
    fd.append('file', f);
    fd.append('meta', meta);
    try { await api('/v1/files?meta='+encodeURIComponent(meta), { method: 'POST', body: fd }); refresh(); } catch(e){ alert('Upload failed: '+e.message); }
  });

  // Chunked upload: init → PUT part(s) → complete (or abort).
  $('#chunkInit').onclick = async () => {
    try {
      const name = $('#chunkName').value.trim() || 'file';
      const meta = $('#chunkMeta').value.trim();
      const r = await api(`/v1/uploads?name=${encodeURIComponent(name)}&meta=${encodeURIComponent(meta)}`, { method:'POST' });
      $('#chunkId').textContent = r.id;
    } catch(e){ alert('Init failed: '+e.message); }
  };
  $('#chunkPut').onclick = async () => {
    const uid = $('#chunkId').textContent.trim();
    const part = parseInt($('#chunkPart').value,10) || 1;
    const file = $('#chunkFile').files[0];
    if (!uid) return alert('Init first');
    if (!file) return alert('Choose a file (this will send the whole file as one part).');
    try { await api(`/v1/uploads/${uid}/parts/${part}`, { method:'PUT', body: file }); alert('Part uploaded'); } catch(e){ alert('PUT failed: '+e.message); }
  };
  $('#chunkComplete').onclick = async () => {
    const uid = $('#chunkId').textContent.trim(); if (!uid) return;
    try { await api(`/v1/uploads/${uid}/complete`, { method:'POST' }); refresh(); } catch(e){ alert('Complete failed: '+e.message); }
  };
  $('#chunkAbort').onclick = async () => {
    const uid = $('#chunkId').textContent.trim(); if (!uid) return;
    try { await api(`/v1/uploads/${uid}`, { method:'DELETE' }); $('#chunkId').textContent=''; alert('Aborted'); } catch(e){ alert('Abort failed: '+e.message); }
  };

  // showMeta: view and (via prompt) edit a file's metadata as JSON.
  async function showMeta(id) {
    try {
      const rec = await api(`/v1/files/${id}/meta`);
      const json = prompt('Edit meta as JSON (object of string:string)', JSON.stringify(rec.meta||{}));
      if (json == null) return;
      const obj = JSON.parse(json);
      await api(`/v1/files/${id}/meta`, { method:'PUT', headers:{'Content-Type':'application/json'}, body: JSON.stringify(obj) });
      refresh();
    } catch(e){ alert('Meta failed: '+e.message); }
  }

  // Initial load.
  refresh();
})();
|
||||
@@ -1,77 +0,0 @@
|
||||
<!doctype html>
<html lang="en">
<head>
  <meta charset="utf-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1" />
  <title>File Service UI</title>
  <link rel="stylesheet" href="/static/ui/style.css" />
</head>
<body>
  <!-- Header: service title plus API base/key configuration (persisted by app.js). -->
  <header>
    <h1>File Service</h1>
    <div class="cfg">
      <label>API Base <input id="baseUrl" value="" placeholder="(same origin)"/></label>
      <label>API Key <input id="apiKey" placeholder="X-API-Key"/></label>
      <button id="saveCfg">Save</button>
    </div>
  </header>

  <main>
    <!-- Upload card: single-request upload plus optional chunked upload controls. -->
    <section class="card">
      <h2>Upload</h2>
      <form id="uploadForm">
        <input type="file" id="fileInput" name="file" required />
        <input type="text" id="metaInput" placeholder="meta e.g. project=alpha,owner=alice" />
        <button type="submit">Upload</button>
      </form>
      <details>
        <summary>Chunked upload</summary>
        <div class="chunk">
          <input type="text" id="chunkName" placeholder="filename"/>
          <input type="text" id="chunkMeta" placeholder="meta key=val,..."/>
          <button id="chunkInit">Init</button>
          <span id="chunkId"></span>
          <div>
            <input type="file" id="chunkFile"/>
            <input type="number" id="chunkPart" min="1" value="1"/>
            <button id="chunkPut">PUT Part</button>
            <button id="chunkComplete">Complete</button>
            <button id="chunkAbort">Abort</button>
          </div>
        </div>
      </details>
    </section>

    <!-- File list card: search, table, and pager (rows rendered from #rowTpl). -->
    <section class="card">
      <h2>Files</h2>
      <div class="toolbar">
        <input type="search" id="q" placeholder="search by name"/>
        <button id="refresh">Refresh</button>
      </div>
      <table id="files">
        <thead>
          <tr><th>ID</th><th>Name</th><th>Size</th><th>Created</th><th>Actions</th></tr>
        </thead>
        <tbody></tbody>
      </table>
      <div class="pager">
        <button id="prev">Prev</button>
        <span id="pageInfo"></span>
        <button id="next">Next</button>
      </div>
    </section>
  </main>

  <!-- Row template cloned by app.js for each file entry. -->
  <template id="rowTpl">
    <tr>
      <td class="mono id"></td>
      <td class="name"></td>
      <td class="size"></td>
      <td class="created"></td>
      <td class="actions"></td>
    </tr>
  </template>

  <script src="/static/ui/app.js"></script>
</body>
</html>
|
||||
@@ -1,19 +0,0 @@
|
||||
/* Dark theme palette shared across the UI. */
:root { --bg: #0b0f14; --fg: #e6eef8; --muted: #9bb0c8; --card: #121923; --accent: #5aa9ff; }
* { box-sizing: border-box; }
body { margin: 0; font-family: ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, sans-serif; background: var(--bg); color: var(--fg); }
/* Header bar with the config inputs. */
header { display: flex; justify-content: space-between; align-items: center; padding: 16px 20px; background: #0e141b; border-bottom: 1px solid #2a3543; }
h1 { margin: 0; font-size: 20px; }
.cfg label { margin-right: 8px; font-size: 12px; color: var(--muted); }
.cfg input { margin-left: 6px; padding: 6px 8px; background: #0c1219; border: 1px solid #2a3543; color: var(--fg); border-radius: 6px; }
button { padding: 8px 12px; border: 1px solid #2a3543; background: #111a24; color: var(--fg); border-radius: 8px; cursor: pointer; }
button:hover { border-color: var(--accent); }
/* Content area: cards for the upload form and the file table. */
main { padding: 20px; max-width: 1100px; margin: 0 auto; }
.card { background: var(--card); border: 1px solid #1f2a38; border-radius: 14px; padding: 16px; margin-bottom: 16px; box-shadow: 0 6px 20px rgba(0,0,0,.25); }
.toolbar { display:flex; gap: 8px; align-items: center; margin-bottom: 10px; }
table { width: 100%; border-collapse: collapse; }
th, td { text-align: left; padding: 8px; border-bottom: 1px solid #213043; }
.mono { font-family: ui-monospace, SFMono-Regular, Menlo, Consolas, monospace; font-size: 12px; }
/* Truncate long filenames with an ellipsis. */
.name { max-width: 340px; overflow: hidden; text-overflow: ellipsis; white-space: nowrap; }
.pager { display:flex; gap: 8px; align-items:center; justify-content:flex-end; padding-top: 8px; }
.actions button { margin-right: 6px; }
summary { cursor: pointer; }
||||
728
cmd/unified/main.go
Normal file
728
cmd/unified/main.go
Normal file
@@ -0,0 +1,728 @@
|
||||
package main
|
||||
|
||||
import (
	"context"
	"crypto/subtle"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"os/signal"
	"path/filepath"
	"strconv"
	"strings"
	"syscall"
	"time"

	// Adjust these import paths to match your go.mod.
	"git.send.nrw/sendnrw/decent-webui/internal/admin"
	"git.send.nrw/sendnrw/decent-webui/internal/blobfs"
	"git.send.nrw/sendnrw/decent-webui/internal/filesvc"
	"git.send.nrw/sendnrw/decent-webui/internal/mesh"
)
|
||||
|
||||
// parseDuration parses s (whitespace-trimmed) as a time.Duration,
// falling back to def when s is empty, malformed, or non-positive.
func parseDuration(s string, def time.Duration) time.Duration {
	if d, err := time.ParseDuration(strings.TrimSpace(s)); err == nil && d > 0 {
		return d
	}
	return def
}
|
||||
|
||||
/*** Config ***/
|
||||
func loadConfig() AppConfig {
|
||||
// HTTP
|
||||
httpAddr := getenvDefault("ADDR", ":8080")
|
||||
|
||||
// API
|
||||
apiKey := os.Getenv("FILE_SERVICE_API_KEY")
|
||||
|
||||
// Admin UI (BasicAuth optional)
|
||||
adminUser := os.Getenv("ADMIN_USER")
|
||||
adminPass := os.Getenv("ADMIN_PASS")
|
||||
|
||||
// Mesh (mit sinnvollen Defaults)
|
||||
m := mesh.Config{
|
||||
BindAddr: getenvDefault("MESH_BIND", ":9090"),
|
||||
AdvertURL: os.Getenv("MESH_ADVERT"), // kann leer sein → wir leiten ab
|
||||
Seeds: splitCSV(os.Getenv("MESH_SEEDS")),
|
||||
ClusterSecret: os.Getenv("MESH_CLUSTER_SECRET"),
|
||||
EnableDiscovery: parseBoolEnv("MESH_ENABLE_DISCOVERY", false),
|
||||
DiscoveryAddress: getenvDefault("MESH_DISCOVERY_ADDR", "239.8.8.8:9898"),
|
||||
}
|
||||
|
||||
m.PeerTTL = parseDuration(os.Getenv("MESH_PEER_TTL"), 2*time.Minute)
|
||||
m.PruneInterval = parseDuration(os.Getenv("MESH_PRUNE_INTERVAL"), 30*time.Second)
|
||||
|
||||
m.HelloInterval = parseDuration(os.Getenv("MESH_HELLO_INTERVAL"), 20*time.Second)
|
||||
m.HelloFanout = parseIntEnv(os.Getenv("MESH_HELLO_FANOUT"), 8)
|
||||
|
||||
m.BlobTimeout = parseDuration(os.Getenv("MESH_BLOB_TIMEOUT"), 0)
|
||||
|
||||
// Wenn keine AdvertURL gesetzt ist, versuche eine sinnvolle Herleitung:
|
||||
if strings.TrimSpace(m.AdvertURL) == "" {
|
||||
m.AdvertURL = inferAdvertURL(m.BindAddr)
|
||||
log.Printf("[mesh] MESH_ADVERT nicht gesetzt – abgeleitet: %s", m.AdvertURL)
|
||||
}
|
||||
|
||||
// Minimal-Validierung mit hilfreicher Meldung
|
||||
if strings.TrimSpace(m.BindAddr) == "" {
|
||||
log.Fatal("MESH_BIND fehlt (z.B. :9090)")
|
||||
}
|
||||
if strings.TrimSpace(m.AdvertURL) == "" {
|
||||
log.Fatal("MESH_ADVERT fehlt und konnte nicht abgeleitet werden (z.B. http://unified_a:9090)")
|
||||
}
|
||||
if strings.TrimSpace(m.ClusterSecret) == "" {
|
||||
log.Printf("[mesh] WARN: MESH_CLUSTER_SECRET ist leer – für produktive Netze unbedingt setzen!")
|
||||
}
|
||||
|
||||
return AppConfig{
|
||||
HTTPAddr: httpAddr,
|
||||
APIKey: apiKey,
|
||||
AdminUser: adminUser,
|
||||
AdminPass: adminPass,
|
||||
Mesh: m,
|
||||
PublicDownloads: parseBoolEnv("PUBLIC_DOWNLOADS", false),
|
||||
PublicPath: getenvDefault("PUBLIC_DOWNLOAD_PATH", "/dl"),
|
||||
}
|
||||
}
|
||||
|
||||
// --- Helpers
|
||||
|
||||
// getenvDefault returns the value of environment variable k, or def
// when the variable is unset or empty.
func getenvDefault(k, def string) string {
	if v := os.Getenv(k); v != "" {
		return v
	}
	return def
}
|
||||
|
||||
// parseBoolEnv interprets environment variable k as a boolean. "1",
// "true", "yes" and "on" (case-insensitive, trimmed) mean true; any
// other non-empty value means false; unset/empty yields def.
func parseBoolEnv(k string, def bool) bool {
	switch strings.ToLower(strings.TrimSpace(os.Getenv(k))) {
	case "":
		return def
	case "1", "true", "yes", "on":
		return true
	default:
		return false
	}
}
|
||||
|
||||
// parseIntEnv parses environment variable k as a decimal integer,
// returning def when the variable is unset, empty, or malformed.
func parseIntEnv(k string, def int) int {
	v := strings.TrimSpace(os.Getenv(k))
	if v == "" {
		return def
	}
	if n, err := strconv.Atoi(v); err == nil {
		return n
	}
	return def
}
|
||||
|
||||
// splitCSV splits a comma-separated list into trimmed, non-empty,
// de-duplicated entries (first occurrence wins). It returns nil for
// blank input.
func splitCSV(s string) []string {
	if s = strings.TrimSpace(s); s == "" {
		return nil
	}
	fields := strings.Split(s, ",")
	out := make([]string, 0, len(fields))
	seen := make(map[string]struct{}, len(fields))
	for _, f := range fields {
		f = strings.TrimSpace(f)
		if f == "" {
			continue
		}
		if _, dup := seen[f]; dup {
			continue
		}
		seen[f] = struct{}{}
		out = append(out, f)
	}
	return out
}
|
||||
|
||||
// inferAdvertURL derives a default advertise URL for the mesh:
// the host is taken from MESH_ADVERT_HOST (e.g. a compose service
// name), then HOSTNAME, then "localhost"; the port is taken from the
// bind address (e.g. ":9090" → 9090), defaulting to 9090.
func inferAdvertURL(meshBind string) string {
	host := strings.TrimSpace(os.Getenv("MESH_ADVERT_HOST"))
	if host == "" {
		host = strings.TrimSpace(os.Getenv("HOSTNAME"))
	}
	if host == "" {
		host = "localhost"
	}
	port := "9090"
	if i := strings.LastIndex(meshBind, ":"); i >= 0 && i+1 < len(meshBind) {
		port = meshBind[i+1:]
	}
	return fmt.Sprintf("http://%s:%s", host, port)
}
|
||||
|
||||
// AppConfig is the complete runtime configuration of the unified
// service, assembled by loadConfig from environment variables.
type AppConfig struct {
	HTTPAddr  string // HTTP listen address (ENV: ADDR)
	APIKey    string // Bearer token for the API (ENV: FILE_SERVICE_API_KEY); empty disables auth
	AdminUser string // BasicAuth user for the admin UI (ENV: ADMIN_USER); optional
	AdminPass string // BasicAuth password for the admin UI (ENV: ADMIN_PASS); optional
	Mesh      mesh.Config

	PublicDownloads bool   // ENV: PUBLIC_DOWNLOADS (default false)
	PublicPath      string // ENV: PUBLIC_DOWNLOAD_PATH (default "/dl")
}
|
||||
|
||||
/*** Middleware ***/

// authMiddleware enforces a "Bearer <apiKey>" Authorization check.
// Dev mode: with an empty (or blank) key the chain is left unprotected.
// FIX: the token is now compared with subtle.ConstantTimeCompare — the
// previous plain != comparison was timing-sensitive and inconsistent
// with the constant-time X-API-Key check used elsewhere in the project.
func authMiddleware(apiKey string, next http.Handler) http.Handler {
	if strings.TrimSpace(apiKey) == "" {
		return next
	}
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		got := r.Header.Get("Authorization")
		token, ok := strings.CutPrefix(got, "Bearer ")
		if !ok || subtle.ConstantTimeCompare([]byte(token), []byte(apiKey)) != 1 {
			http.Error(w, "unauthorized", http.StatusUnauthorized)
			return
		}
		next.ServeHTTP(w, r)
	})
}
|
||||
|
||||
// cors applies permissive CORS headers and answers preflight OPTIONS
// requests with 204 without invoking the wrapped handler. Tighten the
// allowed origin list as needed for production.
func cors(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		h := w.Header()
		h.Set("Access-Control-Allow-Origin", "*")
		h.Set("Access-Control-Allow-Headers", "Authorization, Content-Type")
		h.Set("Access-Control-Allow-Methods", "GET,POST,PUT,DELETE,OPTIONS")
		if r.Method == http.MethodOptions {
			w.WriteHeader(http.StatusNoContent)
			return
		}
		next.ServeHTTP(w, r)
	})
}
|
||||
|
||||
// accessLog logs each request's method, path, and total handling
// duration after the wrapped handler returns.
func accessLog(next http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		began := time.Now()
		next.ServeHTTP(w, r)
		log.Printf("%s %s %s", r.Method, r.URL.Path, time.Since(began))
	}
	return http.HandlerFunc(fn)
}
|
||||
|
||||
/*** HTTP helpers ***/

// writeJSON writes v as a JSON response with the given status code.
// Encoding errors are ignored: the header is already sent, so there is
// nothing useful left to do.
func writeJSON(w http.ResponseWriter, code int, v any) {
	h := w.Header()
	h.Set("Content-Type", "application/json")
	w.WriteHeader(code)
	_ = json.NewEncoder(w).Encode(v)
}
|
||||
|
||||
/*** API-Routen ***/
|
||||
|
||||
func fileRoutes(mux *http.ServeMux, store filesvc.MeshStore, blobs blobfs.Store) {
|
||||
// Health
|
||||
mux.HandleFunc("/api/v1/health", func(w http.ResponseWriter, r *http.Request) {
|
||||
writeJSON(w, http.StatusOK, map[string]string{"status": "ok"})
|
||||
})
|
||||
|
||||
// List + Create
|
||||
mux.HandleFunc("/api/v1/items", func(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.Method {
|
||||
case http.MethodGet:
|
||||
nextStr := strings.TrimSpace(r.URL.Query().Get("next"))
|
||||
next := filesvc.ID(nextStr)
|
||||
items, nextOut, err := store.List(r.Context(), next, 100)
|
||||
if err != nil {
|
||||
writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
writeJSON(w, http.StatusOK, map[string]any{"items": items, "next": nextOut})
|
||||
case http.MethodPost:
|
||||
var in struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&in); err != nil {
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid json"})
|
||||
return
|
||||
}
|
||||
it, err := store.Create(r.Context(), in.Name)
|
||||
if err != nil {
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
writeJSON(w, http.StatusCreated, it)
|
||||
default:
|
||||
w.WriteHeader(http.StatusMethodNotAllowed)
|
||||
}
|
||||
})
|
||||
|
||||
// Rename
|
||||
mux.HandleFunc("/api/v1/items/rename", func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPost {
|
||||
w.WriteHeader(http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
var in struct {
|
||||
ID filesvc.ID `json:"id"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&in); err != nil {
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid json"})
|
||||
return
|
||||
}
|
||||
it, err := store.Rename(r.Context(), in.ID, in.Name)
|
||||
if err != nil {
|
||||
if errors.Is(err, filesvc.ErrForbidden) {
|
||||
writeJSON(w, http.StatusForbidden, map[string]string{"error": "only owner may modify"})
|
||||
return
|
||||
}
|
||||
status := http.StatusBadRequest
|
||||
if errors.Is(err, filesvc.ErrNotFound) {
|
||||
status = http.StatusNotFound
|
||||
}
|
||||
writeJSON(w, status, map[string]string{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
writeJSON(w, http.StatusOK, it)
|
||||
})
|
||||
|
||||
// Delete
|
||||
mux.HandleFunc("/api/v1/items/delete", func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPost {
|
||||
w.WriteHeader(http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
var in struct {
|
||||
ID filesvc.ID `json:"id"`
|
||||
}
|
||||
if err := json.NewDecoder(r.Body).Decode(&in); err != nil {
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid json"})
|
||||
return
|
||||
}
|
||||
it, err := store.Delete(r.Context(), in.ID)
|
||||
_ = blobs.Delete(r.Context(), string(in.ID))
|
||||
if err != nil {
|
||||
if errors.Is(err, filesvc.ErrForbidden) {
|
||||
writeJSON(w, http.StatusForbidden, map[string]string{"error": "only owner may modify"})
|
||||
return
|
||||
}
|
||||
status := http.StatusBadRequest
|
||||
if errors.Is(err, filesvc.ErrNotFound) {
|
||||
status = http.StatusNotFound
|
||||
}
|
||||
writeJSON(w, status, map[string]string{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
writeJSON(w, http.StatusOK, it)
|
||||
})
|
||||
}
|
||||
|
||||
// apiFiles wires upload/download endpoints
|
||||
func apiFiles(mux *http.ServeMux, store filesvc.MeshStore, blobs blobfs.Store, meshNode *mesh.Node) {
|
||||
// Multipart-Upload
|
||||
mux.HandleFunc("/api/v1/files/upload", func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPost {
|
||||
w.WriteHeader(http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
if err := r.ParseMultipartForm(128 << 20); err != nil { // 128MB
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "bad form"})
|
||||
return
|
||||
}
|
||||
fh, hdr, err := r.FormFile("file")
|
||||
if err != nil {
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "missing file"})
|
||||
return
|
||||
}
|
||||
defer fh.Close()
|
||||
name := strings.TrimSpace(r.FormValue("name"))
|
||||
if name == "" {
|
||||
name = hdr.Filename
|
||||
}
|
||||
|
||||
it, err := store.Create(r.Context(), name)
|
||||
if err != nil {
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
meta, err := blobs.Save(r.Context(), string(it.ID), name, fh)
|
||||
if err != nil {
|
||||
_, _ = store.Delete(r.Context(), it.ID)
|
||||
writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
_ = meshNode.SyncNow(r.Context())
|
||||
writeJSON(w, http.StatusCreated, map[string]any{
|
||||
"file": it,
|
||||
"blob": meta,
|
||||
})
|
||||
})
|
||||
|
||||
// Download
|
||||
mux.HandleFunc("/api/v1/files/", func(w http.ResponseWriter, r *http.Request) {
|
||||
parts := strings.Split(strings.TrimPrefix(r.URL.Path, "/api/v1/files/"), "/")
|
||||
if len(parts) != 2 || parts[1] != "download" {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
id := parts[0]
|
||||
if strings.TrimSpace(id) == "" {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// 1) Metadaten prüfen
|
||||
it, err := store.Get(r.Context(), filesvc.ID(id))
|
||||
if err != nil || it.Deleted {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// 2) lokal
|
||||
if rc, meta, err := blobs.Open(r.Context(), id); err == nil {
|
||||
defer rc.Close()
|
||||
serveBlob(w, r, rc, meta, it.Name)
|
||||
return
|
||||
}
|
||||
|
||||
// 3) remote holen & cachen
|
||||
it1, _ := store.Get(r.Context(), filesvc.ID(id))
|
||||
peers := meshNode.PeerList()
|
||||
ttl := 2 * time.Minute
|
||||
if cfg := meshNode.Config(); cfg.PeerTTL > 0 {
|
||||
ttl = cfg.PeerTTL
|
||||
}
|
||||
if !isOwnerActive(it1.Owner, peers, ttl) {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
rrc, _, _, _, err := meshNode.FetchBlobAny(r.Context(), id)
|
||||
if err != nil {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
defer rrc.Close()
|
||||
if _, err := blobs.Save(r.Context(), id, it1.Name, rrc); err != nil {
|
||||
http.Error(w, "cache failed", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// 4) lokal streamen
|
||||
lrc, meta, err := blobs.Open(r.Context(), id)
|
||||
if err != nil {
|
||||
http.Error(w, "open failed", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer lrc.Close()
|
||||
serveBlob(w, r, lrc, meta, it1.Name)
|
||||
})
|
||||
}
|
||||
|
||||
/*** Mesh <-> Store Mapping (falls Typen getrennt sind) ***/
|
||||
|
||||
func toMeshSnapshot(s filesvc.Snapshot) mesh.Snapshot {
|
||||
out := mesh.Snapshot{Items: make([]mesh.Item, 0, len(s.Items))}
|
||||
for _, it := range s.Items {
|
||||
out.Items = append(out.Items, mesh.Item{
|
||||
ID: it.ID,
|
||||
Name: it.Name,
|
||||
UpdatedAt: it.UpdatedAt,
|
||||
Deleted: it.Deleted,
|
||||
Owner: it.Owner,
|
||||
})
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func fromMeshSnapshot(ms mesh.Snapshot) filesvc.Snapshot {
|
||||
out := filesvc.Snapshot{Items: make([]filesvc.File, 0, len(ms.Items))}
|
||||
for _, it := range ms.Items {
|
||||
out.Items = append(out.Items, filesvc.File{
|
||||
ID: it.ID,
|
||||
Name: it.Name,
|
||||
UpdatedAt: it.UpdatedAt,
|
||||
Deleted: it.Deleted,
|
||||
Owner: it.Owner,
|
||||
})
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// isOwnerActive prüft, ob der Owner in der Peer-Liste als "aktiv" gilt.
|
||||
func isOwnerActive(owner string, peers []mesh.Peer, ttl time.Duration) bool {
|
||||
owner = strings.TrimSpace(owner)
|
||||
if owner == "" {
|
||||
return true
|
||||
}
|
||||
cutoff := time.Now().Add(-ttl)
|
||||
for _, p := range peers {
|
||||
if strings.TrimSpace(p.URL) != owner {
|
||||
continue
|
||||
}
|
||||
// Self ist per Definition aktiv
|
||||
if p.Self {
|
||||
return true
|
||||
}
|
||||
// ohne LastSeen: als inaktiv behandeln
|
||||
if p.LastSeen.IsZero() {
|
||||
return false
|
||||
}
|
||||
return p.LastSeen.After(cutoff)
|
||||
}
|
||||
// Owner ist nicht mal in der Liste: inaktiv
|
||||
return false
|
||||
}
|
||||
|
||||
/*** main ***/
|
||||
|
||||
// main wires together configuration, the persistent file store, the mesh
// node, the JSON API, optional public downloads and the admin UI, starts
// the servers and a background blob pruner, then waits for SIGINT/SIGTERM
// and shuts everything down with a 5s grace period.
func main() {
	cfg := loadConfig()

	// Domain store (mesh-capable); the node ID is the advertised mesh URL.
	nodeID := strings.TrimSpace(cfg.Mesh.AdvertURL)
	//st := filesvc.NewMemStore(nodeID)

	// Blob storage and persistent metadata live under DATA_DIR.
	//mcfg := mesh.FromEnv()
	blobs := blobfs.New(getenvDefault("DATA_DIR", "./data"))
	dataDir := getenvDefault("DATA_DIR", "./data")
	metaPath := filepath.Join(dataDir, "meta", "items.json")
	st := filesvc.NewMemStorePersistent(nodeID, metaPath)

	// Mesh node: callbacks bridge the mesh protocol to the local store
	// (snapshot export/import) and to local blob reads.
	mnode, err := mesh.New(cfg.Mesh, mesh.Callbacks{
		GetSnapshot: func(ctx context.Context) (mesh.Snapshot, error) {
			s, err := st.Snapshot(ctx)
			if err != nil {
				return mesh.Snapshot{}, err
			}
			return toMeshSnapshot(s), nil
		},
		ApplyRemote: func(ctx context.Context, s mesh.Snapshot) error {
			return st.ApplyRemote(ctx, fromMeshSnapshot(s))
		},
		// BlobOpen serves a local blob to a requesting peer; deleted or
		// unknown items are reported as not found.
		BlobOpen: func(ctx context.Context, id string) (io.ReadCloser, string, string, int64, error) { //5588
			it, err := st.Get(ctx, filesvc.ID(id))
			if err != nil || it.Deleted {
				return nil, "", "", 0, fmt.Errorf("not found")
			}
			rc, meta, err := blobs.Open(ctx, id)
			if err != nil {
				return nil, "", "", 0, err
			}
			return rc, it.Name, meta.ContentType, meta.Size, nil
		},
	})
	if err != nil {
		log.Fatalf("mesh init: %v", err)
	}

	// Start the background peer pruner.
	mnode.StartPeerPruner()

	// Mesh server goroutine.
	go func() {
		log.Printf("[mesh] listening on %s advertise %s seeds=%v discovery=%v",
			cfg.Mesh.BindAddr, cfg.Mesh.AdvertURL, cfg.Mesh.Seeds, cfg.Mesh.EnableDiscovery)
		if err := mnode.Serve(); err != nil && !errors.Is(err, http.ErrServerClosed) {
			log.Fatalf("mesh serve: %v", err)
		}
	}()

	// Background pruner: every minute, delete locally cached blobs whose
	// owner has gone offline (not seen within the peer TTL).
	go func() {
		ticker := time.NewTicker(1 * time.Minute)
		defer ticker.Stop()
		for range ticker.C {
			// Determine the set of active owners.
			peers := mnode.PeerList()
			ttl := 2 * time.Minute
			if cfg := mnode.Config(); cfg.PeerTTL > 0 {
				ttl = cfg.PeerTTL
			}
			cutoff := time.Now().Add(-ttl)

			active := map[string]bool{}
			for _, p := range peers {
				if p.Self {
					active[p.URL] = true
					continue
				}
				if !p.LastSeen.IsZero() && p.LastSeen.After(cutoff) {
					active[p.URL] = true
				}
			}

			// Walk all items page by page; drop blobs of offline owners.
			var next filesvc.ID
			for {
				items, nextOut, _ := st.List(context.Background(), next, 1000)
				for _, it := range items {
					if it.Owner == "" || active[it.Owner] {
						continue
					}
					_ = blobs.Delete(context.Background(), it.ID)
				}
				if nextOut == "" {
					break
				}
				next = nextOut
			}
		}
	}()

	// Root mux.
	root := http.NewServeMux()

	// API (bearer-token auth).
	//blobs := blobfs.New(getenvDefault("DATA_DIR", "./data"))
	apiMux := http.NewServeMux()
	fileRoutes(apiMux, st, blobs)
	apiFiles(apiMux, st, blobs, mnode)
	root.Handle("/api/", authMiddleware(cfg.APIKey, apiMux))

	if cfg.PublicDownloads {
		registerPublicDownloads(root, st, blobs, mnode, cfg.PublicPath)
	}

	// Admin UI (optional BasicAuth via ADMIN_USER/ADMIN_PASS).
	adminRoot := http.NewServeMux()
	admin.Register(adminRoot, admin.Deps{Store: st, Mesh: mnode, Blob: blobs})
	adminUser := os.Getenv("ADMIN_USER")
	adminPass := os.Getenv("ADMIN_PASS")
	if strings.TrimSpace(adminUser) != "" {
		wrapped := admin.BasicAuth(adminUser, adminPass, adminRoot)
		root.Handle("/admin", wrapped)
		root.Handle("/admin/", wrapped)
	} else {
		root.Handle("/admin", adminRoot)
		root.Handle("/admin/", adminRoot)
	}

	// Landing page redirects to /admin.
	root.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, "/admin", http.StatusFound)
	})

	// Final handler stack: CORS around access logging around the mux.
	handler := cors(accessLog(root))

	srv := &http.Server{
		Addr:              cfg.HTTPAddr,
		Handler:           handler,
		ReadHeaderTimeout: 5 * time.Second,
	}

	// Graceful shutdown: serve in the background, block on signals below.
	go func() {
		log.Printf("http listening on %s (api=/api/v1, admin=/admin)", cfg.HTTPAddr)
		log.Printf("mesh listening on %s", cfg.Mesh.BindAddr)
		if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
			log.Fatalf("http server: %v", err)
		}
	}()

	// Wait for OS signals.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, syscall.SIGINT, syscall.SIGTERM)
	<-stop

	// Shut down both servers; errors are ignored on this best-effort path.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_ = srv.Shutdown(ctx)
	_ = mnode.Close(ctx)
	log.Println("shutdown complete")
}
|
||||
|
||||
// Public: GET {base}/{id}
|
||||
// Beispiel: /dl/1
|
||||
func registerPublicDownloads(mux *http.ServeMux, store filesvc.MeshStore, blobs blobfs.Store, meshNode *mesh.Node, base string) {
|
||||
if !strings.HasPrefix(base, "/") {
|
||||
base = "/" + base
|
||||
}
|
||||
mux.HandleFunc(base+"/", func(w http.ResponseWriter, r *http.Request) {
|
||||
id := strings.TrimSpace(strings.TrimPrefix(r.URL.Path, base+"/"))
|
||||
if id == "" {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// 1) Metadaten prüfen
|
||||
it, err := store.Get(r.Context(), filesvc.ID(id))
|
||||
if err != nil || it.Deleted {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// 2) Lokal versuchen
|
||||
if rc, meta, err := blobs.Open(context.Background(), id); err == nil {
|
||||
defer rc.Close()
|
||||
serveBlob(w, r, rc, meta, it.Name)
|
||||
return
|
||||
}
|
||||
|
||||
// (Optional) Owner-Online-Check — wenn du auch bei offline Ownern liefern willst, block auskommentieren
|
||||
{
|
||||
peers := meshNode.PeerList()
|
||||
ttl := 2 * time.Minute
|
||||
if !isOwnerActive(it.Owner, peers, ttl) {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// 3) Aus Mesh holen — EIGENER Timeout-Kontext, NICHT r.Context()
|
||||
rrc, remoteName, _, _, err := meshNode.FetchBlobAny(context.Background(), id)
|
||||
if err != nil {
|
||||
http.NotFound(w, r)
|
||||
return
|
||||
}
|
||||
defer rrc.Close()
|
||||
|
||||
filename := strings.TrimSpace(remoteName)
|
||||
if filename == "" {
|
||||
filename = it.Name
|
||||
}
|
||||
|
||||
// 4) Lokal cachen — KEIN Request-Kontext, damit Save nicht abbricht
|
||||
if _, err := blobs.Save(context.Background(), id, filename, rrc); err != nil {
|
||||
log.Printf("[public] cache save failed id=%s name=%q: %v", id, filename, err)
|
||||
http.Error(w, "cache failed: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// 5) Erneut lokal öffnen und streamen
|
||||
lrc, meta, err := blobs.Open(context.Background(), id)
|
||||
if err != nil {
|
||||
http.Error(w, "open failed: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer lrc.Close()
|
||||
serveBlob(w, r, lrc, meta, filename)
|
||||
})
|
||||
}
|
||||
|
||||
// Hilfsfunktion: setzt sinnvolle Header und streamt die Datei
|
||||
func serveBlob(w http.ResponseWriter, r *http.Request, rc io.ReadSeeker, meta blobfs.Meta, downloadName string) {
|
||||
if meta.SHA256 != "" {
|
||||
etag := `W/"` + meta.SHA256 + `"`
|
||||
if r.Header.Get("If-None-Match") == etag {
|
||||
w.WriteHeader(http.StatusNotModified)
|
||||
return
|
||||
}
|
||||
w.Header().Set("ETag", etag)
|
||||
}
|
||||
if meta.ContentType == "" {
|
||||
meta.ContentType = "application/octet-stream"
|
||||
}
|
||||
if downloadName == "" {
|
||||
downloadName = meta.Name
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", meta.ContentType)
|
||||
w.Header().Set("Content-Length", strconv.FormatInt(meta.Size, 10))
|
||||
w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, downloadName))
|
||||
w.Header().Set("Access-Control-Expose-Headers", "Content-Disposition")
|
||||
w.Header().Set("X-Robots-Tag", "noindex")
|
||||
_, _ = io.Copy(w, rc)
|
||||
}
|
||||
361
internal/admin/admin.go
Normal file
361
internal/admin/admin.go
Normal file
@@ -0,0 +1,361 @@
|
||||
package admin
|
||||
|
||||
import (
	"context"
	"crypto/subtle"
	_ "embed"
	"fmt"
	"html/template"
	"io"
	"net/http"
	"strconv"
	"strings"
	"time"

	"git.send.nrw/sendnrw/decent-webui/internal/blobfs"
	"git.send.nrw/sendnrw/decent-webui/internal/filesvc"
	"git.send.nrw/sendnrw/decent-webui/internal/mesh"
)
|
||||
|
||||
/*** Embedded templates ***/

//go:embed tpl/layout.html
var layoutHTML string

//go:embed tpl/partials_items.html
var itemsPartialHTML string

//go:embed tpl/partials_peers.html
var peersPartialHTML string

var (
	// tplLayout renders the outer admin shell page.
	tplLayout = template.Must(template.New("layout").Parse(layoutHTML))
	// tplItems renders the items table partial. timeRFC3339 formats a
	// unix-nanosecond timestamp as RFC3339 in UTC; 0 yields "".
	tplItems = template.Must(template.New("items").Funcs(template.FuncMap{
		"timeRFC3339": func(unixNano int64) string {
			if unixNano == 0 {
				return ""
			}
			return time.Unix(0, unixNano).UTC().Format(time.RFC3339)
		},
	}).Parse(itemsPartialHTML))
	// tplPeers renders the mesh peer table partial.
	tplPeers = template.Must(template.New("peers").Parse(peersPartialHTML))
)

// Deps bundles the services the admin UI operates on.
type Deps struct {
	Store filesvc.MeshStore // item metadata store
	Mesh  *mesh.Node        // mesh node: peers, sync, remote blob fetch
	Blob  blobfs.Store      // local blob storage
}
|
||||
|
||||
// Register mounts all /admin routes on mux.
// Authentication is optionally layered IN FRONT of Register by the caller
// (BasicAuth middleware); see main.go.
func Register(mux *http.ServeMux, d Deps) {
	// Dashboard: outer shell; HTMX loads /admin/items into the main panel.
	mux.HandleFunc("/admin", func(w http.ResponseWriter, r *http.Request) {
		renderLayout(w, r, "Files", "/admin/items")
	})

	// Items partial: paginated table with blob presence and owner status.
	mux.HandleFunc("/admin/items", func(w http.ResponseWriter, r *http.Request) {
		nextID := filesvc.ID(strings.TrimSpace(r.URL.Query().Get("next")))
		items, nextOut, _ := d.Store.List(r.Context(), nextID, 100)

		// row is the per-item view model for the items template.
		type row struct {
			ID          string
			Name        string
			UpdatedAt   int64
			HasBlob     bool
			Size        int64
			Owner       string
			OwnerActive bool
		}
		rows := make([]row, 0, len(items))

		// Peer list + TTL decide whether each owner counts as online.
		peers := d.Mesh.PeerList()
		ttl := 2 * time.Minute
		if cfg := d.Mesh.Config(); cfg.PeerTTL > 0 {
			ttl = cfg.PeerTTL
		}

		for _, it := range items {
			meta, ok, _ := d.Blob.Stat(r.Context(), it.ID)
			rows = append(rows, row{
				ID:          it.ID,
				Name:        it.Name,
				UpdatedAt:   it.UpdatedAt,
				HasBlob:     ok,
				Size:        meta.Size,
				Owner:       it.Owner,
				OwnerActive: isOwnerActive(it.Owner, peers, ttl),
			})
		}

		_ = tplItems.Execute(w, map[string]any{
			"Items": rows,
			"Next":  nextOut,
		})
	})

	// Takeover: claim ownership of an item. Only permitted while the
	// current owner is offline.
	mux.HandleFunc("/admin/items/takeover", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
			return
		}
		id := strings.TrimSpace(r.FormValue("id"))
		if id != "" {
			// Only allow when the owner really is offline.
			it, err := d.Store.Get(r.Context(), filesvc.ID(id))
			if err == nil {
				peers := d.Mesh.PeerList()
				ttl := 2 * time.Minute
				if cfg := d.Mesh.Config(); cfg.PeerTTL > 0 {
					ttl = cfg.PeerTTL
				}
				if !isOwnerActive(it.Owner, peers, ttl) {
					// Determine our own URL from the peer list.
					self := ""
					for _, p := range peers {
						if p.Self {
							self = p.URL
							break
						}
					}
					if self == "" {
						self = "unknown-self"
					}
					if _, err := d.Store.TakeoverOwner(r.Context(), filesvc.ID(id), self); err == nil {
						_ = d.Mesh.SyncNow(r.Context())
					}
				}
			}
		}
		renderItemsPartial(w, r, d)
	})

	// Upload (multipart/form-data, field "file", optional name override).
	mux.HandleFunc("/admin/files/upload", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
			return
		}
		if err := r.ParseMultipartForm(64 << 20); err != nil { // 64MB
			http.Error(w, "bad form", http.StatusBadRequest)
			return
		}
		fh, hdr, err := r.FormFile("file")
		if err != nil {
			http.Error(w, "missing file", http.StatusBadRequest)
			return
		}
		defer fh.Close()
		name := strings.TrimSpace(r.FormValue("name"))
		if name == "" {
			name = hdr.Filename
		}

		// 1) Create the metadata item (obtain an ID).
		it, err := d.Store.Create(r.Context(), name)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}

		// 2) Store the blob.
		if _, err := d.Blob.Save(r.Context(), (it.ID), name, fh); err != nil {
			// Roll back (tombstone) the item on blob failure.
			_, _ = d.Store.Delete(r.Context(), it.ID)
			http.Error(w, "save failed: "+err.Error(), http.StatusInternalServerError)
			return
		}
		_ = d.Mesh.SyncNow(r.Context()) // best-effort push
		http.Redirect(w, r, "/admin", http.StatusSeeOther)
	})

	// Download (admin — BasicAuth may protect this route).
	mux.HandleFunc("/admin/files/", func(w http.ResponseWriter, r *http.Request) {
		// /admin/files/{id}/download
		parts := strings.Split(strings.TrimPrefix(r.URL.Path, "/admin/files/"), "/")
		if len(parts) != 2 || parts[1] != "download" {
			http.NotFound(w, r)
			return
		}
		id := parts[0]
		if strings.TrimSpace(id) == "" {
			http.NotFound(w, r)
			return
		}

		// 1) Try local storage first.
		if rc, meta, err := d.Blob.Open(r.Context(), id); err == nil {
			defer rc.Close()
			w.Header().Set("Content-Type", meta.ContentType)
			w.Header().Set("Content-Length", strconv.FormatInt(meta.Size, 10))
			w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, meta.Name))
			_, _ = io.Copy(w, rc)
			return
		}

		// 2) Fetch remotely via the mesh.
		rrc, name, _, _, err := d.Mesh.FetchBlobAny(r.Context(), id)
		if err != nil {
			http.NotFound(w, r)
			return
		}
		defer rrc.Close()

		// 3) Cache locally (Save consumes the stream).
		if _, err := d.Blob.Save(r.Context(), id, name, rrc); err != nil {
			http.Error(w, "cache failed: "+err.Error(), http.StatusInternalServerError)
			return
		}
		// 4) Serve from the local store (accurate size/content type).
		lrc, meta, err := d.Blob.Open(r.Context(), id)
		if err != nil {
			http.Error(w, "open failed", http.StatusInternalServerError)
			return
		}
		defer lrc.Close()
		w.Header().Set("Content-Type", meta.ContentType)
		w.Header().Set("Content-Length", strconv.FormatInt(meta.Size, 10))
		w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, meta.Name))
		_, _ = io.Copy(w, lrc)
	})

	// Peers partial.
	mux.HandleFunc("/admin/peers", func(w http.ResponseWriter, r *http.Request) {
		peers := d.Mesh.PeerList()
		_ = tplPeers.Execute(w, map[string]any{
			"Peers": peers,
			"Now":   time.Now(),
		})
	})

	// CREATE: new metadata-only item.
	mux.HandleFunc("/admin/items/create", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
			return
		}
		name := strings.TrimSpace(r.FormValue("name"))
		if name != "" {
			_, _ = d.Store.Create(r.Context(), name)
			_ = d.Mesh.SyncNow(r.Context())
		}
		// Render the partial instead of redirecting (HTMX swap target).
		renderItemsPartial(w, r, d)
	})

	// RENAME
	mux.HandleFunc("/admin/items/rename", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
			return
		}
		id := strings.TrimSpace(r.FormValue("id"))
		newName := strings.TrimSpace(r.FormValue("name"))
		if id != "" && newName != "" {
			_, _ = d.Store.Rename(r.Context(), filesvc.ID(id), newName)
			_ = d.Mesh.SyncNow(r.Context())
		}
		renderItemsPartial(w, r, d)
	})

	// DELETE: item plus its local blob.
	mux.HandleFunc("/admin/items/delete", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
			return
		}
		id := strings.TrimSpace(r.FormValue("id"))
		if id != "" {
			_, _ = d.Store.Delete(r.Context(), filesvc.ID(id))
			_ = d.Blob.Delete(r.Context(), id)
			_ = d.Mesh.SyncNow(r.Context())
		}
		renderItemsPartial(w, r, d)
	})

	// Manual mesh sync, then back to the peers view.
	mux.HandleFunc("/admin/mesh/syncnow", func(w http.ResponseWriter, r *http.Request) {
		_ = d.Mesh.SyncNow(context.Background())
		http.Redirect(w, r, "/admin/peers", http.StatusSeeOther)
	})
}
|
||||
|
||||
func renderLayout(w http.ResponseWriter, _ *http.Request, active string, initial string) {
|
||||
_ = tplLayout.Execute(w, map[string]any{
|
||||
"Active": active,
|
||||
"Init": initial, // initialer HTMX Swap-Endpunkt
|
||||
})
|
||||
}
|
||||
|
||||
/*** Optional: einfache BasicAuth (siehe main.go) ***/
|
||||
func BasicAuth(user, pass string, next http.Handler) http.Handler {
|
||||
if strings.TrimSpace(user) == "" {
|
||||
return next // deaktiviert
|
||||
}
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
u, p, ok := r.BasicAuth()
|
||||
if !ok || u != user || p != pass {
|
||||
w.Header().Set("WWW-Authenticate", `Basic realm="admin"`)
|
||||
http.Error(w, "unauthorized", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// rebuild & render items partial for HTMX swaps
|
||||
func renderItemsPartial(w http.ResponseWriter, r *http.Request, d Deps) {
|
||||
type row struct {
|
||||
ID string
|
||||
Name string
|
||||
UpdatedAt int64
|
||||
HasBlob bool
|
||||
Size int64
|
||||
Owner string
|
||||
OwnerActive bool
|
||||
}
|
||||
|
||||
nextID := filesvc.ID(strings.TrimSpace(r.URL.Query().Get("next")))
|
||||
items, nextOut, _ := d.Store.List(r.Context(), nextID, 100)
|
||||
|
||||
peers := d.Mesh.PeerList()
|
||||
ttl := 2 * time.Minute
|
||||
|
||||
rows := make([]row, 0, len(items))
|
||||
for _, it := range items {
|
||||
meta, ok, _ := d.Blob.Stat(r.Context(), (it.ID))
|
||||
rows = append(rows, row{
|
||||
ID: (it.ID),
|
||||
Name: it.Name,
|
||||
UpdatedAt: it.UpdatedAt,
|
||||
HasBlob: ok,
|
||||
Size: meta.Size,
|
||||
Owner: it.Owner,
|
||||
OwnerActive: isOwnerActive(it.Owner, peers, ttl),
|
||||
})
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
_ = tplItems.Execute(w, map[string]any{
|
||||
"Items": rows,
|
||||
"Next": nextOut,
|
||||
})
|
||||
}
|
||||
|
||||
func isOwnerActive(owner string, peers []mesh.Peer, ttl time.Duration) bool {
|
||||
if strings.TrimSpace(owner) == "" {
|
||||
return true
|
||||
}
|
||||
cutoff := time.Now().Add(-ttl)
|
||||
for _, p := range peers {
|
||||
if strings.TrimSpace(p.URL) == strings.TrimSpace(owner) {
|
||||
// Self ist immer aktiv, sonst nach LastSeen
|
||||
if p.Self {
|
||||
return true
|
||||
}
|
||||
if p.LastSeen.IsZero() {
|
||||
return false
|
||||
}
|
||||
return p.LastSeen.After(cutoff)
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
39
internal/admin/tpl/layout.html
Normal file
39
internal/admin/tpl/layout.html
Normal file
@@ -0,0 +1,39 @@
|
||||
<!doctype html>
|
||||
<html lang="de">
|
||||
<head>
|
||||
<meta charset="utf-8"/>
|
||||
<meta name="viewport" content="width=device-width,initial-scale=1"/>
|
||||
<title>Admin</title>
|
||||
<script src="https://unpkg.com/htmx.org@1.9.12"></script>
|
||||
<style>
|
||||
:root { --bg:#0b1220; --card:#121a2b; --muted:#94a3b8; --text:#e5e7eb; --accent:#4f46e5; }
|
||||
html,body { margin:0; background:var(--bg); color:var(--text); font-family: ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Ubuntu; }
|
||||
.wrap { max-width: 1200px; margin: 40px auto; padding: 0 16px; }
|
||||
.nav { display:flex; gap:12px; margin-bottom:16px; }
|
||||
.btn { background:var(--card); border:1px solid #243044; padding:8px 12px; border-radius:10px; color:var(--text); cursor:pointer; }
|
||||
.btn:hover { border-color:#3b4a66; }
|
||||
.btn-primary { background: var(--accent); border-color: var(--accent); color:white; }
|
||||
.card { background:var(--card); border:1px solid #1f2937; border-radius:16px; padding:16px; box-shadow: 0 6px 30px rgba(0,0,0,.25); }
|
||||
.row { display:flex; gap:16px; flex-wrap:wrap; }
|
||||
table { width:100%; border-collapse: collapse; }
|
||||
th, td { border-bottom: 1px solid #1f2937; padding:10px; text-align:left; }
|
||||
input[type="text"] { background:#0f1626; border:1px solid #263246; color:var(--text); padding:8px 10px; border-radius:10px; width:100%; }
|
||||
small { color: var(--muted); }
|
||||
.muted { color: var(--muted); }
|
||||
.pill { font-size: 12px; padding: 2px 8px; border:1px solid #2b364b; border-radius:999px; background:#0f1626; color:#bcd; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="wrap">
|
||||
<h1 style="margin:0 0 10px 0;">Unified Admin</h1>
|
||||
<div class="nav">
|
||||
<button class="btn" hx-get="/admin/items" hx-target="#main" hx-swap="innerHTML">Dateien</button>
|
||||
<button class="btn" hx-get="/admin/peers" hx-target="#main" hx-swap="innerHTML">Mesh</button>
|
||||
</div>
|
||||
<div id="main" class="card" hx-get="{{.Init}}" hx-trigger="load" hx-swap="innerHTML">
|
||||
<div class="muted">Lade…</div>
|
||||
</div>
|
||||
<div style="margin-top:14px"><small>© Admin UI</small></div>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
100
internal/admin/tpl/partials_items.html
Normal file
100
internal/admin/tpl/partials_items.html
Normal file
@@ -0,0 +1,100 @@
|
||||
<div class="row">
|
||||
<div style="flex: 1 1 360px">
|
||||
<form hx-post="/admin/items/create" hx-target="#items" hx-swap="outerHTML">
|
||||
<label>Neue leere Datei (nur Metadaten)</label>
|
||||
<div style="display:flex; gap:8px; margin-top:6px">
|
||||
<input type="text" name="name" placeholder="z.B. notes.txt" required>
|
||||
<button class="btn" type="submit">Anlegen</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
<div style="flex: 1 1 360px">
|
||||
<form action="/admin/files/upload" method="post" enctype="multipart/form-data">
|
||||
<label>Datei hochladen</label>
|
||||
<div style="display:flex; gap:8px; margin-top:6px">
|
||||
<input type="file" name="file" required>
|
||||
<input type="text" name="name" placeholder="Name (optional)">
|
||||
<button class="btn btn-primary" type="submit">Upload</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div id="items" style="margin-top:14px">
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>ID</th>
|
||||
<th>Name</th>
|
||||
<th>Updated</th>
|
||||
<th>Blob</th>
|
||||
<th>Owner</th>
|
||||
<th>Status</th>
|
||||
<th style="width:320px">Aktionen</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{ range .Items }}
|
||||
<tr>
|
||||
<td><code style="font-size:12px">{{ .ID }}</code></td>
|
||||
<td>{{ .Name }}</td>
|
||||
<td><small class="muted">{{ printf "%.19s" (timeRFC3339 .UpdatedAt) }}</small></td>
|
||||
<td>
|
||||
{{ if .HasBlob }}
|
||||
<span class="pill">vorhanden</span>
|
||||
<small class="muted">{{ .Size }} B</small>
|
||||
{{ else }}
|
||||
<span class="pill" style="background:#3a0b0b;border-color:#5b1a1a;color:#fbb;">fehlt</span>
|
||||
{{ end }}
|
||||
</td>
|
||||
<td><small class="muted">{{ .Owner }}</small></td>
|
||||
<td>
|
||||
{{ if .OwnerActive }}
|
||||
<span class="pill">online</span>
|
||||
{{ else }}
|
||||
<span class="pill" style="background:#5b1a1a;border-color:#7a2b2b;color:#fbb">offline</span>
|
||||
<!-- Owner-Handover nur wenn offline -->
|
||||
<form style="display:inline"
|
||||
hx-post="/admin/items/takeover"
|
||||
hx-target="#items" hx-swap="outerHTML"
|
||||
onsubmit="return confirm('Owner übernehmen?');">
|
||||
<input type="hidden" name="id" value="{{ .ID }}">
|
||||
<button class="btn" type="submit">Übernehmen</button>
|
||||
</form>
|
||||
{{ end }}
|
||||
</td>
|
||||
<td>
|
||||
<form style="display:inline-flex; gap:6px"
|
||||
hx-post="/admin/items/rename"
|
||||
hx-target="#items" hx-swap="outerHTML">
|
||||
<input type="hidden" name="id" value="{{ .ID }}">
|
||||
<input type="text" name="name" placeholder="Neuer Name">
|
||||
<button class="btn" type="submit">Rename</button>
|
||||
</form>
|
||||
<form style="display:inline"
|
||||
hx-post="/admin/items/delete"
|
||||
hx-target="#items" hx-swap="outerHTML"
|
||||
onsubmit="return confirm('Wirklich löschen (inkl. Blob)?');">
|
||||
<input type="hidden" name="id" value="{{ .ID }}">
|
||||
<button class="btn" type="submit">Delete</button>
|
||||
</form>
|
||||
{{ if .HasBlob }}
|
||||
<a class="btn" href="/admin/files/{{ .ID }}/download">Download</a>
|
||||
{{ end }}
|
||||
</td>
|
||||
</tr>
|
||||
{{ else }}
|
||||
<tr><td colspan="7" class="muted">Keine Dateien vorhanden.</td></tr>
|
||||
{{ end }}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
{{ if .Next }}
|
||||
<div style="margin-top:10px">
|
||||
<button class="btn"
|
||||
hx-get="/admin/items?next={{ .Next }}"
|
||||
hx-target="#items" hx-swap="outerHTML">Mehr laden</button>
|
||||
<span class="pill">next={{ .Next }}</span>
|
||||
</div>
|
||||
{{ end }}
|
||||
</div>
|
||||
23
internal/admin/tpl/partials_peers.html
Normal file
23
internal/admin/tpl/partials_peers.html
Normal file
@@ -0,0 +1,23 @@
|
||||
<div style="display:flex; justify-content:space-between; align-items:center">
|
||||
<h3 style="margin:0">Mesh Peers</h3>
|
||||
<form hx-post="/admin/mesh/syncnow" hx-target="#peers" hx-swap="outerHTML">
|
||||
<button class="btn btn-primary" type="submit">Jetzt synchronisieren</button>
|
||||
</form>
|
||||
</div>
|
||||
<div id="peers" style="margin-top:10px">
|
||||
<table>
|
||||
<thead><tr><th>URL</th><th>Self</th><th>Last Seen</th></tr></thead>
|
||||
<tbody>
|
||||
{{ range .Peers }}
|
||||
<tr>
|
||||
<td>{{ .URL }}</td>
|
||||
<td>{{ if .Self }}✅{{ else }}🔗{{ end }}</td>
|
||||
<td><small class="muted">{{ .LastSeen }}</small></td>
|
||||
</tr>
|
||||
{{ else }}
|
||||
<tr><td colspan="3" class="muted">⚠️ Keine Peers vorhanden.</td></tr>
|
||||
{{ end }}
|
||||
</tbody>
|
||||
</table>
|
||||
<div class="muted" style="margin-top:8px"><small>Stand: {{ .Now }}</small></div>
|
||||
</div>
|
||||
173
internal/blobfs/blobfs.go
Normal file
173
internal/blobfs/blobfs.go
Normal file
@@ -0,0 +1,173 @@
|
||||
package blobfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"mime"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Meta describes a stored blob: the original filename, its size in
// bytes, the detected content type and the hex-encoded SHA-256 of the
// content. It is persisted as meta.json next to the blob.
type Meta struct {
	Name        string `json:"name"`
	Size        int64  `json:"size"`
	ContentType string `json:"contentType"`
	SHA256      string `json:"sha256"`
}
|
||||
|
||||
// Store is the blob persistence contract used by the file service.
type Store interface {
	// Save streams r to storage under id and returns the resulting metadata.
	Save(ctx context.Context, id string, filename string, r io.Reader) (Meta, error)
	// Open returns a seekable reader for the blob plus its metadata.
	Open(ctx context.Context, id string) (io.ReadSeekCloser, Meta, error)
	// Stat reports the blob's metadata and whether it exists.
	Stat(ctx context.Context, id string) (Meta, bool, error)
	// Delete removes blob and metadata for id.
	Delete(ctx context.Context, id string) error
}
|
||||
|
||||
// FS is a filesystem-backed Store: each id gets its own directory under
// <root>/files containing the blob file and a meta.json.
type FS struct{ root string }

// New returns an FS rooted at root; directories are created lazily on Save.
func New(root string) *FS { return &FS{root: root} }

// dir is the per-id directory; the id is sanitized so it is path-safe.
func (fs *FS) dir(id string) string { return filepath.Join(fs.root, "files", sanitizeID(id)) }

// metaPath is the metadata file inside the id's directory.
func (fs *FS) metaPath(id string) string { return filepath.Join(fs.dir(id), "meta.json") }

// blobPath is the blob file; the original extension is kept (length-capped).
func (fs *FS) blobPath(id string, name string) string {
	return filepath.Join(fs.dir(id), "blob"+safeExt(name))
}
|
||||
|
||||
// sanitizeID maps an arbitrary id onto the safe alphabet [0-9a-zA-Z-];
// every other rune becomes '_', so the result can be used as a single
// directory name without path-traversal risk.
func sanitizeID(id string) string {
	var sb strings.Builder
	sb.Grow(len(id))
	for _, r := range id {
		switch {
		case r >= '0' && r <= '9', r >= 'a' && r <= 'z', r >= 'A' && r <= 'Z', r == '-':
			sb.WriteRune(r)
		default:
			sb.WriteByte('_')
		}
	}
	return sb.String()
}
|
||||
|
||||
// safeExt returns name's file extension, capped at 16 bytes to guard
// against absurdly long extensions ending up in blob filenames.
func safeExt(name string) string {
	e := filepath.Ext(name)
	if len(e) <= 16 {
		return e
	}
	return e[:16]
}
|
||||
|
||||
func (fs *FS) Save(_ context.Context, id string, filename string, r io.Reader) (Meta, error) {
|
||||
if strings.TrimSpace(filename) == "" {
|
||||
return Meta{}, errors.New("filename required")
|
||||
}
|
||||
if err := os.MkdirAll(fs.dir(id), 0o755); err != nil {
|
||||
return Meta{}, err
|
||||
}
|
||||
tmp := filepath.Join(fs.dir(id), "blob.tmp")
|
||||
out, err := os.Create(tmp)
|
||||
if err != nil {
|
||||
return Meta{}, err
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
hasher := sha256.New()
|
||||
tee := io.TeeReader(r, hasher)
|
||||
|
||||
// Content-Type sniffen: ersten 512 Bytes puffern
|
||||
buf := make([]byte, 512)
|
||||
n, _ := io.ReadFull(tee, buf)
|
||||
if n > 0 {
|
||||
if _, err := out.Write(buf[:n]); err != nil {
|
||||
return Meta{}, err
|
||||
}
|
||||
}
|
||||
// Rest kopieren
|
||||
size := int64(n)
|
||||
written, err := io.Copy(out, tee)
|
||||
if err != nil {
|
||||
return Meta{}, err
|
||||
}
|
||||
size += written
|
||||
|
||||
// Hash
|
||||
sum := hex.EncodeToString(hasher.Sum(nil))
|
||||
|
||||
ct := http.DetectContentType(buf[:n])
|
||||
if ct == "application/octet-stream" {
|
||||
// Versuch über Dateiendung
|
||||
if ext := filepath.Ext(filename); ext != "" {
|
||||
if byExt := mime.TypeByExtension(ext); byExt != "" {
|
||||
ct = byExt
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// finaler Ort
|
||||
final := fs.blobPath(id, filename)
|
||||
if err := os.Rename(tmp, final); err != nil {
|
||||
return Meta{}, err
|
||||
}
|
||||
|
||||
meta := Meta{Name: filename, Size: size, ContentType: ct, SHA256: sum}
|
||||
if err := writeJSON(fs.metaPath(id), meta); err != nil {
|
||||
return Meta{}, err
|
||||
}
|
||||
|
||||
return meta, nil
|
||||
}
|
||||
|
||||
func (fs *FS) Open(_ context.Context, id string) (io.ReadSeekCloser, Meta, error) {
|
||||
meta, ok, err := fs.Stat(context.Background(), id)
|
||||
if err != nil {
|
||||
return nil, Meta{}, err
|
||||
}
|
||||
if !ok {
|
||||
return nil, Meta{}, os.ErrNotExist
|
||||
}
|
||||
f, err := os.Open(fs.blobPath(id, meta.Name))
|
||||
return f, meta, err
|
||||
}
|
||||
|
||||
// Stat reads meta.json for id. It returns ok=false without error when
// no metadata file exists. The recorded size is cross-checked against
// the blob on disk so a tampered meta.json cannot misreport it; a
// missing blob leaves the recorded size untouched.
// The passed context is currently unused.
func (fs *FS) Stat(_ context.Context, id string) (Meta, bool, error) {
	b, err := os.ReadFile(fs.metaPath(id))
	if err != nil {
		if os.IsNotExist(err) {
			return Meta{}, false, nil
		}
		return Meta{}, false, err
	}
	var m Meta
	if err := json.Unmarshal(b, &m); err != nil {
		return Meta{}, false, err
	}
	// cross-check the size against the filesystem (in case meta was tampered with)
	info, err := os.Stat(fs.blobPath(id, m.Name))
	if err == nil {
		m.Size = info.Size()
	}
	return m, true, nil
}
|
||||
|
||||
// Delete removes the whole per-id directory (blob and metadata).
// Deleting a non-existent id is a no-op, mirroring os.RemoveAll.
func (fs *FS) Delete(_ context.Context, id string) error {
	return os.RemoveAll(fs.dir(id))
}
|
||||
|
||||
// writeJSON marshals v to path atomically: the JSON is written to a
// sibling ".tmp" file and renamed into place, so readers never observe
// a half-written document. Fixes over the previous version: the temp
// file is removed on every failure path (it used to linger after an
// encode error), and the Close error is surfaced instead of being
// dropped, which could otherwise hide a short write before the rename.
func writeJSON(path string, v any) error {
	tmp := path + ".tmp"
	if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
		return err
	}
	f, err := os.Create(tmp)
	if err != nil {
		return err
	}
	if err := json.NewEncoder(f).Encode(v); err != nil {
		f.Close()
		_ = os.Remove(tmp)
		return err
	}
	if err := f.Close(); err != nil {
		_ = os.Remove(tmp)
		return err
	}
	return os.Rename(tmp, path)
}
|
||||
322
internal/filesvc/memstore.go
Normal file
322
internal/filesvc/memstore.go
Normal file
@@ -0,0 +1,322 @@
|
||||
package filesvc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MemStore is an in-memory, mutex-guarded file store with optional JSON
// persistence and change-event fan-out. It implements Store, Replicable
// and Watchable.
type MemStore struct {
	mu    sync.Mutex
	items map[ID]File
	self  string // this node's identity (AdvertURL/node id), used for ownership checks
	// optional eventing
	subs        []chan ChangeEvent
	persistPath string // empty = volatile, no persistence
}

// NewMemStore creates a volatile store owned by self.
func NewMemStore(self string) *MemStore {
	return &MemStore{self: strings.TrimSpace(self), items: make(map[ID]File)}
}

// NewMemStorePersistent creates a store that mirrors its state to the
// JSON file at path; a pre-existing file is reloaded on startup.
func NewMemStorePersistent(self, path string) *MemStore {
	m := NewMemStore(self)
	m.persistPath = strings.TrimSpace(path)
	// try to load previous state at startup
	_ = m.loadFromDisk()
	return m
}
|
||||
|
||||
// --- Persistenz-Helper (NEU) ---
|
||||
|
||||
// loadFromDisk restores the item map from the persistence file, merging
// the snapshot's entries (including tombstones) into the map. A missing
// file is silently accepted; a corrupt file is reported.
func (m *MemStore) loadFromDisk() error {
	if m.persistPath == "" {
		return nil
	}
	f, err := os.Open(m.persistPath)
	if err != nil {
		return nil // file does not exist on first start - that's fine
	}
	defer f.Close()
	var snap Snapshot
	if err := json.NewDecoder(f).Decode(&snap); err != nil {
		return err
	}
	m.mu.Lock()
	for _, it := range snap.Items {
		m.items[it.ID] = it
	}
	m.mu.Unlock()
	return nil
}
|
||||
|
||||
// saveLocked writes the full item map (including tombstones) to the
// persistence file using write-to-temp-then-rename, so a crash can
// never leave a half-written state file. The caller must hold m.mu.
func (m *MemStore) saveLocked() error {
	if m.persistPath == "" {
		return nil
	}
	if err := os.MkdirAll(filepath.Dir(m.persistPath), 0o755); err != nil {
		return err
	}
	// build a snapshot from the map
	snap := Snapshot{Items: make([]File, 0, len(m.items))}
	for _, it := range m.items {
		snap.Items = append(snap.Items, it)
	}
	// write atomically
	tmp := m.persistPath + ".tmp"
	f, err := os.Create(tmp)
	if err != nil {
		return err
	}
	enc := json.NewEncoder(f)
	enc.SetIndent("", " ")
	if err := enc.Encode(&snap); err != nil {
		f.Close()
		_ = os.Remove(tmp)
		return err
	}
	if err := f.Sync(); err != nil {
		f.Close()
		_ = os.Remove(tmp)
		return err
	}
	if err := f.Close(); err != nil {
		_ = os.Remove(tmp)
		return err
	}
	return os.Rename(tmp, m.persistPath)
}
|
||||
|
||||
/*** Store ***/
|
||||
|
||||
func (m *MemStore) Get(_ context.Context, id ID) (File, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
it, ok := m.items[id]
|
||||
if !ok || it.Deleted {
|
||||
return File{}, ErrNotFound
|
||||
}
|
||||
return it, nil
|
||||
}
|
||||
|
||||
func (m *MemStore) List(_ context.Context, next ID, limit int) ([]File, ID, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
if limit <= 0 || limit > 1000 {
|
||||
limit = 100
|
||||
}
|
||||
|
||||
// sortiere deterministisch nach UpdatedAt, dann ID
|
||||
all := make([]File, 0, len(m.items))
|
||||
for _, v := range m.items {
|
||||
all = append(all, v)
|
||||
}
|
||||
slices.SortFunc(all, func(a, b File) int {
|
||||
if a.UpdatedAt == b.UpdatedAt {
|
||||
if a.ID == b.ID {
|
||||
return 0
|
||||
}
|
||||
if a.ID < b.ID {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
if a.UpdatedAt < b.UpdatedAt {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
})
|
||||
|
||||
start := 0
|
||||
if next != "" {
|
||||
for i, it := range all {
|
||||
if it.ID >= next {
|
||||
start = i
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
end := start + limit
|
||||
if end > len(all) {
|
||||
end = len(all)
|
||||
}
|
||||
out := make([]File, 0, end-start)
|
||||
for _, it := range all[start:end] {
|
||||
if !it.Deleted {
|
||||
out = append(out, it)
|
||||
}
|
||||
}
|
||||
var nextOut ID
|
||||
if end < len(all) {
|
||||
nextOut = all[end].ID
|
||||
}
|
||||
return out, nextOut, nil
|
||||
}
|
||||
|
||||
// Create inserts a new file with a random UUIDv4 id, owned by this
// node, persists the change (best effort) and notifies watchers.
// An empty (or all-whitespace) name yields ErrBadInput.
func (m *MemStore) Create(_ context.Context, name string) (File, error) {
	name = strings.TrimSpace(name)
	if name == "" {
		return File{}, ErrBadInput
	}

	m.mu.Lock()
	defer m.mu.Unlock()
	now := time.Now().UnixNano()
	uid, err := NewUUIDv4()
	if err != nil {
		return File{}, err
	}
	it := File{ID: uid, Name: name, UpdatedAt: now, Owner: m.self}
	m.items[it.ID] = it
	_ = m.saveLocked()
	m.emit(it)
	return it, nil
}
|
||||
|
||||
func (m *MemStore) Rename(_ context.Context, id ID, newName string) (File, error) {
|
||||
newName = strings.TrimSpace(newName)
|
||||
if newName == "" {
|
||||
return File{}, ErrBadInput
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
it, ok := m.items[id]
|
||||
if !ok || it.Deleted {
|
||||
return File{}, ErrNotFound
|
||||
}
|
||||
if it.Owner != "" && it.Owner != m.self { // ← nur Owner
|
||||
return File{}, ErrForbidden
|
||||
}
|
||||
it.Name = strings.TrimSpace(newName)
|
||||
it.UpdatedAt = time.Now().UnixNano()
|
||||
m.items[id] = it
|
||||
_ = m.saveLocked()
|
||||
m.emit(it)
|
||||
return it, nil
|
||||
}
|
||||
|
||||
func (m *MemStore) Delete(_ context.Context, id ID) (File, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
it, ok := m.items[id]
|
||||
if !ok {
|
||||
return File{}, ErrNotFound
|
||||
}
|
||||
if it.Owner != "" && it.Owner != m.self { // ← nur Owner
|
||||
return File{}, ErrForbidden
|
||||
}
|
||||
if it.Deleted {
|
||||
return it, nil
|
||||
}
|
||||
it.Deleted = true
|
||||
it.UpdatedAt = time.Now().UnixNano()
|
||||
m.items[id] = it
|
||||
_ = m.saveLocked()
|
||||
m.emit(it)
|
||||
return it, nil
|
||||
}
|
||||
|
||||
// TakeoverOwner transfers ownership of id to newOwner. As a safety
// measure a node may only take ownership for itself (newOwner must
// equal m.self). Taking over an item already owned by newOwner is a
// no-op that returns the unchanged item.
func (m *MemStore) TakeoverOwner(_ context.Context, id ID, newOwner string) (File, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	it, ok := m.items[id]
	if !ok || it.Deleted {
		return File{}, ErrNotFound
	}
	newOwner = strings.TrimSpace(newOwner)
	if newOwner == "" {
		return File{}, ErrBadInput
	}
	// safety: only take ownership for oneself
	if newOwner != m.self {
		return File{}, ErrForbidden
	}
	if it.Owner == newOwner {
		return it, nil
	}
	it.Owner = newOwner
	it.UpdatedAt = time.Now().UnixNano()
	m.items[id] = it
	_ = m.saveLocked()
	m.emitLocked(it)
	return it, nil
}
|
||||
|
||||
/*** Replicable ***/
|
||||
|
||||
func (m *MemStore) Snapshot(_ context.Context) (Snapshot, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
s := Snapshot{Items: make([]File, 0, len(m.items))}
|
||||
for _, it := range m.items {
|
||||
s.Items = append(s.Items, it) // inkl. Tombstones
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// ApplyRemote merges a remote snapshot last-writer-wins: a remote item
// replaces the local one only when it is unknown locally or strictly
// newer (UpdatedAt). A locally known non-empty Owner is never
// overwritten by a different remote owner. The merged state is
// persisted once if anything changed, and each accepted item is
// broadcast to watchers.
func (m *MemStore) ApplyRemote(_ context.Context, s Snapshot) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	changed := false
	for _, ri := range s.Items {
		li, ok := m.items[ri.ID]
		if !ok || ri.UpdatedAt > li.UpdatedAt {
			// never overwrite a known owner, unless it is empty
			if ok && li.Owner != "" && ri.Owner != "" && ri.Owner != li.Owner {
				ri.Owner = li.Owner
			}
			m.items[ri.ID] = ri
			changed = true
			m.emitLocked(ri)
		}
	}
	if changed {
		_ = m.saveLocked() // persist the merged state
	}
	return nil
}
|
||||
|
||||
/*** Watchable (optional) ***/
|
||||
|
||||
func (m *MemStore) Watch(stop <-chan struct{}) <-chan ChangeEvent {
|
||||
ch := make(chan ChangeEvent, 32)
|
||||
|
||||
m.mu.Lock()
|
||||
m.subs = append(m.subs, ch)
|
||||
m.mu.Unlock()
|
||||
|
||||
go func() {
|
||||
<-stop
|
||||
m.mu.Lock()
|
||||
// entferne ch aus subs
|
||||
for i, s := range m.subs {
|
||||
if s == ch {
|
||||
m.subs = append(m.subs[:i], m.subs[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
m.mu.Unlock()
|
||||
close(ch)
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
// emit publishes a change event for it; the caller already holds m.mu,
// so this simply forwards to emitLocked.
func (m *MemStore) emit(it File) {
	m.emitLocked(it) // mu is held by the caller
}

// emitLocked fans the event out to every subscriber without blocking:
// a subscriber whose buffer is full simply misses this event.
// The caller must hold m.mu.
func (m *MemStore) emitLocked(it File) {
	ev := ChangeEvent{At: time.Now(), Item: it}
	for _, s := range m.subs {
		select {
		case s <- ev:
		default: /* drop when full */
		}
	}
}
|
||||
89
internal/filesvc/store.go
Normal file
89
internal/filesvc/store.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package filesvc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
/*** Domain ***/
|
||||
|
||||
type ID = string
|
||||
|
||||
// File is the replicated domain object. Concurrent edits are resolved
// last-writer-wins via UpdatedAt; deletions are kept as tombstones so
// they replicate across the mesh too.
type File struct {
	ID        ID     `json:"id"`
	Name      string `json:"name"`
	UpdatedAt int64  `json:"updatedAt"` // UnixNano, for LWW
	Deleted   bool   `json:"deleted"`   // tombstone for mesh-wide delete
	Owner     string `json:"owner"`     // AdvertURL/node id of the creator
}
|
||||
|
||||
/*** Fehler ***/
|
||||
|
||||
// Sentinel errors returned by Store implementations.
// Compare with errors.Is.
var (
	ErrNotFound  = errors.New("file not found")
	ErrBadInput  = errors.New("bad input")
	ErrConflict  = errors.New("conflict")
	ErrForbidden = errors.New("forbidden")
	ErrTransient = errors.New("transient")
)
|
||||
|
||||
/*** Basis-API (lokal nutzbar) ***/
|
||||
|
||||
// Store is the basic file API, usable purely locally.
type Store interface {
	// reading & listing
	Get(ctx context.Context, id ID) (File, error)
	List(ctx context.Context, next ID, limit int) (items []File, nextOut ID, err error)
	Create(ctx context.Context, name string) (File, error)
	Rename(ctx context.Context, id ID, newName string) (File, error) // owner only
	Delete(ctx context.Context, id ID) (File, error)                 // owner only
	TakeoverOwner(ctx context.Context, id ID, newOwner string) (File, error)
}
|
||||
|
||||
/*** Mesh-Replikation ***/
|
||||
|
||||
// Snapshot is the full replication state, including tombstones.
type Snapshot struct {
	Items []File `json:"items"`
}

// Replicable is implemented by stores that take part in mesh
// anti-entropy replication.
type Replicable interface {
	// Snapshot returns the complete current state (incl. tombstones).
	Snapshot(ctx context.Context) (Snapshot, error)
	// ApplyRemote merges a remote snapshot using last-writer-wins.
	ApplyRemote(ctx context.Context, s Snapshot) error
}
|
||||
|
||||
/*** Events (optional) ***/
|
||||
|
||||
// ChangeEvent can be used to trigger proactive mesh pushes.
// With pull-based anti-entropy it is optional.
type ChangeEvent struct {
	At   time.Time
	Item File
}

// Watchable emits changes; close(stop) ends the stream.
// A no-op implementation is allowed when pull sync is sufficient.
type Watchable interface {
	Watch(stop <-chan struct{}) <-chan ChangeEvent
}

/*** Combined interface ***/

// MeshStore bundles everything a mesh-replicated store provides.
type MeshStore interface {
	Store
	Replicable
	Watchable // optional - may be a no-op
}
|
||||
|
||||
// NewUUIDv4 returns a random RFC 4122 version-4 UUID in canonical
// 8-4-4-4-12 hex form, or an error if the entropy source fails.
func NewUUIDv4() (string, error) {
	var b [16]byte
	if _, err := rand.Read(b[:]); err != nil {
		return "", err
	}
	b[6] = (b[6] & 0x0f) | 0x40 // version nibble = 4
	b[8] = (b[8] & 0x3f) | 0x80 // variant = RFC 4122
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:16]), nil
}
|
||||
785
internal/mesh/mesh.go
Normal file
785
internal/mesh/mesh.go
Normal file
@@ -0,0 +1,785 @@
|
||||
package mesh
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"log"
|
||||
"math/rand/v2"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
/*** Types & Config ***/
|
||||
|
||||
// Config controls a mesh node: where it listens, how it advertises
// itself, how it finds peers, and how often the background loops run.
// Zero durations fall back to the defaults documented on each loop.
type Config struct {
	BindAddr         string   // e.g. ":9090"
	AdvertURL        string   // e.g. "http://10.0.0.5:9090"
	Seeds            []string // other peers' mesh base URLs
	ClusterSecret    string   // HMAC key
	EnableDiscovery  bool
	DiscoveryAddress string        // "239.8.8.8:9898"
	PeerTTL          time.Duration // how long a peer may stay inactive (see defaults below)
	PruneInterval    time.Duration // how often pruning runs

	SyncInterval time.Duration
	Fanout       int

	HelloInterval time.Duration // how often to ping
	HelloFanout   int           // how many peers per tick

	BlobTimeout time.Duration
}
|
||||
|
||||
// Peer is one known mesh member as seen from this node.
type Peer struct {
	URL       string    `json:"url"`
	LastSeen  time.Time `json:"lastSeen"`
	Self      bool      `json:"self"`
	OwnerHint int       `json:"ownerHint"` // optional
}

// Item is the replicated record exchanged between nodes.
type Item struct {
	ID        string `json:"id"`
	Name      string `json:"name"`
	UpdatedAt int64  `json:"updatedAt"`
	Deleted   bool   `json:"deleted"` // tombstone for deletes
	Owner     string `json:"owner"`
}

// Snapshot is a full copy of replicated state, including tombstones.
type Snapshot struct {
	Items []Item `json:"items"`
}

// Callbacks that your app provides.
type Callbacks struct {
	// GetSnapshot returns the full local state to push to peers.
	GetSnapshot func(ctx context.Context) (Snapshot, error)
	// ApplyRemote merges a snapshot received from a peer.
	ApplyRemote func(ctx context.Context, s Snapshot) error
	// BlobOpen opens a blob stream by id; returns reader, name,
	// content type and size.
	BlobOpen func(ctx context.Context, id string) (io.ReadCloser, string, string, int64, error)
}
|
||||
|
||||
/*** Node ***/
|
||||
|
||||
// Node is one mesh participant: an HTTP control plane plus a set of
// background gossip loops operating on a mutex-guarded peer table.
type Node struct {
	cfg    Config
	cbs    Callbacks
	self   Peer
	mu     sync.RWMutex
	peers  map[string]*Peer // keyed by advertised URL; never contains self
	client *http.Client
	srv    *http.Server
	stop   chan struct{} // closed by Close; all loops select on it
	wg     sync.WaitGroup
}
|
||||
|
||||
// RemovePeer deletes a peer from the peer table and reports whether an
// entry was removed. Seeds and the node itself are never removed.
func (n *Node) RemovePeer(url string) bool {
	n.mu.Lock()
	defer n.mu.Unlock()
	if url == "" || url == n.self.URL {
		return false
	}
	// protect seeds
	if n.isSeed(url) {
		return false
	}
	if _, ok := n.peers[url]; ok {
		delete(n.peers, url)
		return true
	}
	return false
}
|
||||
|
||||
// Config returns a copy of the node's configuration.
func (n *Node) Config() Config { return n.cfg }
|
||||
|
||||
// PruneNow entfernt alle Peers, deren LastSeen vor cutoff liegt (Seeds bleiben).
|
||||
func (n *Node) PruneNow(cutoff time.Time) int {
|
||||
n.mu.Lock()
|
||||
defer n.mu.Unlock()
|
||||
removed := 0
|
||||
for url, p := range n.peers {
|
||||
if url == n.self.URL || n.isSeed(url) {
|
||||
continue
|
||||
}
|
||||
if p.LastSeen.IsZero() || p.LastSeen.Before(cutoff) {
|
||||
delete(n.peers, url)
|
||||
removed++
|
||||
}
|
||||
}
|
||||
return removed
|
||||
}
|
||||
|
||||
// StartPeerPruner launches the background peer-pruning loop; it stops
// automatically when n.stop is closed.
// NOTE(review): unlike the loops started in Serve, this goroutine is
// not tracked in n.wg, so Close does not wait for it — confirm intended.
func (n *Node) StartPeerPruner() {
	go n.loopPrunePeers()
}

// loopPrunePeers periodically drops peers not seen for PeerTTL
// (default 2m), checking every PruneInterval (default 30s).
func (n *Node) loopPrunePeers() {
	ttl := n.cfg.PeerTTL
	if ttl <= 0 {
		ttl = 2 * time.Minute
	}
	interval := n.cfg.PruneInterval
	if interval <= 0 {
		interval = 30 * time.Second
	}
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-n.stop:
			return
		case <-t.C:
			cutoff := time.Now().Add(-ttl)
			_ = n.PruneNow(cutoff)
		}
	}
}
|
||||
|
||||
// helper: ist url ein Seed?
|
||||
func (n *Node) isSeed(url string) bool {
|
||||
for _, s := range n.cfg.Seeds {
|
||||
if strings.TrimSpace(s) == strings.TrimSpace(url) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// New validates cfg and builds a Node. BindAddr, AdvertURL and
// ClusterSecret are mandatory. Serve must be called to actually start
// the mesh. All peer-to-peer HTTP calls share one client with a hard
// 5 second timeout.
func New(cfg Config, cbs Callbacks) (*Node, error) {
	if cfg.BindAddr == "" || cfg.AdvertURL == "" {
		return nil, errors.New("mesh: BindAddr and AdvertURL required")
	}
	if cfg.ClusterSecret == "" {
		return nil, errors.New("mesh: ClusterSecret required")
	}
	n := &Node{
		cfg:   cfg,
		cbs:   cbs,
		self:  Peer{URL: cfg.AdvertURL, LastSeen: time.Now(), Self: true},
		peers: make(map[string]*Peer),
		client: &http.Client{
			Timeout: 5 * time.Second,
		},
		stop: make(chan struct{}),
	}
	return n, nil
}
|
||||
|
||||
/*** HMAC helpers ***/
|
||||
|
||||
// sign returns the hex-encoded HMAC-SHA256 of b under the cluster secret.
func (n *Node) sign(b []byte) string {
	m := hmac.New(sha256.New, []byte(n.cfg.ClusterSecret))
	m.Write(b)
	return hex.EncodeToString(m.Sum(nil))
}

// verify reports whether sig is a valid signature for b, using a
// constant-time comparison (hmac.Equal).
func (n *Node) verify(b []byte, sig string) bool {
	want := n.sign(b)
	return hmac.Equal([]byte(want), []byte(sig))
}
|
||||
|
||||
/*** HTTP handlers (control plane) ***/
|
||||
|
||||
func (n *Node) helloHandler(w http.ResponseWriter, r *http.Request) {
|
||||
body, _ := io.ReadAll(r.Body)
|
||||
if !n.verify(body, r.Header.Get("X-Mesh-Sig")) {
|
||||
http.Error(w, "bad signature", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
var req struct {
|
||||
URL string `json:"url"`
|
||||
}
|
||||
if err := json.Unmarshal(body, &req); err != nil || strings.TrimSpace(req.URL) == "" {
|
||||
http.Error(w, "bad json", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Peer anlegen (falls neu) und LastSeen setzen
|
||||
n.mu.Lock()
|
||||
if req.URL != n.self.URL {
|
||||
if p, ok := n.peers[req.URL]; ok {
|
||||
p.LastSeen = time.Now()
|
||||
} else {
|
||||
cp := Peer{URL: req.URL, LastSeen: time.Now()} // weitere Felder wie bekannt
|
||||
n.peers[req.URL] = &cp
|
||||
}
|
||||
}
|
||||
n.mu.Unlock()
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (n *Node) touchPeer(url string) {
|
||||
if strings.TrimSpace(url) == "" {
|
||||
return
|
||||
}
|
||||
n.mu.Lock()
|
||||
if p, ok := n.peers[url]; ok {
|
||||
p.LastSeen = time.Now()
|
||||
}
|
||||
n.mu.Unlock()
|
||||
}
|
||||
|
||||
func (n *Node) peersHandler(w http.ResponseWriter, r *http.Request) {
|
||||
n.mu.RLock()
|
||||
defer n.mu.RUnlock()
|
||||
var list []Peer
|
||||
list = append(list, n.self)
|
||||
for _, p := range n.peers {
|
||||
//p.Self = false
|
||||
list = append(list, *p)
|
||||
}
|
||||
writeJSON(w, http.StatusOK, list)
|
||||
}
|
||||
|
||||
func (n *Node) syncHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// verify signature
|
||||
body, _ := io.ReadAll(r.Body)
|
||||
if !n.verify(body, r.Header.Get("X-Mesh-Sig")) {
|
||||
http.Error(w, "bad signature", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
var s Snapshot
|
||||
if err := json.Unmarshal(body, &s); err != nil {
|
||||
http.Error(w, "bad json", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
// apply
|
||||
if err := n.cbs.ApplyRemote(r.Context(), s); err != nil {
|
||||
http.Error(w, "apply error: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// writeJSON serializes v as a JSON response with the given status code.
// Encoding errors are ignored: the header is already on the wire.
func writeJSON(w http.ResponseWriter, code int, v any) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	enc := json.NewEncoder(w)
	_ = enc.Encode(v)
}
|
||||
|
||||
/*** Serve ***/
|
||||
|
||||
// Serve wires up the mesh control-plane endpoints, starts the
// background loops (seeding, peer exchange, optional UDP discovery,
// anti-entropy sync, hello pings) and blocks until the HTTP server
// fails or Close is called. Every loop is tracked in n.wg so Close can
// wait for it.
func (n *Node) Serve() error {
	mux := http.NewServeMux()
	mux.HandleFunc("/mesh/peers", n.peersHandler)
	mux.HandleFunc("/mesh/hello", n.helloHandler)
	mux.HandleFunc("/mesh/blob", n.blobHandler)
	mux.HandleFunc("/mesh/sync", n.syncHandler)
	n.srv = &http.Server{Addr: n.cfg.BindAddr, Handler: mux}

	// background loops
	n.wg.Add(1)
	go func() {
		defer n.wg.Done()
		n.loopSeeder()
	}()

	n.wg.Add(1)
	go func() { defer n.wg.Done(); n.loopPeerExchange() }()

	if n.cfg.EnableDiscovery && n.cfg.DiscoveryAddress != "" {
		n.wg.Add(2)
		go func() {
			defer n.wg.Done()
			n.loopBeaconSend()
		}()
		go func() {
			defer n.wg.Done()
			n.loopBeaconRecv()
		}()
	}

	n.wg.Add(1)
	go func() {
		defer n.wg.Done()
		n.loopAntiEntropy()
	}()

	n.wg.Add(1)
	go func() {
		defer n.wg.Done()
		n.loopHello()
	}()

	// http server
	errc := make(chan error, 1)
	go func() {
		errc <- n.srv.ListenAndServe()
	}()
	select {
	case err := <-errc:
		return err
	case <-n.stop:
		return http.ErrServerClosed
	}
}
|
||||
|
||||
// Close signals all background loops to stop, gracefully shuts down
// the HTTP server within ctx, and waits for every tracked loop to
// finish. It must be called at most once (n.stop is closed here).
func (n *Node) Close(ctx context.Context) error {
	close(n.stop)
	if n.srv != nil {
		_ = n.srv.Shutdown(ctx)
	}
	n.wg.Wait()
	return nil
}
|
||||
|
||||
/*** Loops ***/
|
||||
|
||||
// loopPeerExchange asks every configured seed for its peer list every
// 30s and merges previously unknown peers into the local table
// (gossip-style membership). Existing entries are left untouched.
// Stops when n.stop is closed.
// NOTE(review): decoded entries may carry Self=true from the remote's
// own listing; output paths (PeerList) normalize this flag.
func (n *Node) loopPeerExchange() {
	t := time.NewTicker(30 * time.Second)
	defer t.Stop()
	for {
		select {
		case <-n.stop:
			return
		case <-t.C:
		}
		// query the seeds
		for _, s := range n.cfg.Seeds {
			if strings.TrimSpace(s) == "" {
				continue
			}
			resp, err := n.client.Get(strings.TrimRight(s, "/") + "/mesh/peers")
			if err != nil {
				continue
			}
			var list []Peer
			if json.NewDecoder(resp.Body).Decode(&list) == nil {
				n.mu.Lock()
				for _, p := range list {
					if p.URL == "" || p.URL == n.self.URL {
						continue
					}
					if _, ok := n.peers[p.URL]; !ok {
						cp := p
						n.peers[p.URL] = &cp
					}
				}
				n.mu.Unlock()
			}
			resp.Body.Close()
		}
	}
}
|
||||
|
||||
func (n *Node) loopSeeder() {
|
||||
// attempt to hello known seeds every 5s at start, then every 30s
|
||||
backoff := 5 * time.Second
|
||||
for {
|
||||
select {
|
||||
case <-n.stop:
|
||||
return
|
||||
case <-time.After(backoff):
|
||||
}
|
||||
if len(n.cfg.Seeds) == 0 {
|
||||
backoff = 30 * time.Second
|
||||
continue
|
||||
}
|
||||
for _, s := range n.cfg.Seeds {
|
||||
if s == "" || s == n.self.URL {
|
||||
continue
|
||||
}
|
||||
_ = n.sendHello(s)
|
||||
}
|
||||
backoff = 30 * time.Second
|
||||
}
|
||||
}
|
||||
|
||||
// loopAntiEntropy pushes the full local snapshot to every known peer
// every 10s. Receivers merge it last-writer-wins, so repeatedly
// pushing the complete state converges the mesh. Send errors are
// ignored here; the next tick retries. Stops when n.stop is closed.
func (n *Node) loopAntiEntropy() {
	t := time.NewTicker(10 * time.Second)
	defer t.Stop()
	for {
		select {
		case <-n.stop:
			return
		case <-t.C:
			n.mu.RLock()
			targets := make([]string, 0, len(n.peers))
			for url := range n.peers {
				targets = append(targets, url)
			}
			n.mu.RUnlock()
			if len(targets) == 0 {
				continue
			}
			ctx, cancel := context.WithTimeout(context.Background(), 4*time.Second)
			snap, err := n.cbs.GetSnapshot(ctx)
			cancel()
			if err != nil {
				continue
			}
			for _, url := range targets {
				_ = n.sendSync(url, snap)
			}
		}
	}
}
|
||||
|
||||
// loopBeaconSend multicasts a tiny JSON beacon ({"url": advert URL})
// to the discovery address every 5s so nodes on the same network can
// find each other without configured seeds. Exits permanently on
// resolve/dial failure or when n.stop is closed.
func (n *Node) loopBeaconSend() {
	addr, err := net.ResolveUDPAddr("udp", n.cfg.DiscoveryAddress)
	if err != nil {
		log.Printf("mesh beacon send resolve: %v", err)
		return
	}
	conn, err := net.DialUDP("udp", nil, addr)
	if err != nil {
		log.Printf("mesh beacon send dial: %v", err)
		return
	}
	defer conn.Close()
	type beacon struct {
		URL string `json:"url"`
	}
	t := time.NewTicker(5 * time.Second)
	defer t.Stop()
	for {
		select {
		case <-n.stop:
			return
		case <-t.C:
			b, _ := json.Marshal(beacon{URL: n.self.URL})
			_, _ = conn.Write(b)
		}
	}
}
|
||||
|
||||
// loopBeaconRecv listens on the multicast discovery group and answers
// each foreign beacon with a direct hello to the advertised URL. A
// short (6s) read deadline keeps the loop responsive to the stop
// signal; read errors of any kind just continue the polling loop.
func (n *Node) loopBeaconRecv() {
	addr, err := net.ResolveUDPAddr("udp", n.cfg.DiscoveryAddress)
	if err != nil {
		log.Printf("mesh beacon recv resolve: %v", err)
		return
	}
	// enable multicast receive
	l, err := net.ListenMulticastUDP("udp", nil, addr)
	if err != nil {
		log.Printf("mesh beacon recv listen: %v", err)
		return
	}
	defer l.Close()
	_ = l.SetReadBuffer(1 << 16)

	buf := make([]byte, 2048)
	for {
		select {
		case <-n.stop:
			return
		default:
		}
		_ = l.SetDeadline(time.Now().Add(6 * time.Second))
		nr, _, err := l.ReadFromUDP(buf)
		if err != nil {
			// timeouts and transient errors alike: just poll again
			if ne, ok := err.(net.Error); ok && ne.Timeout() {
				continue
			}
			continue
		}
		var msg struct{ URL string }
		if err := json.Unmarshal(buf[:nr], &msg); err == nil {
			if msg.URL != "" && msg.URL != n.self.URL {
				_ = n.sendHello(msg.URL)
			}
		}
	}
}
|
||||
|
||||
/*** Outgoing ***/
|
||||
|
||||
// sendHello announces this node to url via a signed POST to
// /mesh/hello. On HTTP 200 the peer is also marked as seen in our own
// table; any other status is reported as an error. The response body
// is drained and closed so the HTTP connection can be reused.
func (n *Node) sendHello(url string) error {
	b, _ := json.Marshal(struct {
		URL string `json:"url"`
	}{URL: n.self.URL})
	req, _ := http.NewRequest(http.MethodPost, strings.TrimRight(url, "/")+"/mesh/hello", bytes.NewReader(b))
	req.Header.Set("X-Mesh-Sig", n.sign(b))

	resp, err := n.client.Do(req)
	if err != nil {
		return err
	}
	io.Copy(io.Discard, resp.Body)
	resp.Body.Close()
	if resp.StatusCode == http.StatusOK {
		n.touchPeer(url) // mark the peer as "seen" from our side right away
		return nil
	}
	return fmt.Errorf("hello %s: %s", url, resp.Status)
}
|
||||
|
||||
// loopHello keeps liveness information fresh: every HelloInterval
// (default 20s) it picks up to HelloFanout (default 8) random known
// peers and pings them in parallel via sendHello. Stops when n.stop
// is closed.
func (n *Node) loopHello() {
	interval := n.cfg.HelloInterval
	if interval <= 0 {
		interval = 20 * time.Second
	}
	fanout := n.cfg.HelloFanout
	if fanout <= 0 {
		fanout = 8
	}

	t := time.NewTicker(interval)
	defer t.Stop()

	for {
		select {
		case <-n.stop:
			return
		case <-t.C:
		}

		// list of *known* peers (not just seeds)
		n.mu.RLock()
		targets := make([]string, 0, len(n.peers))
		for url := range n.peers {
			if url != n.self.URL {
				targets = append(targets, url)
			}
		}
		n.mu.RUnlock()
		if len(targets) == 0 {
			continue
		}

		// shuffle randomly and cap at fanout
		rand.Shuffle(len(targets), func(i, j int) { targets[i], targets[j] = targets[j], targets[i] })
		if fanout < len(targets) {
			targets = targets[:fanout]
		}

		// ping lightly in parallel
		var wg sync.WaitGroup
		for _, u := range targets {
			u := u
			wg.Add(1)
			go func() {
				defer wg.Done()
				_ = n.sendHello(u)
			}()
		}
		wg.Wait()
	}
}
|
||||
|
||||
func (n *Node) sendSync(url string, s Snapshot) error {
|
||||
b, _ := json.Marshal(s)
|
||||
req, _ := http.NewRequest(http.MethodPost, strings.TrimRight(url, "/")+"/mesh/sync", bytes.NewReader(b))
|
||||
req.Header.Set("X-Mesh-Sig", n.sign(b))
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 4*time.Second)
|
||||
defer cancel()
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
resp, err := n.client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
io.Copy(io.Discard, resp.Body)
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("sync %s: %s", url, resp.Status)
|
||||
}
|
||||
|
||||
n.touchPeer(url)
|
||||
return nil
|
||||
}
|
||||
|
||||
// PeerList liefert eine Kopie der bekannten Peers inkl. Self.
|
||||
func (n *Node) PeerList() []Peer {
|
||||
n.mu.RLock()
|
||||
defer n.mu.RUnlock()
|
||||
out := make([]Peer, 0, len(n.peers)+1)
|
||||
out = append(out, n.self)
|
||||
for _, p := range n.peers {
|
||||
cp := *p
|
||||
cp.Self = false
|
||||
out = append(out, cp)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// SyncNow verschickt sofort den aktuellen Snapshot an alle bekannten Peers.
|
||||
func (n *Node) SyncNow(ctx context.Context) error {
|
||||
snap, err := n.cbs.GetSnapshot(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n.mu.RLock()
|
||||
targets := make([]string, 0, len(n.peers))
|
||||
for url := range n.peers {
|
||||
targets = append(targets, url)
|
||||
}
|
||||
n.mu.RUnlock()
|
||||
for _, u := range targets {
|
||||
_ = n.sendSync(u, snap)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
/*** Utilities ***/
|
||||
|
||||
// OwnerHint deterministically maps id onto an index into peers, providing
// a simple, optional way to distribute responsibility. It returns 0 when
// peers is empty.
func OwnerHint(id string, peers []string) int {
	count := len(peers)
	if count == 0 {
		return 0
	}
	sum := crc32.ChecksumIEEE([]byte(id))
	return int(sum % uint32(count))
}
|
||||
|
||||
// Helpers to load from ENV quickly
|
||||
func FromEnv() Config {
|
||||
return Config{
|
||||
BindAddr: getenvDefault("MESH_BIND", ":9090"),
|
||||
AdvertURL: os.Getenv("MESH_ADVERT"),
|
||||
Seeds: splitCSV(os.Getenv("MESH_SEEDS")),
|
||||
ClusterSecret: os.Getenv("MESH_CLUSTER_SECRET"),
|
||||
EnableDiscovery: os.Getenv("MESH_ENABLE_DISCOVERY") == "true",
|
||||
DiscoveryAddress: getenvDefault("MESH_DISCOVERY_ADDR", "239.8.8.8:9898"),
|
||||
}
|
||||
}
|
||||
|
||||
// splitCSV parses a comma-separated list, trims whitespace around each
// element, and drops empty entries and duplicates while preserving the
// first-seen order. A blank input yields nil.
func splitCSV(s string) []string {
	if strings.TrimSpace(s) == "" {
		return nil
	}
	parts := strings.Split(s, ",")
	out := make([]string, 0, len(parts))
	for _, raw := range parts {
		item := strings.TrimSpace(raw)
		if item == "" || slices.Contains(out, item) {
			continue
		}
		out = append(out, item)
	}
	return out
}
|
||||
|
||||
// getenvDefault returns the value of environment variable k, or def when
// the variable is unset or empty.
func getenvDefault(k, def string) string {
	if v := os.Getenv(k); v != "" {
		return v
	}
	return def
}
|
||||
|
||||
// POST /mesh/blob (Body: {"id":<int64>}) -> streamt den Blob
|
||||
func (n *Node) blobHandler(w http.ResponseWriter, r *http.Request) {
|
||||
body, _ := io.ReadAll(r.Body)
|
||||
if !n.verify(body, r.Header.Get("X-Mesh-Sig")) {
|
||||
http.Error(w, "bad signature", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
var req struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
if err := json.Unmarshal(body, &req); err != nil {
|
||||
http.Error(w, "bad json", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
if n.cbs.BlobOpen == nil {
|
||||
http.Error(w, "blob unavailable", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
rc, name, ct, size, err := n.cbs.BlobOpen(r.Context(), req.ID)
|
||||
if err != nil {
|
||||
http.Error(w, "not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
defer rc.Close()
|
||||
if ct == "" {
|
||||
ct = "application/octet-stream"
|
||||
}
|
||||
if size > 0 {
|
||||
w.Header().Set("Content-Length", strconv.FormatInt(size, 10))
|
||||
}
|
||||
w.Header().Set("Content-Type", ct)
|
||||
w.Header().Set("X-Blob-Name", name)
|
||||
_, _ = io.Copy(w, rc)
|
||||
}
|
||||
|
||||
// sendBlobRequest schickt eine signierte Anfrage an /mesh/blob und liefert die Response
|
||||
// zurück (Caller MUSS resp.Body schließen!). Bei HTTP 200 wird der Peer als gesehen markiert.
|
||||
func (n *Node) sendBlobRequest(url, id string) (*http.Response, error) {
|
||||
b, _ := json.Marshal(struct {
|
||||
ID string `json:"id"`
|
||||
}{ID: id})
|
||||
|
||||
req, _ := http.NewRequest(http.MethodPost, strings.TrimRight(url, "/")+"/mesh/blob", bytes.NewReader(b))
|
||||
req.Header.Set("X-Mesh-Sig", n.sign(b))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
// ❗ WICHTIG: kein kurzer Timeout. Optional: großer Timeout aus Config
|
||||
ctx := context.Background()
|
||||
if n.cfg.BlobTimeout > 0 {
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(context.Background(), n.cfg.BlobTimeout)
|
||||
defer cancel()
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
resp, err := n.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
n.touchPeer(url) // ausgehender Erfolg zählt als "gesehen"
|
||||
return resp, nil
|
||||
}
|
||||
io.Copy(io.Discard, resp.Body)
|
||||
resp.Body.Close()
|
||||
return nil, fmt.Errorf("blob %s from %s: %s", id, url, resp.Status)
|
||||
}
|
||||
|
||||
// Öffentliche Methode: versuche Blob bei irgendeinem Peer zu holen
|
||||
func (n *Node) FetchBlobAny(ctx context.Context, id string) (io.ReadCloser, string, string, int64, error) {
|
||||
n.mu.RLock()
|
||||
targets := make([]string, 0, len(n.peers))
|
||||
for url := range n.peers {
|
||||
targets = append(targets, url)
|
||||
}
|
||||
n.mu.RUnlock()
|
||||
if len(targets) == 0 {
|
||||
// Fallback: Seeds probieren
|
||||
targets = append(targets, n.cfg.Seeds...)
|
||||
}
|
||||
for _, u := range targets {
|
||||
if strings.TrimSpace(u) == "" || u == n.self.URL {
|
||||
continue
|
||||
}
|
||||
resp, err := n.sendBlobRequest(u, id)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
name := resp.Header.Get("X-Blob-Name")
|
||||
ct := resp.Header.Get("Content-Type")
|
||||
var size int64 = -1
|
||||
if cl := resp.Header.Get("Content-Length"); cl != "" {
|
||||
if s, err := strconv.ParseInt(cl, 10, 64); err == nil {
|
||||
size = s
|
||||
}
|
||||
}
|
||||
// Caller muss resp.Body schließen
|
||||
return resp.Body, name, ct, size, nil
|
||||
}
|
||||
io.Copy(io.Discard, resp.Body)
|
||||
resp.Body.Close()
|
||||
}
|
||||
return nil, "", "", 0, fmt.Errorf("blob %s not found on peers", id)
|
||||
}
|
||||
@@ -1,316 +0,0 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Store is a content-addressed file store: blob bytes live in blobDir
// (named by their hex SHA-256 hash), per-file metadata lives in metaDir
// as one JSON file per record, and in-flight uploads are staged in tmpDir.
type Store struct {
	blobDir string // content-addressed blobs; file name is the hex SHA-256
	metaDir string // one <id>.json FileRecord per stored file
	tmpDir string // staging area for uploads and chunked parts
	mu sync.RWMutex // guards metadata file reads/writes
}
|
||||
|
||||
// FileRecord is the persisted metadata for one stored file. The blob
// content itself lives separately under Hash in the blob directory.
type FileRecord struct {
	ID string `json:"id"` // opaque unique identifier (see newID)
	Name string `json:"name"` // original file name as supplied by the uploader
	Hash string `json:"hash"` // hex SHA-256 of the blob content
	Size int64 `json:"size"` // blob size in bytes
	Meta map[string]string `json:"meta,omitempty"` // free-form key/value metadata
	CreatedAt time.Time `json:"createdAt"` // UTC creation time
	ContentType string `json:"contentType,omitempty"` // MIME type; empty here, filled on GET via extension
}
|
||||
|
||||
func (fr FileRecord) SafeName() string {
|
||||
n := strings.TrimSpace(fr.Name)
|
||||
if n == "" {
|
||||
return fr.ID
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func Open(blobDir, metaDir, tmpDir string) (*Store, error) {
|
||||
for _, p := range []string{blobDir, metaDir, tmpDir} {
|
||||
if err := os.MkdirAll(p, 0o755); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &Store{blobDir: blobDir, metaDir: metaDir, tmpDir: tmpDir}, nil
|
||||
}
|
||||
|
||||
func (s *Store) Put(ctx context.Context, r io.Reader, name, metaStr string) (*FileRecord, error) {
|
||||
if name == "" {
|
||||
name = "file"
|
||||
}
|
||||
|
||||
tmp, err := os.CreateTemp(s.tmpDir, "upload-*")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() { tmp.Close(); os.Remove(tmp.Name()) }()
|
||||
|
||||
h := sha256.New()
|
||||
n, err := io.Copy(io.MultiWriter(tmp, h), r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hash := hex.EncodeToString(h.Sum(nil))
|
||||
|
||||
blobPath := filepath.Join(s.blobDir, hash)
|
||||
if _, err := os.Stat(blobPath); errors.Is(err, os.ErrNotExist) {
|
||||
if err := os.Rename(tmp.Name(), blobPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
_ = os.Remove(tmp.Name())
|
||||
}
|
||||
|
||||
rec := &FileRecord{
|
||||
ID: newID(hash),
|
||||
Name: name,
|
||||
Hash: hash,
|
||||
Size: n,
|
||||
Meta: parseMeta(metaStr),
|
||||
CreatedAt: time.Now().UTC(),
|
||||
ContentType: "", // filled on GET via extension
|
||||
}
|
||||
|
||||
if err := s.writeMeta(rec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rec, nil
|
||||
}
|
||||
|
||||
func (s *Store) Open(ctx context.Context, id string) (io.ReadSeekCloser, *FileRecord, error) {
|
||||
rec, err := s.GetMeta(ctx, id)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
f, err := os.Open(filepath.Join(s.blobDir, rec.Hash))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return f, rec, nil
|
||||
}
|
||||
|
||||
func (s *Store) GetMeta(_ context.Context, id string) (*FileRecord, error) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
bb, err := os.ReadFile(filepath.Join(s.metaDir, id+".json"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var rec FileRecord
|
||||
if err := json.Unmarshal(bb, &rec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &rec, nil
|
||||
}
|
||||
|
||||
func (s *Store) UpdateMeta(_ context.Context, id string, meta map[string]string) (*FileRecord, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
path := filepath.Join(s.metaDir, id+".json")
|
||||
bb, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var rec FileRecord
|
||||
if err := json.Unmarshal(bb, &rec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rec.Meta == nil {
|
||||
rec.Meta = map[string]string{}
|
||||
}
|
||||
for k, v := range meta {
|
||||
rec.Meta[k] = v
|
||||
}
|
||||
nb, _ := json.Marshal(&rec)
|
||||
if err := os.WriteFile(path, nb, 0o600); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &rec, nil
|
||||
}
|
||||
|
||||
func (s *Store) Delete(_ context.Context, id string) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
// Only delete metadata; GC for unreferenced blobs is a separate task
|
||||
return os.Remove(filepath.Join(s.metaDir, id+".json"))
|
||||
}
|
||||
|
||||
func (s *Store) List(_ context.Context, q string, offset, limit int) ([]*FileRecord, int, error) {
|
||||
if limit <= 0 || limit > 200 {
|
||||
limit = 50
|
||||
}
|
||||
entries, err := os.ReadDir(s.metaDir)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
var items []*FileRecord
|
||||
for _, e := range entries {
|
||||
if e.IsDir() || !strings.HasSuffix(e.Name(), ".json") {
|
||||
continue
|
||||
}
|
||||
bb, err := os.ReadFile(filepath.Join(s.metaDir, e.Name()))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var rec FileRecord
|
||||
if err := json.Unmarshal(bb, &rec); err != nil {
|
||||
continue
|
||||
}
|
||||
if q == "" || strings.Contains(strings.ToLower(rec.Name), strings.ToLower(q)) {
|
||||
items = append(items, &rec)
|
||||
}
|
||||
}
|
||||
sort.Slice(items, func(i, j int) bool { return items[i].CreatedAt.After(items[j].CreatedAt) })
|
||||
end := offset + limit
|
||||
if offset > len(items) {
|
||||
return []*FileRecord{}, 0, nil
|
||||
}
|
||||
if end > len(items) {
|
||||
end = len(items)
|
||||
}
|
||||
next := 0
|
||||
if end < len(items) {
|
||||
next = end
|
||||
}
|
||||
return items[offset:end], next, nil
|
||||
}
|
||||
|
||||
// --- Chunked uploads ---
|
||||
|
||||
// UploadSession describes one in-progress chunked upload. It is created by
// UploadInit and referenced by UploadPart/UploadComplete/UploadAbort via
// its ID.
type UploadSession struct {
	ID string `json:"id"` // session identifier, also embedded in part file names
	Name string `json:"name"` // target file name supplied at init time
	Meta string `json:"meta"` // raw metadata string, parsed on completion
	CreatedAt time.Time `json:"createdAt"` // UTC session creation time
}
|
||||
|
||||
func (s *Store) UploadInit(_ context.Context, name, meta string) (*UploadSession, error) {
|
||||
id := newID(fmt.Sprintf("sess-%d", time.Now().UnixNano()))
|
||||
us := &UploadSession{ID: id, Name: name, Meta: meta, CreatedAt: time.Now().UTC()}
|
||||
// session file marker
|
||||
if err := os.WriteFile(filepath.Join(s.tmpDir, id+".session"), []byte(name+""+meta), 0o600); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return us, nil
|
||||
}
|
||||
|
||||
func (s *Store) partPath(uid string, n int) string {
|
||||
return filepath.Join(s.tmpDir, fmt.Sprintf("%s.part.%06d", uid, n))
|
||||
}
|
||||
|
||||
func (s *Store) UploadPart(_ context.Context, uid string, n int, r io.Reader) error {
|
||||
if _, err := os.Stat(filepath.Join(s.tmpDir, uid+".session")); err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := os.Create(s.partPath(uid, n))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
_, err = io.Copy(f, r)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Store) UploadComplete(ctx context.Context, uid string) (*FileRecord, error) {
|
||||
if _, err := os.Stat(filepath.Join(s.tmpDir, uid+".session")); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
matches, _ := filepath.Glob(filepath.Join(s.tmpDir, uid+".part.*"))
|
||||
if len(matches) == 0 {
|
||||
return nil, errors.New("no parts uploaded")
|
||||
}
|
||||
sort.Strings(matches)
|
||||
pr, pw := io.Pipe()
|
||||
go func() {
|
||||
for _, p := range matches {
|
||||
f, err := os.Open(p)
|
||||
if err != nil {
|
||||
_ = pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if _, err := io.Copy(pw, f); err != nil {
|
||||
_ = pw.CloseWithError(err)
|
||||
_ = f.Close()
|
||||
return
|
||||
}
|
||||
_ = f.Close()
|
||||
}
|
||||
_ = pw.Close()
|
||||
}()
|
||||
// Read first line of session file for name/meta (simple format)
|
||||
bb, _ := os.ReadFile(filepath.Join(s.tmpDir, uid+".session"))
|
||||
lines := strings.SplitN(string(bb), "", 2)
|
||||
name := "file"
|
||||
meta := ""
|
||||
if len(lines) >= 1 && strings.TrimSpace(lines[0]) != "" {
|
||||
name = strings.TrimSpace(lines[0])
|
||||
}
|
||||
if len(lines) == 2 {
|
||||
meta = strings.TrimSpace(lines[1])
|
||||
}
|
||||
|
||||
rec, err := s.Put(ctx, pr, name, meta)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, p := range matches {
|
||||
_ = os.Remove(p)
|
||||
}
|
||||
_ = os.Remove(filepath.Join(s.tmpDir, uid+".session"))
|
||||
return rec, nil
|
||||
}
|
||||
|
||||
func (s *Store) UploadAbort(_ context.Context, uid string) error {
|
||||
if _, err := os.Stat(filepath.Join(s.tmpDir, uid+".session")); err != nil {
|
||||
return err
|
||||
}
|
||||
matches, _ := filepath.Glob(filepath.Join(s.tmpDir, uid+".part.*"))
|
||||
for _, p := range matches {
|
||||
_ = os.Remove(p)
|
||||
}
|
||||
return os.Remove(filepath.Join(s.tmpDir, uid+".session"))
|
||||
}
|
||||
|
||||
// --- helpers ---
|
||||
|
||||
func (s *Store) writeMeta(rec *FileRecord) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
bb, _ := json.Marshal(rec)
|
||||
return os.WriteFile(filepath.Join(s.metaDir, rec.ID+".json"), bb, 0o600)
|
||||
}
|
||||
|
||||
// newID derives a 32-character hex identifier from seed mixed with the
// current time, so repeated calls with the same seed still yield distinct
// IDs.
func newID(seed string) string {
	material := fmt.Sprintf("%s|%d", seed, time.Now().UnixNano())
	digest := sha256.Sum256([]byte(material))
	return hex.EncodeToString(digest[:16])
}
|
||||
|
||||
// parseMeta parses a "k1=v1,k2=v2" string into a map, trimming whitespace
// around keys and values. Entries without '=' are ignored. Returns nil for
// an empty input.
func parseMeta(s string) map[string]string {
	if s == "" {
		return nil
	}
	result := make(map[string]string)
	for _, pair := range strings.Split(s, ",") {
		key, val, ok := strings.Cut(pair, "=")
		if !ok {
			continue
		}
		result[strings.TrimSpace(key)] = strings.TrimSpace(val)
	}
	return result
}
|
||||
Reference in New Issue
Block a user