Files
decent-webui/internal/store/store.go
jbergner b5ad30cfbd
All checks were successful
release-tag / release-image (push) Successful in 1m32s
init
2025-09-24 11:50:07 +02:00

317 lines
7.4 KiB
Go

package store
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
)
// Store persists uploaded files as content-addressed blobs (one file per
// SHA-256 digest) plus one JSON metadata record per logical file.
type Store struct {
	blobDir string       // blobs, each named by its hex SHA-256 digest
	metaDir string       // one <id>.json FileRecord per stored file
	tmpDir  string       // staging area for uploads, parts, and session markers
	mu      sync.RWMutex // guards metadata file reads/writes
}
// FileRecord is the persisted metadata for one stored file; it is
// serialized as JSON into the metadata directory as <ID>.json.
type FileRecord struct {
	// ID is the record identifier (derived from the content hash via newID).
	ID string `json:"id"`
	// Name is the client-supplied file name; may be empty (see SafeName).
	Name string `json:"name"`
	// Hash is the hex SHA-256 of the content and names the blob file.
	Hash string `json:"hash"`
	// Size is the content length in bytes.
	Size int64 `json:"size"`
	// Meta holds optional key/value annotations (see parseMeta).
	Meta map[string]string `json:"meta,omitempty"`
	// CreatedAt is the UTC timestamp set when the record was written.
	CreatedAt time.Time `json:"createdAt"`
	// ContentType is stored empty by Put (comment there: filled on GET
	// via extension).
	ContentType string `json:"contentType,omitempty"`
}
// SafeName returns the record's trimmed display name, falling back to
// the record ID when the name is empty or whitespace-only.
func (fr FileRecord) SafeName() string {
	if trimmed := strings.TrimSpace(fr.Name); trimmed != "" {
		return trimmed
	}
	return fr.ID
}
// Open ensures the blob, metadata, and temp directories exist (creating
// them with 0755 as needed) and returns a Store rooted at them.
func Open(blobDir, metaDir, tmpDir string) (*Store, error) {
	dirs := [...]string{blobDir, metaDir, tmpDir}
	for _, dir := range dirs {
		if err := os.MkdirAll(dir, 0o755); err != nil {
			return nil, err
		}
	}
	st := &Store{blobDir: blobDir, metaDir: metaDir, tmpDir: tmpDir}
	return st, nil
}
// Put streams r into the store, content-addressing the blob by its
// SHA-256 digest, and writes a metadata record. Identical content is
// stored once on disk, but each call creates a fresh FileRecord.
// An empty name defaults to "file"; metaStr is parsed by parseMeta.
func (s *Store) Put(ctx context.Context, r io.Reader, name, metaStr string) (*FileRecord, error) {
	if name == "" {
		name = "file"
	}
	tmp, err := os.CreateTemp(s.tmpDir, "upload-*")
	if err != nil {
		return nil, err
	}
	tmpName := tmp.Name()
	// Best-effort cleanup; a no-op once the file has been renamed away.
	defer os.Remove(tmpName)
	// Hash while spooling so the reader is consumed exactly once.
	h := sha256.New()
	n, err := io.Copy(io.MultiWriter(tmp, h), r)
	// Close before rename (required on Windows) and surface close errors
	// without masking an earlier copy error.
	if cerr := tmp.Close(); err == nil {
		err = cerr
	}
	if err != nil {
		return nil, err
	}
	hash := hex.EncodeToString(h.Sum(nil))
	blobPath := filepath.Join(s.blobDir, hash)
	switch _, statErr := os.Stat(blobPath); {
	case statErr == nil:
		// Identical content already stored; deferred Remove drops the temp.
	case errors.Is(statErr, os.ErrNotExist):
		if err := os.Rename(tmpName, blobPath); err != nil {
			return nil, err
		}
	default:
		// Don't assume the blob exists on e.g. permission errors.
		return nil, statErr
	}
	rec := &FileRecord{
		ID:          newID(hash),
		Name:        name,
		Hash:        hash,
		Size:        n,
		Meta:        parseMeta(metaStr),
		CreatedAt:   time.Now().UTC(),
		ContentType: "", // filled on GET via extension
	}
	if err := s.writeMeta(rec); err != nil {
		return nil, err
	}
	return rec, nil
}
// Open returns a seekable reader over the blob for id together with its
// metadata record. The caller is responsible for closing the reader.
func (s *Store) Open(ctx context.Context, id string) (io.ReadSeekCloser, *FileRecord, error) {
	rec, err := s.GetMeta(ctx, id)
	if err != nil {
		return nil, nil, err
	}
	blob, err := os.Open(filepath.Join(s.blobDir, rec.Hash))
	if err != nil {
		return nil, nil, err
	}
	return blob, rec, nil
}
// GetMeta loads the metadata record for id from <metaDir>/<id>.json,
// holding the read lock so concurrent writers are excluded.
func (s *Store) GetMeta(_ context.Context, id string) (*FileRecord, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	raw, err := os.ReadFile(filepath.Join(s.metaDir, id+".json"))
	if err != nil {
		return nil, err
	}
	rec := new(FileRecord)
	if err := json.Unmarshal(raw, rec); err != nil {
		return nil, err
	}
	return rec, nil
}
// UpdateMeta merges meta into the record's Meta map (incoming keys win)
// and rewrites the metadata file atomically with respect to other Store
// methods (write lock held). Returns the updated record.
func (s *Store) UpdateMeta(_ context.Context, id string, meta map[string]string) (*FileRecord, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	path := filepath.Join(s.metaDir, id+".json")
	bb, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var rec FileRecord
	if err := json.Unmarshal(bb, &rec); err != nil {
		return nil, err
	}
	if rec.Meta == nil {
		rec.Meta = make(map[string]string, len(meta))
	}
	for k, v := range meta {
		rec.Meta[k] = v
	}
	// Don't silently persist nothing if marshaling fails.
	nb, err := json.Marshal(&rec)
	if err != nil {
		return nil, err
	}
	if err := os.WriteFile(path, nb, 0o600); err != nil {
		return nil, err
	}
	return &rec, nil
}
// Delete removes the metadata record for id. The blob itself is kept;
// garbage collection of unreferenced blobs is a separate task.
func (s *Store) Delete(_ context.Context, id string) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	metaPath := filepath.Join(s.metaDir, id+".json")
	return os.Remove(metaPath)
}
// List returns metadata records whose name contains q case-insensitively
// (empty q matches everything), newest first, paginated by offset/limit.
// The int result is the offset of the next page, or 0 when there are no
// further results. Unreadable or malformed metadata files are skipped.
func (s *Store) List(_ context.Context, q string, offset, limit int) ([]*FileRecord, int, error) {
	// Clamp inputs: a negative offset would panic on the final slice below.
	if offset < 0 {
		offset = 0
	}
	if limit <= 0 || limit > 200 {
		limit = 50
	}
	// Read lock for consistency with GetMeta; writers hold the write lock.
	s.mu.RLock()
	defer s.mu.RUnlock()
	entries, err := os.ReadDir(s.metaDir)
	if err != nil {
		return nil, 0, err
	}
	needle := strings.ToLower(q) // lowercase once, not per entry
	var items []*FileRecord
	for _, e := range entries {
		if e.IsDir() || !strings.HasSuffix(e.Name(), ".json") {
			continue
		}
		bb, err := os.ReadFile(filepath.Join(s.metaDir, e.Name()))
		if err != nil {
			continue // skip unreadable entries
		}
		var rec FileRecord
		if err := json.Unmarshal(bb, &rec); err != nil {
			continue // skip malformed entries
		}
		if needle == "" || strings.Contains(strings.ToLower(rec.Name), needle) {
			items = append(items, &rec)
		}
	}
	sort.Slice(items, func(i, j int) bool { return items[i].CreatedAt.After(items[j].CreatedAt) })
	if offset > len(items) {
		return []*FileRecord{}, 0, nil
	}
	end := offset + limit
	if end > len(items) {
		end = len(items)
	}
	next := 0
	if end < len(items) {
		next = end
	}
	return items[offset:end], next, nil
}
// --- Chunked uploads ---
// UploadSession identifies an in-progress chunked upload; parts are
// staged in tmpDir until UploadComplete assembles them.
type UploadSession struct {
	ID        string    `json:"id"`        // session identifier, used in part/session file names
	Name      string    `json:"name"`      // client-supplied file name for the final record
	Meta      string    `json:"meta"`      // raw meta string, parsed later by parseMeta
	CreatedAt time.Time `json:"createdAt"` // UTC creation time of the session
}
// UploadInit starts a chunked upload and writes a session marker file.
// The marker uses the simple format "<name>\n<meta>" so UploadComplete
// can recover both fields (the original code concatenated them with no
// separator, making them unrecoverable). Names containing a newline are
// not supported by this format.
func (s *Store) UploadInit(_ context.Context, name, meta string) (*UploadSession, error) {
	id := newID(fmt.Sprintf("sess-%d", time.Now().UnixNano()))
	us := &UploadSession{ID: id, Name: name, Meta: meta, CreatedAt: time.Now().UTC()}
	payload := name + "\n" + meta
	if err := os.WriteFile(filepath.Join(s.tmpDir, id+".session"), []byte(payload), 0o600); err != nil {
		return nil, err
	}
	return us, nil
}
// partPath returns the temp-dir path for chunk n of upload uid. The
// zero-padded index makes lexicographic order match numeric order, which
// UploadComplete relies on when sorting glob matches.
func (s *Store) partPath(uid string, n int) string {
	name := fmt.Sprintf("%s.part.%06d", uid, n)
	return filepath.Join(s.tmpDir, name)
}
// UploadPart stores chunk n of upload uid, refusing unknown sessions.
// The close error is reported (not deferred away) so write failures
// surfaced at close time are not silently dropped.
func (s *Store) UploadPart(_ context.Context, uid string, n int, r io.Reader) error {
	if _, err := os.Stat(filepath.Join(s.tmpDir, uid+".session")); err != nil {
		return err
	}
	f, err := os.Create(s.partPath(uid, n))
	if err != nil {
		return err
	}
	_, err = io.Copy(f, r)
	if cerr := f.Close(); err == nil {
		err = cerr
	}
	return err
}
// UploadComplete concatenates all uploaded parts of uid in order,
// stores the result via Put, and removes the parts and session marker.
// The session file format is "<name>\n<meta>" (written by UploadInit);
// the original code split on the empty string, which only ever yielded
// the first rune of the file as the name.
func (s *Store) UploadComplete(ctx context.Context, uid string) (*FileRecord, error) {
	sessPath := filepath.Join(s.tmpDir, uid+".session")
	if _, err := os.Stat(sessPath); err != nil {
		return nil, err
	}
	matches, err := filepath.Glob(filepath.Join(s.tmpDir, uid+".part.*"))
	if err != nil {
		return nil, err
	}
	if len(matches) == 0 {
		return nil, errors.New("no parts uploaded")
	}
	// Zero-padded part indices make lexicographic order the upload order.
	sort.Strings(matches)
	pr, pw := io.Pipe()
	// Closing the read end unblocks the writer goroutine if Put bails out
	// early, preventing a goroutine leak.
	defer pr.Close()
	go func() {
		for _, p := range matches {
			f, err := os.Open(p)
			if err != nil {
				_ = pw.CloseWithError(err)
				return
			}
			_, err = io.Copy(pw, f)
			_ = f.Close()
			if err != nil {
				_ = pw.CloseWithError(err)
				return
			}
		}
		_ = pw.Close()
	}()
	bb, err := os.ReadFile(sessPath)
	if err != nil {
		return nil, err
	}
	name := "file"
	meta := ""
	head, tail, hasMeta := strings.Cut(string(bb), "\n")
	if strings.TrimSpace(head) != "" {
		name = strings.TrimSpace(head)
	}
	if hasMeta {
		meta = strings.TrimSpace(tail)
	}
	rec, err := s.Put(ctx, pr, name, meta)
	if err != nil {
		return nil, err
	}
	// Best-effort cleanup of staging files.
	for _, p := range matches {
		_ = os.Remove(p)
	}
	_ = os.Remove(sessPath)
	return rec, nil
}
// UploadAbort discards all uploaded parts for uid and deletes the
// session marker, failing if the session does not exist.
func (s *Store) UploadAbort(_ context.Context, uid string) error {
	sessPath := filepath.Join(s.tmpDir, uid+".session")
	if _, err := os.Stat(sessPath); err != nil {
		return err
	}
	parts, _ := filepath.Glob(filepath.Join(s.tmpDir, uid+".part.*"))
	for _, part := range parts {
		_ = os.Remove(part) // best-effort cleanup
	}
	return os.Remove(sessPath)
}
// --- helpers ---
// writeMeta serializes rec to <metaDir>/<ID>.json under the write lock.
// Marshal errors are returned instead of being ignored, so a failed
// serialization can no longer write an empty/garbage metadata file.
func (s *Store) writeMeta(rec *FileRecord) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	bb, err := json.Marshal(rec)
	if err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(s.metaDir, rec.ID+".json"), bb, 0o600)
}
// newID derives a 32-hex-character identifier by hashing seed together
// with the current wall-clock nanoseconds, so repeated calls with the
// same seed still yield distinct IDs.
func newID(seed string) string {
	material := fmt.Sprintf("%s|%d", seed, time.Now().UnixNano())
	sum := sha256.Sum256([]byte(material))
	return hex.EncodeToString(sum[:16])
}
// parseMeta parses a comma-separated list of key=value pairs into a map.
// It returns nil for the empty string; entries without '=' are ignored,
// and keys/values are trimmed of surrounding whitespace.
func parseMeta(s string) map[string]string {
	if len(s) == 0 {
		return nil
	}
	out := make(map[string]string)
	for _, pair := range strings.Split(s, ",") {
		if k, v, ok := strings.Cut(pair, "="); ok {
			out[strings.TrimSpace(k)] = strings.TrimSpace(v)
		}
	}
	return out
}