Compare commits

...

No commits in common. "archive-go" and "master" have entirely different histories.

252 changed files with 31996 additions and 1485 deletions

36
.env.example Normal file
View File

@ -0,0 +1,36 @@
# =============================================================================
# Tanabata File Manager — environment variables
# Copy to .env and fill in the values.
# =============================================================================
# ---------------------------------------------------------------------------
# Server
# ---------------------------------------------------------------------------
LISTEN_ADDR=:8080
JWT_SECRET=change-me-to-a-random-32-byte-secret
JWT_ACCESS_TTL=15m
JWT_REFRESH_TTL=720h
# ---------------------------------------------------------------------------
# Database
# ---------------------------------------------------------------------------
DATABASE_URL=postgres://tanabata:password@localhost:5432/tanabata?sslmode=disable
# ---------------------------------------------------------------------------
# Storage
# ---------------------------------------------------------------------------
FILES_PATH=/data/files
THUMBS_CACHE_PATH=/data/thumbs
# ---------------------------------------------------------------------------
# Thumbnails
# ---------------------------------------------------------------------------
THUMB_WIDTH=160
THUMB_HEIGHT=160
PREVIEW_WIDTH=1920
PREVIEW_HEIGHT=1080
# ---------------------------------------------------------------------------
# Import
# ---------------------------------------------------------------------------
IMPORT_PATH=/data/import

67
.gitattributes vendored Normal file
View File

@ -0,0 +1,67 @@
# =============================================================================
# Tanabata File Manager — .gitattributes
# =============================================================================
# ---------------------------------------------------------------------------
# Line endings: normalize to LF in repo, native on checkout
# ---------------------------------------------------------------------------
* text=auto eol=lf
# ---------------------------------------------------------------------------
# Explicitly text
# ---------------------------------------------------------------------------
*.go text eol=lf
*.mod text eol=lf
*.sum text eol=lf
*.sql text eol=lf
*.ts text eol=lf
*.js text eol=lf
*.svelte text eol=lf
*.html text eol=lf
*.css text eol=lf
*.json text eol=lf
*.yaml text eol=lf
*.yml text eol=lf
*.md text eol=lf
*.txt text eol=lf
*.env* text eol=lf
*.sh text eol=lf
*.toml text eol=lf
*.xml text eol=lf
*.svg text eol=lf
Dockerfile text eol=lf
Makefile text eol=lf
# ---------------------------------------------------------------------------
# Explicitly binary
# ---------------------------------------------------------------------------
*.png binary
*.jpg binary
*.jpeg binary
*.gif binary
*.webp binary
*.ico binary
*.ttf binary
*.woff binary
*.woff2 binary
*.eot binary
*.otf binary
*.zip binary
*.gz binary
*.tar binary
# ---------------------------------------------------------------------------
# Diff behavior
# ---------------------------------------------------------------------------
*.sql diff=sql
*.go diff=golang
*.css diff=css
*.html diff=html
# ---------------------------------------------------------------------------
# Linguist: set repo language stats correctly
# ---------------------------------------------------------------------------
docs/reference/** linguist-documentation
frontend/static/** linguist-vendored
*.min.js linguist-vendored
*.min.css linguist-vendored

86
.gitignore vendored Normal file
View File

@ -0,0 +1,86 @@
# =============================================================================
# Tanabata File Manager — .gitignore
# =============================================================================
# ---------------------------------------------------------------------------
# Environment & secrets
# ---------------------------------------------------------------------------
.env
.env.local
.env.*.local
*.pem
*.key
# ---------------------------------------------------------------------------
# OS
# ---------------------------------------------------------------------------
.DS_Store
Thumbs.db
Desktop.ini
*.swp
*.swo
*~
# ---------------------------------------------------------------------------
# IDE
# ---------------------------------------------------------------------------
.vscode/*
!.vscode/extensions.json
!.vscode/settings.json
.idea/
*.iml
*.sublime-project
*.sublime-workspace
# ---------------------------------------------------------------------------
# Backend (Go)
# ---------------------------------------------------------------------------
backend/tmp/
backend/cmd/server/server
*.exe
*.exe~
*.dll
*.so
*.dylib
*.test
*.out
*.prof
coverage.out
coverage.html
# ---------------------------------------------------------------------------
# Frontend (SvelteKit / Node)
# ---------------------------------------------------------------------------
frontend/node_modules/
frontend/.svelte-kit/
frontend/build/
frontend/dist/
frontend/src/lib/api/schema.ts
# ---------------------------------------------------------------------------
# Docker
# ---------------------------------------------------------------------------
docker-compose.override.yml
# ---------------------------------------------------------------------------
# Data directories (runtime, not in repo)
# ---------------------------------------------------------------------------
data/
*.sqlite
*.sqlite3
# ---------------------------------------------------------------------------
# Misc
# ---------------------------------------------------------------------------
*.log
*.pid
*.seed
# ---------------------------------------------------------------------------
# Reference: exclude vendored libs, keep design sources
# ---------------------------------------------------------------------------
docs/reference/**/bootstrap.min.css
docs/reference/**/bootstrap.min.css.map
docs/reference/**/jquery-*.min.js
docs/reference/**/__pycache__/
docs/reference/**/*.pyc

58
CLAUDE.md Normal file
View File

@ -0,0 +1,58 @@
# Tanabata File Manager
Multi-user, tag-based web file manager for images and video.
## Architecture
Monorepo: `backend/` (Go) + `frontend/` (SvelteKit).
- Backend: Go + Gin + pgx v5 + goose migrations. Clean Architecture.
- Frontend: SvelteKit SPA + Tailwind CSS + CSS custom properties.
- DB: PostgreSQL 14+.
- Auth: JWT Bearer tokens.
## Key documents (read before coding)
- `openapi.yaml` — full REST API specification (36 paths, 58 operations)
- `docs/GO_PROJECT_STRUCTURE.md` — backend architecture, layer rules, DI pattern
- `docs/FRONTEND_STRUCTURE.md` — frontend architecture, CSS approach, API client
- `docs/Описание.md` — product requirements in Russian
- `backend/migrations/001_init.sql` — database schema (4 schemas, 16 tables)
## Design reference
The `docs/reference/` directory contains the previous Python/Flask version.
Use its visual design as the basis for the new frontend:
- Color palette: #312F45 (bg), #9592B5 (accent), #444455 (tag default), #111118 (elevated)
- Font: Epilogue (variable weight)
- Dark theme is primary
- Mobile-first layout with bottom navbar
- 160×160 thumbnail grid for files
- Colored tag pills
- Floating selection bar for multi-select
## Backend commands
```bash
cd backend
go run ./cmd/server # run dev server
go test ./... # run all tests
```
## Frontend commands
```bash
cd frontend
npm run dev # vite dev server
npm run build # production build
npm run generate:types # regenerate API types from openapi.yaml
```
## Conventions
- Go: gofmt, no global state, context.Context as first param in all service methods
- TypeScript: strict mode, named exports
- SQL: snake_case, all migrations via goose
- API errors: { code, message, details? }
- Git: conventional commits with scope — `type(scope): message`
- `(backend)` for Go backend code
- `(frontend)` for SvelteKit/TypeScript code
- `(project)` for root-level files (.gitignore, docs/reference, structure)

119
backend/cmd/server/main.go Normal file
View File

@ -0,0 +1,119 @@
package main
import (
"context"
"log/slog"
"os"
"github.com/jackc/pgx/v5/stdlib"
"github.com/pressly/goose/v3"
"tanabata/backend/internal/config"
"tanabata/backend/internal/db/postgres"
"tanabata/backend/internal/handler"
"tanabata/backend/internal/service"
"tanabata/backend/internal/storage"
"tanabata/backend/migrations"
)
// main wires together configuration, database, storage, repositories,
// services and HTTP handlers, then runs the server until it exits.
// Any failure during startup is logged and terminates the process.
func main() {
	cfg, err := config.Load()
	if err != nil {
		slog.Error("failed to load config", "err", err)
		os.Exit(1)
	}

	pool, err := postgres.NewPool(context.Background(), cfg.DatabaseURL)
	if err != nil {
		slog.Error("failed to connect to database", "err", err)
		os.Exit(1)
	}
	defer pool.Close()
	slog.Info("database connected")

	// Apply embedded goose migrations through a database/sql handle
	// borrowed from the pgx pool; the handle is closed once done.
	sqlDB := stdlib.OpenDBFromPool(pool)
	goose.SetBaseFS(migrations.FS)
	if err := goose.SetDialect("postgres"); err != nil {
		slog.Error("goose dialect error", "err", err)
		os.Exit(1)
	}
	if err := goose.Up(sqlDB, "."); err != nil {
		slog.Error("migrations failed", "err", err)
		os.Exit(1)
	}
	sqlDB.Close()
	slog.Info("migrations applied")

	// Disk-backed file/thumbnail storage, sized from config.
	blobStore, err := storage.NewDiskStorage(
		cfg.FilesPath,
		cfg.ThumbsCachePath,
		cfg.ThumbWidth, cfg.ThumbHeight,
		cfg.PreviewWidth, cfg.PreviewHeight,
	)
	if err != nil {
		slog.Error("failed to initialise storage", "err", err)
		os.Exit(1)
	}

	// Data-access layer: one repository per aggregate, all sharing the pool.
	var (
		usersRepo      = postgres.NewUserRepo(pool)
		sessionsRepo   = postgres.NewSessionRepo(pool)
		filesRepo      = postgres.NewFileRepo(pool)
		mimesRepo      = postgres.NewMimeRepo(pool)
		aclsRepo       = postgres.NewACLRepo(pool)
		auditsRepo     = postgres.NewAuditRepo(pool)
		tagsRepo       = postgres.NewTagRepo(pool)
		tagRulesRepo   = postgres.NewTagRuleRepo(pool)
		categoriesRepo = postgres.NewCategoryRepo(pool)
		poolsRepo      = postgres.NewPoolRepo(pool)
		tx             = postgres.NewTransactor(pool)
	)

	// Business-logic layer.
	authService := service.NewAuthService(
		usersRepo,
		sessionsRepo,
		cfg.JWTSecret,
		cfg.JWTAccessTTL,
		cfg.JWTRefreshTTL,
	)
	aclService := service.NewACLService(aclsRepo)
	auditService := service.NewAuditService(auditsRepo)
	tagService := service.NewTagService(tagsRepo, tagRulesRepo, aclService, auditService, tx)
	categoryService := service.NewCategoryService(categoriesRepo, tagsRepo, aclService, auditService)
	poolService := service.NewPoolService(poolsRepo, aclService, auditService)
	fileService := service.NewFileService(
		filesRepo,
		mimesRepo,
		blobStore,
		aclService,
		auditService,
		tagService,
		tx,
		cfg.ImportPath,
	)
	userService := service.NewUserService(usersRepo, auditService)

	// HTTP layer: handlers are constructed in argument order, which Go
	// evaluates left to right — identical to building them one by one.
	router := handler.NewRouter(
		handler.NewAuthMiddleware(authService),
		handler.NewAuthHandler(authService),
		handler.NewFileHandler(fileService, tagService),
		handler.NewTagHandler(tagService, fileService),
		handler.NewCategoryHandler(categoryService),
		handler.NewPoolHandler(poolService),
		handler.NewUserHandler(userService),
		handler.NewACLHandler(aclService),
		handler.NewAuditHandler(auditService),
	)

	slog.Info("starting server", "addr", cfg.ListenAddr)
	if err := router.Run(cfg.ListenAddr); err != nil {
		slog.Error("server error", "err", err)
		os.Exit(1)
	}
}

View File

@ -1,19 +1,104 @@
module tanabata module tanabata/backend
go 1.23.0 go 1.26
toolchain go1.23.10 toolchain go1.26.1
require github.com/jackc/pgx/v5 v5.7.5
require ( require (
github.com/disintegration/imaging v1.6.2
github.com/gabriel-vasile/mimetype v1.4.12
github.com/gin-gonic/gin v1.9.1
github.com/golang-jwt/jwt/v5 v5.3.1
github.com/google/uuid v1.6.0
github.com/jackc/pgx/v5 v5.5.5
github.com/joho/godotenv v1.5.1
github.com/pressly/goose/v3 v3.21.1
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd
github.com/stretchr/testify v1.11.1
github.com/testcontainers/testcontainers-go v0.41.0
github.com/testcontainers/testcontainers-go/modules/postgres v0.41.0
golang.org/x/crypto v0.48.0
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8
)
require (
dario.cat/mergo v1.0.2 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/bytedance/gopkg v0.1.3 // indirect
github.com/bytedance/sonic v1.15.0 // indirect
github.com/bytedance/sonic/loader v0.5.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cloudwego/base64x v0.1.6 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
github.com/cpuguy83/dockercfg v0.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/docker v28.5.2+incompatible // indirect
github.com/docker/go-connections v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/ebitengine/purego v0.10.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.22.0 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/pgx v3.6.2+incompatible // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.18.2 // indirect
github.com/klauspost/cpuid/v2 v2.2.9 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/magiconair/properties v1.8.10 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mfridman/interpolate v0.0.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/go-archive v0.2.0 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/sys/sequential v0.6.0 // indirect
github.com/moby/sys/user v0.4.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/moby/term v0.5.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pkg/errors v0.9.1 // indirect github.com/pkg/errors v0.9.1 // indirect
github.com/stretchr/testify v1.9.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
golang.org/x/crypto v0.37.0 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
golang.org/x/sync v0.13.0 // indirect github.com/sethvargo/go-retry v0.3.0 // indirect
golang.org/x/text v0.24.0 // indirect github.com/shirou/gopsutil/v4 v4.26.2 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/tklauser/go-sysconf v0.3.16 // indirect
github.com/tklauser/numcpus v0.11.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.3.1 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
go.opentelemetry.io/otel v1.41.0 // indirect
go.opentelemetry.io/otel/metric v1.41.0 // indirect
go.opentelemetry.io/otel/trace v1.41.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.22.0 // indirect
golang.org/x/net v0.49.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.41.0 // indirect
golang.org/x/text v0.34.0 // indirect
google.golang.org/grpc v1.79.1 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
) )

View File

@ -1,32 +1,275 @@
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M=
github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM=
github.com/bytedance/sonic v1.15.0 h1:/PXeWFaR5ElNcVE84U0dOHjiMHQOwNIx3K4ymzh/uSE=
github.com/bytedance/sonic v1.15.0/go.mod h1:tFkWrPz0/CUCLEF4ri4UkHekCIcdnkqXw9VduqpJh0k=
github.com/bytedance/sonic/loader v0.5.0 h1:gXH3KVnatgY7loH5/TkeVyXPfESoqSBSBEiDd5VjlgE=
github.com/bytedance/sonic/loader v0.5.0/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M=
github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU=
github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/gabriel-vasile/mimetype v1.4.12 h1:e9hWvmLYvtp846tLHam2o++qitpguFiYCKbn0w9jyqw=
github.com/gabriel-vasile/mimetype v1.4.12/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao=
github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY=
github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o= github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE=
github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI=
github.com/mdelapenya/tlscert v0.2.0/go.mod h1:O4njj3ELLnJjGdkN7M/vIVCpZ+Cf0L6muqOG4tLSl8o=
github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY=
github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/go-archive v0.2.0 h1:zg5QDUM2mi0JIM9fdQZWC7U8+2ZfixfTYoHL7rWUcP8=
github.com/moby/go-archive v0.2.0/go.mod h1:mNeivT14o8xU+5q1YnNrkQVpK+dnNe/K6fHqnTg4qPU=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/pressly/goose/v3 v3.21.1 h1:5SSAKKWej8LVVzNLuT6KIvP1eFDuPvxa+B6H0w78buQ=
github.com/pressly/goose/v3 v3.21.1/go.mod h1:sqthmzV8PitchEkjecFJII//l43dLOCzfWh8pHEe+vE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc=
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=
github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE=
github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas=
github.com/shirou/gopsutil/v4 v4.26.2 h1:X8i6sicvUFih4BmYIGT1m2wwgw2VG9YgrDTi7cIRGUI=
github.com/shirou/gopsutil/v4 v4.26.2/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/testcontainers/testcontainers-go v0.41.0 h1:mfpsD0D36YgkxGj2LrIyxuwQ9i2wCKAD+ESsYM1wais=
github.com/testcontainers/testcontainers-go v0.41.0/go.mod h1:pdFrEIfaPl24zmBjerWTTYaY0M6UHsqA1YSvsoU40MI=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
github.com/testcontainers/testcontainers-go/modules/postgres v0.41.0 h1:AOtFXssrDlLm84A2sTTR/AhvJiYbrIuCO59d+Ro9Tb0=
github.com/testcontainers/testcontainers-go/modules/postgres v0.41.0/go.mod h1:k2a09UKhgSp6vNpliIY0QSgm4Hi7GXVTzWvWgUemu/8=
github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA=
github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI=
github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw=
github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go/codec v1.3.1 h1:waO7eEiFDwidsBN6agj1vJQ4AG7lh2yqXyOXqhgQuyY=
github.com/ugorji/go/codec v1.3.1/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
go.opentelemetry.io/otel v1.41.0 h1:YlEwVsGAlCvczDILpUXpIpPSL/VPugt7zHThEMLce1c=
go.opentelemetry.io/otel v1.41.0/go.mod h1:Yt4UwgEKeT05QbLwbyHXEwhnjxNO6D8L5PQP51/46dE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0 h1:inYW9ZhgqiDqh6BioM7DVHHzEGVq76Db5897WLGZ5Go=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.41.0/go.mod h1:Izur+Wt8gClgMJqO/cZ8wdeeMryJ/xxiOVgFSSfpDTY=
go.opentelemetry.io/otel/metric v1.41.0 h1:rFnDcs4gRzBcsO9tS8LCpgR0dxg4aaxWlJxCno7JlTQ=
go.opentelemetry.io/otel/metric v1.41.0/go.mod h1:xPvCwd9pU0VN8tPZYzDZV/BMj9CM9vs00GuBjeKhJps=
go.opentelemetry.io/otel/sdk v1.41.0 h1:YPIEXKmiAwkGl3Gu1huk1aYWwtpRLeskpV+wPisxBp8=
go.opentelemetry.io/otel/sdk v1.41.0/go.mod h1:ahFdU0G5y8IxglBf0QBJXgSe7agzjE4GiTJ6HT9ud90=
go.opentelemetry.io/otel/trace v1.41.0 h1:Vbk2co6bhj8L59ZJ6/xFTskY+tGAbOnCtQGVVa9TIN0=
go.opentelemetry.io/otel/trace v1.41.0/go.mod h1:U1NU4ULCoxeDKc09yCWdWe+3QoyweJcISEVa1RBzOis=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/arch v0.22.0 h1:c/Zle32i5ttqRXjdLyyHZESLD/bB90DCU1g9l/0YBDI=
golang.org/x/arch v0.22.0/go.mod h1:dNHoOeKiyja7GTvF9NJS1l3Z2yntpQNzgrjh1cU103A=
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 h1:hVwzHzIUGRjiF7EcUjqNxk3NCfkPxbDKRdnNE1Rpg0U=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 h1:JLQynH/LBHfCTSbDWl+py8C+Rg/k1OVH3xfcaiANuF0=
google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY=
google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 h1:5D53IMaUuA5InSeMu9eJtlQXS2NxAhyWQvkKEgXZhHI=
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4=
modernc.org/libc v1.41.0 h1:g9YAc6BkKlgORsUWj+JwqoB1wU3o4DE3bM3yvA3k+Gk=
modernc.org/libc v1.41.0/go.mod h1:w0eszPsiXoOnoMJgrXjglgLuDy/bt5RR4y3QzUUeodY=
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E=
modernc.org/sqlite v1.29.6 h1:0lOXGrycJPptfHDuohfYgNqoe4hu+gYuN/pKgY5XjS4=
modernc.org/sqlite v1.29.6/go.mod h1:S02dvcmm7TnTRvGhv8IGYyLnIt7AS2KPaB1F/71p75U=
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=

View File

@ -0,0 +1,107 @@
package config
import (
"errors"
"fmt"
"os"
"strconv"
"time"
"github.com/joho/godotenv"
)
// Config holds all application configuration loaded from environment variables.
// Each field notes the variable it is read from; "required" fields cause
// Load to return an error when unset.
type Config struct {
	// Server
	ListenAddr    string        // HTTP listen address, e.g. ":8080" (LISTEN_ADDR)
	JWTSecret     string        // signing secret for JWTs (JWT_SECRET, required)
	JWTAccessTTL  time.Duration // access-token lifetime (JWT_ACCESS_TTL)
	JWTRefreshTTL time.Duration // refresh-token lifetime (JWT_REFRESH_TTL)

	// Database
	DatabaseURL string // PostgreSQL connection string (DATABASE_URL, required)

	// Storage
	FilesPath       string // root directory for stored files (FILES_PATH, required)
	ThumbsCachePath string // directory for cached thumbnails (THUMBS_CACHE_PATH, required)

	// Thumbnails
	ThumbWidth    int // thumbnail width in pixels (THUMB_WIDTH)
	ThumbHeight   int // thumbnail height in pixels (THUMB_HEIGHT)
	PreviewWidth  int // preview width in pixels (PREVIEW_WIDTH)
	PreviewHeight int // preview height in pixels (PREVIEW_HEIGHT)

	// Import
	ImportPath string // directory scanned for imports (IMPORT_PATH, required)
}
// Load reads a .env file (if present) then loads all configuration from
// environment variables. Every missing or invalid variable is collected and
// reported in a single joined error, so the operator sees all problems at once.
func Load() (*Config, error) {
	// Best effort: a missing .env file is normal in production.
	_ = godotenv.Load()

	var problems []error

	// mustEnv records an error for empty required variables.
	mustEnv := func(key string) string {
		val := os.Getenv(key)
		if val == "" {
			problems = append(problems, fmt.Errorf("%s is required", key))
		}
		return val
	}

	// envOr returns the variable's value, or fallback when unset/empty.
	envOr := func(key, fallback string) string {
		if val := os.Getenv(key); val != "" {
			return val
		}
		return fallback
	}

	// duration parses a Go duration string ("15m", "720h"), defaulting to fallback.
	duration := func(key, fallback string) time.Duration {
		text := envOr(key, fallback)
		dur, err := time.ParseDuration(text)
		if err != nil {
			problems = append(problems, fmt.Errorf("%s: invalid duration %q: %w", key, text, err))
			return 0
		}
		return dur
	}

	// integer parses a base-10 int, defaulting to fallback when unset or invalid.
	integer := func(key string, fallback int) int {
		text := os.Getenv(key)
		if text == "" {
			return fallback
		}
		val, err := strconv.Atoi(text)
		if err != nil {
			problems = append(problems, fmt.Errorf("%s: invalid integer %q: %w", key, text, err))
			return fallback
		}
		return val
	}

	cfg := &Config{
		ListenAddr:      envOr("LISTEN_ADDR", ":8080"),
		JWTSecret:       mustEnv("JWT_SECRET"),
		JWTAccessTTL:    duration("JWT_ACCESS_TTL", "15m"),
		JWTRefreshTTL:   duration("JWT_REFRESH_TTL", "720h"),
		DatabaseURL:     mustEnv("DATABASE_URL"),
		FilesPath:       mustEnv("FILES_PATH"),
		ThumbsCachePath: mustEnv("THUMBS_CACHE_PATH"),
		ThumbWidth:      integer("THUMB_WIDTH", 160),
		ThumbHeight:     integer("THUMB_HEIGHT", 160),
		PreviewWidth:    integer("PREVIEW_WIDTH", 1920),
		PreviewHeight:   integer("PREVIEW_HEIGHT", 1080),
		ImportPath:      mustEnv("IMPORT_PATH"),
	}

	if len(problems) > 0 {
		return nil, errors.Join(problems...)
	}
	return cfg, nil
}

70
backend/internal/db/db.go Normal file
View File

@ -0,0 +1,70 @@
// Package db provides shared helpers used by all database adapters.
package db
import (
	"context"
	"errors"
	"fmt"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgconn"
)
// txKey is the context key used to store an active transaction.
// An unexported struct type guarantees no collisions with keys from
// other packages.
type txKey struct{}

// TxFromContext returns the pgx.Tx stored in ctx by the Transactor, along
// with a boolean indicating whether a transaction is active.
func TxFromContext(ctx context.Context) (pgx.Tx, bool) {
	tx, ok := ctx.Value(txKey{}).(pgx.Tx)
	return tx, ok
}

// ContextWithTx returns a copy of ctx that carries tx.
// Called by the Transactor before invoking the user function.
func ContextWithTx(ctx context.Context, tx pgx.Tx) context.Context {
	return context.WithValue(ctx, txKey{}, tx)
}
// Querier is the common query interface satisfied by both *pgxpool.Pool and
// pgx.Tx, allowing repo helpers to work with either. It covers the three
// call shapes the repos use: single-row query, multi-row query, and
// statement execution.
type Querier interface {
	QueryRow(ctx context.Context, sql string, args ...any) pgx.Row
	Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error)
	Exec(ctx context.Context, sql string, args ...any) (pgconn.CommandTag, error)
}
// ScanRow executes a single-row query against q and scans the result using
// scan. A missing row is reported as the pgx.ErrNoRows sentinel, which
// callers can detect with errors.Is; any other failure is wrapped with a
// "ScanRow:" prefix for context.
func ScanRow[T any](ctx context.Context, q Querier, sql string, args []any, scan func(pgx.Row) (T, error)) (T, error) {
	row := q.QueryRow(ctx, sql, args...)
	val, err := scan(row)
	if err != nil {
		var zero T
		// errors.Is instead of ==: the scan callback may wrap the error,
		// in which case a plain comparison would miss it.
		if errors.Is(err, pgx.ErrNoRows) {
			return zero, pgx.ErrNoRows
		}
		return zero, fmt.Errorf("ScanRow: %w", err)
	}
	return val, nil
}
// ClampLimit enforces the [1, max] range on limit, returning def when limit
// is zero or negative.
func ClampLimit(limit, def, max int) int {
	switch {
	case limit <= 0:
		return def
	case limit > max:
		return max
	default:
		return limit
	}
}
// ClampOffset returns 0 for negative offsets.
func ClampOffset(offset int) int {
	if offset >= 0 {
		return offset
	}
	return 0
}

View File

@ -0,0 +1,118 @@
package postgres
import (
"context"
"errors"
"fmt"
"github.com/google/uuid"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// permissionRow mirrors one row of the ACL SELECT queries below
// (acl.permissions joined with core.users for the grantee's name).
type permissionRow struct {
	UserID       int16     `db:"user_id"`
	UserName     string    `db:"user_name"`
	ObjectTypeID int16     `db:"object_type_id"`
	ObjectID     uuid.UUID `db:"object_id"`
	CanView      bool      `db:"can_view"`
	CanEdit      bool      `db:"can_edit"`
}
// toPermission converts a scanned permissionRow into the domain model.
func toPermission(src permissionRow) domain.Permission {
	return domain.Permission{
		ObjectTypeID: src.ObjectTypeID,
		ObjectID:     src.ObjectID,
		UserID:       src.UserID,
		UserName:     src.UserName,
		CanView:      src.CanView,
		CanEdit:      src.CanEdit,
	}
}
// ACLRepo implements port.ACLRepo using PostgreSQL.
type ACLRepo struct {
	pool *pgxpool.Pool // used when no transaction is carried on the context
}

// NewACLRepo creates an ACLRepo backed by pool.
func NewACLRepo(pool *pgxpool.Pool) *ACLRepo {
	return &ACLRepo{pool: pool}
}

// Compile-time check that ACLRepo satisfies the port interface.
var _ port.ACLRepo = (*ACLRepo)(nil)
// List returns every permission granted on the object identified by
// (objectTypeID, objectID), with the grantee's name resolved, ordered by
// that name.
func (r *ACLRepo) List(ctx context.Context, objectTypeID int16, objectID uuid.UUID) ([]domain.Permission, error) {
	const sql = `
SELECT p.user_id, u.name AS user_name, p.object_type_id, p.object_id,
       p.can_view, p.can_edit
FROM acl.permissions p
JOIN core.users u ON u.id = p.user_id
WHERE p.object_type_id = $1 AND p.object_id = $2
ORDER BY u.name`

	conn := connOrTx(ctx, r.pool)
	rows, err := conn.Query(ctx, sql, objectTypeID, objectID)
	if err != nil {
		return nil, fmt.Errorf("ACLRepo.List: %w", err)
	}
	scanned, err := pgx.CollectRows(rows, pgx.RowToStructByName[permissionRow])
	if err != nil {
		return nil, fmt.Errorf("ACLRepo.List scan: %w", err)
	}

	result := make([]domain.Permission, 0, len(scanned))
	for _, rec := range scanned {
		result = append(result, toPermission(rec))
	}
	return result, nil
}
// Get loads the single permission row for user userID on the given object.
// Returns domain.ErrNotFound when no such grant exists.
func (r *ACLRepo) Get(ctx context.Context, userID int16, objectTypeID int16, objectID uuid.UUID) (*domain.Permission, error) {
	const sql = `
SELECT p.user_id, u.name AS user_name, p.object_type_id, p.object_id,
       p.can_view, p.can_edit
FROM acl.permissions p
JOIN core.users u ON u.id = p.user_id
WHERE p.user_id = $1 AND p.object_type_id = $2 AND p.object_id = $3`

	rows, err := connOrTx(ctx, r.pool).Query(ctx, sql, userID, objectTypeID, objectID)
	if err != nil {
		return nil, fmt.Errorf("ACLRepo.Get: %w", err)
	}
	rec, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[permissionRow])
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			return nil, domain.ErrNotFound
		}
		return nil, fmt.Errorf("ACLRepo.Get scan: %w", err)
	}

	perm := toPermission(rec)
	return &perm, nil
}
// Set replaces the full permission list for an object: existing grants are
// deleted, then every entry in perms is inserted.
// NOTE(review): delete + insert is only atomic when the caller runs this
// inside a transaction carried on ctx (picked up via connOrTx) — confirm
// callers do so.
func (r *ACLRepo) Set(ctx context.Context, objectTypeID int16, objectID uuid.UUID, perms []domain.Permission) error {
	conn := connOrTx(ctx, r.pool)

	const del = `DELETE FROM acl.permissions WHERE object_type_id = $1 AND object_id = $2`
	if _, err := conn.Exec(ctx, del, objectTypeID, objectID); err != nil {
		return fmt.Errorf("ACLRepo.Set delete: %w", err)
	}
	if len(perms) == 0 {
		return nil
	}

	const ins = `
INSERT INTO acl.permissions (user_id, object_type_id, object_id, can_view, can_edit)
VALUES ($1, $2, $3, $4, $5)`
	for _, perm := range perms {
		_, err := conn.Exec(ctx, ins, perm.UserID, objectTypeID, objectID, perm.CanView, perm.CanEdit)
		if err != nil {
			return fmt.Errorf("ACLRepo.Set insert: %w", err)
		}
	}
	return nil
}

View File

@ -0,0 +1,169 @@
package postgres
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/google/uuid"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"tanabata/backend/internal/db"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// auditRowWithTotal matches the columns returned by the audit log SELECT.
// object_type is nullable (LEFT JOIN), object_id and details are nullable
// columns. Total carries the COUNT(*) OVER() window value, which is the
// same on every returned row.
type auditRowWithTotal struct {
	ID          int64           `db:"id"`
	UserID      int16           `db:"user_id"`
	UserName    string          `db:"user_name"`
	Action      string          `db:"action"`
	ObjectType  *string         `db:"object_type"`
	ObjectID    *uuid.UUID      `db:"object_id"`
	Details     json.RawMessage `db:"details"`
	PerformedAt time.Time       `db:"performed_at"`
	Total       int             `db:"total"`
}
// toAuditEntry converts a scanned row (dropping the window total) into the
// domain model.
func toAuditEntry(src auditRowWithTotal) domain.AuditEntry {
	return domain.AuditEntry{
		ID:          src.ID,
		PerformedAt: src.PerformedAt,
		UserID:      src.UserID,
		UserName:    src.UserName,
		Action:      src.Action,
		ObjectType:  src.ObjectType,
		ObjectID:    src.ObjectID,
		Details:     src.Details,
	}
}
// AuditRepo implements port.AuditRepo using PostgreSQL.
type AuditRepo struct {
	pool *pgxpool.Pool // used when no transaction is carried on the context
}

// NewAuditRepo creates an AuditRepo backed by pool.
func NewAuditRepo(pool *pgxpool.Pool) *AuditRepo {
	return &AuditRepo{pool: pool}
}

// Compile-time check that AuditRepo satisfies the port interface.
var _ port.AuditRepo = (*AuditRepo)(nil)
// Log inserts one audit record. The action and (optional) object type names
// are resolved to reference-table ids by subqueries inside the INSERT, so
// callers pass plain strings; a nil ObjectType stores NULL.
func (r *AuditRepo) Log(ctx context.Context, entry domain.AuditEntry) error {
	const sql = `
INSERT INTO activity.audit_log
    (user_id, action_type_id, object_type_id, object_id, details)
VALUES (
    $1,
    (SELECT id FROM activity.action_types WHERE name = $2),
    CASE WHEN $3::text IS NOT NULL
         THEN (SELECT id FROM core.object_types WHERE name = $3)
         ELSE NULL END,
    $4,
    $5
)`

	conn := connOrTx(ctx, r.pool)
	if _, err := conn.Exec(ctx, sql,
		entry.UserID, entry.Action, entry.ObjectType, entry.ObjectID, entry.Details,
	); err != nil {
		return fmt.Errorf("AuditRepo.Log: %w", err)
	}
	return nil
}
// List returns a filtered, offset-paginated page of audit log entries
// ordered newest-first. Total is produced by a window function in the same
// query, so it reflects the filter rather than just the returned page.
func (r *AuditRepo) List(ctx context.Context, filter domain.AuditFilter) (*domain.AuditPage, error) {
	var conds []string
	args := make([]any, 0, 8)

	// addCond appends one condition, numbering its placeholder after the
	// arguments collected so far (placeholders are 1-based).
	addCond := func(expr string, val any) {
		conds = append(conds, fmt.Sprintf(expr, len(args)+1))
		args = append(args, val)
	}

	if filter.UserID != nil {
		addCond("a.user_id = $%d", *filter.UserID)
	}
	if filter.Action != "" {
		addCond("at.name = $%d", filter.Action)
	}
	if filter.ObjectType != "" {
		addCond("ot.name = $%d", filter.ObjectType)
	}
	if filter.ObjectID != nil {
		addCond("a.object_id = $%d", *filter.ObjectID)
	}
	if filter.From != nil {
		addCond("a.performed_at >= $%d", *filter.From)
	}
	if filter.To != nil {
		addCond("a.performed_at <= $%d", *filter.To)
	}

	where := ""
	if len(conds) > 0 {
		where = "WHERE " + strings.Join(conds, " AND ")
	}

	limit := db.ClampLimit(filter.Limit, 50, 200)
	offset := db.ClampOffset(filter.Offset)
	limitPos := len(args) + 1 // placeholder index for LIMIT; OFFSET follows
	args = append(args, limit, offset)

	sql := fmt.Sprintf(`
SELECT a.id, a.user_id, u.name AS user_name,
       at.name AS action,
       ot.name AS object_type,
       a.object_id, a.details,
       a.performed_at,
       COUNT(*) OVER() AS total
FROM activity.audit_log a
JOIN core.users u ON u.id = a.user_id
JOIN activity.action_types at ON at.id = a.action_type_id
LEFT JOIN core.object_types ot ON ot.id = a.object_type_id
%s
ORDER BY a.performed_at DESC
LIMIT $%d OFFSET $%d`, where, limitPos, limitPos+1)

	rows, err := connOrTx(ctx, r.pool).Query(ctx, sql, args...)
	if err != nil {
		return nil, fmt.Errorf("AuditRepo.List: %w", err)
	}
	collected, err := pgx.CollectRows(rows, pgx.RowToStructByName[auditRowWithTotal])
	if err != nil {
		return nil, fmt.Errorf("AuditRepo.List scan: %w", err)
	}

	page := &domain.AuditPage{
		Offset: offset,
		Limit:  limit,
		Items:  make([]domain.AuditEntry, len(collected)),
	}
	for i, rec := range collected {
		page.Items[i] = toAuditEntry(rec)
	}
	if len(collected) > 0 {
		page.Total = collected[0].Total
	}
	return page, nil
}

View File

@ -0,0 +1,297 @@
package postgres
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"github.com/google/uuid"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// ---------------------------------------------------------------------------
// Row struct
// ---------------------------------------------------------------------------

// categoryRow mirrors one row of the category SELECT queries in this file
// (data.categories joined with core.users for the creator name).
type categoryRow struct {
	ID          uuid.UUID `db:"id"`
	Name        string    `db:"name"`
	Notes       *string   `db:"notes"`
	Color       *string   `db:"color"`
	Metadata    []byte    `db:"metadata"` // raw JSON; may be empty or "null" for SQL NULL
	CreatorID   int16     `db:"creator_id"`
	CreatorName string    `db:"creator_name"`
	IsPublic    bool      `db:"is_public"`
}

// categoryRowWithTotal adds the COUNT(*) OVER() window total used by List.
type categoryRowWithTotal struct {
	categoryRow
	Total int `db:"total"`
}
// ---------------------------------------------------------------------------
// Converter
// ---------------------------------------------------------------------------

// toCategory converts a scanned categoryRow into the domain model.
func toCategory(src categoryRow) domain.Category {
	cat := domain.Category{
		ID:          src.ID,
		Name:        src.Name,
		Notes:       src.Notes,
		Color:       src.Color,
		CreatorID:   src.CreatorID,
		CreatorName: src.CreatorName,
		IsPublic:    src.IsPublic,
		CreatedAt:   domain.UUIDCreatedAt(src.ID),
	}
	// SQL NULL may arrive as an empty slice or as the JSON literal "null";
	// both mean "no metadata" and leave cat.Metadata nil.
	if raw := string(src.Metadata); raw != "" && raw != "null" {
		cat.Metadata = json.RawMessage(src.Metadata)
	}
	return cat
}
// ---------------------------------------------------------------------------
// Shared SQL
// ---------------------------------------------------------------------------

// categorySelectFrom is the shared SELECT ... FROM ... JOIN prefix for
// single-row category queries; callers append their own WHERE clause.
const categorySelectFrom = `
SELECT
    c.id,
    c.name,
    c.notes,
    c.color,
    c.metadata,
    c.creator_id,
    u.name AS creator_name,
    c.is_public
FROM data.categories c
JOIN core.users u ON u.id = c.creator_id`
// categorySortColumn maps the API sort key to a whitelisted ORDER BY
// column; anything other than "name" (i.e. "created") falls back to c.id.
func categorySortColumn(key string) string {
	switch key {
	case "name":
		return "c.name"
	default:
		return "c.id" // "created"
	}
}
// ---------------------------------------------------------------------------
// CategoryRepo — implements port.CategoryRepo
// ---------------------------------------------------------------------------

// CategoryRepo handles category CRUD using PostgreSQL.
type CategoryRepo struct {
	pool *pgxpool.Pool // used when no transaction is carried on the context
}

// Compile-time check that CategoryRepo satisfies the port interface.
var _ port.CategoryRepo = (*CategoryRepo)(nil)

// NewCategoryRepo creates a CategoryRepo backed by pool.
func NewCategoryRepo(pool *pgxpool.Pool) *CategoryRepo {
	return &CategoryRepo{pool: pool}
}
// ---------------------------------------------------------------------------
// List
// ---------------------------------------------------------------------------

// List returns one offset page of categories, optionally filtered by a
// case-insensitive substring match on the name, sorted by name or by id
// ("created" order) with a stable id tie-breaker.
//
// Paging is clamped the same way as the other repos: limit defaults to 50
// and is capped at 200 so a single request cannot pull the whole table;
// negative offsets become 0.
func (r *CategoryRepo) List(ctx context.Context, params port.OffsetParams) (*domain.CategoryOffsetPage, error) {
	order := "ASC"
	if strings.ToLower(params.Order) == "desc" {
		order = "DESC"
	}
	sortCol := categorySortColumn(params.Sort)

	args := []any{}
	n := 1
	var conditions []string
	if params.Search != "" {
		// NOTE(review): the search term is not escaped, so '%' and '_' in
		// it act as LIKE wildcards — confirm this is intended.
		conditions = append(conditions, fmt.Sprintf("lower(c.name) LIKE lower($%d)", n))
		args = append(args, "%"+params.Search+"%")
		n++
	}
	where := ""
	if len(conditions) > 0 {
		where = "WHERE " + strings.Join(conditions, " AND ")
	}

	// Clamp paging inputs: default 50, hard cap 200, no negative offsets.
	limit := params.Limit
	switch {
	case limit <= 0:
		limit = 50
	case limit > 200:
		limit = 200
	}
	offset := params.Offset
	if offset < 0 {
		offset = 0
	}

	query := fmt.Sprintf(`
SELECT
    c.id, c.name, c.notes, c.color, c.metadata,
    c.creator_id, u.name AS creator_name, c.is_public,
    COUNT(*) OVER() AS total
FROM data.categories c
JOIN core.users u ON u.id = c.creator_id
%s
ORDER BY %s %s NULLS LAST, c.id ASC
LIMIT $%d OFFSET $%d`, where, sortCol, order, n, n+1)
	args = append(args, limit, offset)

	q := connOrTx(ctx, r.pool)
	rows, err := q.Query(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("CategoryRepo.List query: %w", err)
	}
	collected, err := pgx.CollectRows(rows, pgx.RowToStructByName[categoryRowWithTotal])
	if err != nil {
		return nil, fmt.Errorf("CategoryRepo.List scan: %w", err)
	}

	items := make([]domain.Category, len(collected))
	total := 0
	for i, row := range collected {
		items[i] = toCategory(row.categoryRow)
		total = row.Total // window function: same value on every row
	}
	return &domain.CategoryOffsetPage{
		Items:  items,
		Total:  total,
		Offset: offset,
		Limit:  limit,
	}, nil
}
// ---------------------------------------------------------------------------
// GetByID
// ---------------------------------------------------------------------------

// GetByID loads one category by id, returning domain.ErrNotFound when it
// does not exist.
func (r *CategoryRepo) GetByID(ctx context.Context, id uuid.UUID) (*domain.Category, error) {
	const query = categorySelectFrom + `
WHERE c.id = $1`

	rows, err := connOrTx(ctx, r.pool).Query(ctx, query, id)
	if err != nil {
		return nil, fmt.Errorf("CategoryRepo.GetByID: %w", err)
	}
	rec, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[categoryRow])
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			return nil, domain.ErrNotFound
		}
		return nil, fmt.Errorf("CategoryRepo.GetByID scan: %w", err)
	}

	cat := toCategory(rec)
	return &cat, nil
}
// ---------------------------------------------------------------------------
// Create
// ---------------------------------------------------------------------------

// Create inserts a new category and returns it with the creator name
// resolved. Returns domain.ErrConflict on a unique-constraint violation.
func (r *CategoryRepo) Create(ctx context.Context, c *domain.Category) (*domain.Category, error) {
	const query = `
WITH ins AS (
    INSERT INTO data.categories (name, notes, color, metadata, creator_id, is_public)
    VALUES ($1, $2, $3, $4, $5, $6)
    RETURNING *
)
SELECT ins.id, ins.name, ins.notes, ins.color, ins.metadata,
       ins.creator_id, u.name AS creator_name, ins.is_public
FROM ins
JOIN core.users u ON u.id = ins.creator_id`

	// Pass nil (SQL NULL) when no metadata was supplied.
	var meta any
	if len(c.Metadata) > 0 {
		meta = c.Metadata
	}

	rows, err := connOrTx(ctx, r.pool).Query(ctx, query,
		c.Name, c.Notes, c.Color, meta, c.CreatorID, c.IsPublic)
	if err != nil {
		return nil, fmt.Errorf("CategoryRepo.Create: %w", err)
	}
	rec, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[categoryRow])
	if err != nil {
		if isPgUniqueViolation(err) {
			return nil, domain.ErrConflict
		}
		return nil, fmt.Errorf("CategoryRepo.Create scan: %w", err)
	}

	out := toCategory(rec)
	return &out, nil
}
// ---------------------------------------------------------------------------
// Update
// ---------------------------------------------------------------------------

// Update replaces all mutable fields. The caller must merge current values
// with the patch before calling (read-then-write semantics). Metadata is
// only overwritten when a value is supplied — COALESCE keeps the stored
// value when $5 is NULL. Returns domain.ErrNotFound for an unknown id and
// domain.ErrConflict on a unique-constraint violation.
func (r *CategoryRepo) Update(ctx context.Context, id uuid.UUID, c *domain.Category) (*domain.Category, error) {
	const query = `
WITH upd AS (
    UPDATE data.categories SET
        name = $2,
        notes = $3,
        color = $4,
        metadata = COALESCE($5, metadata),
        is_public = $6
    WHERE id = $1
    RETURNING *
)
SELECT upd.id, upd.name, upd.notes, upd.color, upd.metadata,
       upd.creator_id, u.name AS creator_name, upd.is_public
FROM upd
JOIN core.users u ON u.id = upd.creator_id`

	var meta any
	if len(c.Metadata) > 0 {
		meta = c.Metadata
	}

	rows, err := connOrTx(ctx, r.pool).Query(ctx, query,
		id, c.Name, c.Notes, c.Color, meta, c.IsPublic)
	if err != nil {
		return nil, fmt.Errorf("CategoryRepo.Update: %w", err)
	}
	rec, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[categoryRow])
	if err != nil {
		switch {
		case errors.Is(err, pgx.ErrNoRows):
			return nil, domain.ErrNotFound
		case isPgUniqueViolation(err):
			return nil, domain.ErrConflict
		default:
			return nil, fmt.Errorf("CategoryRepo.Update scan: %w", err)
		}
	}

	out := toCategory(rec)
	return &out, nil
}
// ---------------------------------------------------------------------------
// Delete
// ---------------------------------------------------------------------------

// Delete removes a category by id, returning domain.ErrNotFound when no
// row was deleted.
func (r *CategoryRepo) Delete(ctx context.Context, id uuid.UUID) error {
	const query = `DELETE FROM data.categories WHERE id = $1`

	tag, err := connOrTx(ctx, r.pool).Exec(ctx, query, id)
	if err != nil {
		return fmt.Errorf("CategoryRepo.Delete: %w", err)
	}
	if tag.RowsAffected() == 0 {
		return domain.ErrNotFound
	}
	return nil
}

View File

@ -0,0 +1,796 @@
package postgres
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/google/uuid"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"tanabata/backend/internal/db"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// ---------------------------------------------------------------------------
// Row structs
// ---------------------------------------------------------------------------

// fileRow mirrors the columns returned by the file SELECT queries,
// including the creator's resolved name. Pointer fields map to nullable
// columns.
type fileRow struct {
	ID              uuid.UUID       `db:"id"`
	OriginalName    *string         `db:"original_name"`
	MIMEType        string          `db:"mime_type"`
	MIMEExtension   string          `db:"mime_extension"`
	ContentDatetime time.Time       `db:"content_datetime"`
	Notes           *string         `db:"notes"`
	Metadata        json.RawMessage `db:"metadata"`
	EXIF            json.RawMessage `db:"exif"`
	PHash           *int64          `db:"phash"` // perceptual hash; NULL for non-image files — TODO confirm
	CreatorID       int16           `db:"creator_id"`
	CreatorName     string          `db:"creator_name"`
	IsPublic        bool            `db:"is_public"`
	IsDeleted       bool            `db:"is_deleted"`
}
// fileTagRow is used for both single-file and batch tag loading.
// file_id is always selected so the same struct works for both cases.
type fileTagRow struct {
FileID uuid.UUID `db:"file_id"`
ID uuid.UUID `db:"id"`
Name string `db:"name"`
Notes *string `db:"notes"`
Color *string `db:"color"`
CategoryID *uuid.UUID `db:"category_id"`
CategoryName *string `db:"category_name"`
CategoryColor *string `db:"category_color"`
Metadata json.RawMessage `db:"metadata"`
CreatorID int16 `db:"creator_id"`
CreatorName string `db:"creator_name"`
IsPublic bool `db:"is_public"`
}
// anchorValRow holds the sort-column values fetched for an anchor file.
type anchorValRow struct {
ContentDatetime time.Time `db:"content_datetime"`
OriginalName string `db:"original_name"` // COALESCE(original_name,'') applied in SQL
MIMEType string `db:"mime_type"`
}
// ---------------------------------------------------------------------------
// Converters
// ---------------------------------------------------------------------------

// toFile maps a scanned fileRow onto the domain.File value. CreatedAt is
// derived from the UUID (see domain.UUIDCreatedAt), not from a DB column.
func toFile(r fileRow) domain.File {
	return domain.File{
		ID:              r.ID,
		OriginalName:    r.OriginalName,
		MIMEType:        r.MIMEType,
		MIMEExtension:   r.MIMEExtension,
		ContentDatetime: r.ContentDatetime,
		Notes:           r.Notes,
		Metadata:        r.Metadata,
		EXIF:            r.EXIF,
		PHash:           r.PHash,
		CreatorID:       r.CreatorID,
		CreatorName:     r.CreatorName,
		IsPublic:        r.IsPublic,
		IsDeleted:       r.IsDeleted,
		CreatedAt:       domain.UUIDCreatedAt(r.ID),
	}
}

// toTagFromFileTag maps a scanned fileTagRow onto domain.Tag, dropping the
// FileID join column. CreatedAt is derived from the tag's UUID.
func toTagFromFileTag(r fileTagRow) domain.Tag {
	return domain.Tag{
		ID:            r.ID,
		Name:          r.Name,
		Notes:         r.Notes,
		Color:         r.Color,
		CategoryID:    r.CategoryID,
		CategoryName:  r.CategoryName,
		CategoryColor: r.CategoryColor,
		Metadata:      r.Metadata,
		CreatorID:     r.CreatorID,
		CreatorName:   r.CreatorName,
		IsPublic:      r.IsPublic,
		CreatedAt:     domain.UUIDCreatedAt(r.ID),
	}
}
// ---------------------------------------------------------------------------
// Cursor
// ---------------------------------------------------------------------------

// fileCursor is the opaque pagination token. It pins the sort mode together
// with the boundary row so a page request cannot drift when the caller's
// query parameters change mid-session.
type fileCursor struct {
	Sort  string `json:"s"`  // canonical sort name
	Order string `json:"o"`  // "ASC" or "DESC"
	ID    string `json:"id"` // UUID of the boundary file
	Val   string `json:"v"`  // sort column value; empty for "created" (id IS the key)
}

// encodeCursor serializes c to URL-safe base64 (unpadded) over its JSON form.
func encodeCursor(c fileCursor) string {
	raw, _ := json.Marshal(c) // a plain struct of strings cannot fail to marshal
	return base64.RawURLEncoding.EncodeToString(raw)
}

// decodeCursor is the inverse of encodeCursor; it returns a zero fileCursor
// plus a generic error (no payload details leaked) on malformed input.
func decodeCursor(s string) (fileCursor, error) {
	var c fileCursor
	raw, err := base64.RawURLEncoding.DecodeString(s)
	if err != nil {
		return c, fmt.Errorf("cursor: invalid encoding")
	}
	if err := json.Unmarshal(raw, &c); err != nil {
		return c, fmt.Errorf("cursor: invalid format")
	}
	return c, nil
}
// makeCursor captures the keyset position of boundary row r under the given
// sort/order so pagination can resume exactly at this row.
func makeCursor(r fileRow, sort, order string) fileCursor {
	c := fileCursor{Sort: sort, Order: order, ID: r.ID.String()}
	switch sort {
	case "content_datetime":
		c.Val = r.ContentDatetime.UTC().Format(time.RFC3339Nano)
	case "original_name":
		if r.OriginalName != nil {
			c.Val = *r.OriginalName
		}
	case "mime":
		c.Val = r.MIMEType
		// "created": Val stays empty; f.id itself is the sort key.
	}
	return c
}
// ---------------------------------------------------------------------------
// Sort helpers
// ---------------------------------------------------------------------------

// normSort maps a user-supplied sort name onto the canonical set; anything
// unrecognized falls back to "created" (UUIDv7 id order).
func normSort(s string) string {
	known := map[string]bool{
		"content_datetime": true,
		"original_name":    true,
		"mime":             true,
	}
	if known[s] {
		return s
	}
	return "created"
}

// normOrder canonicalizes the order flag: any case-folded spelling of "asc"
// becomes "ASC"; everything else (including empty) defaults to "DESC".
func normOrder(o string) string {
	if !strings.EqualFold(o, "asc") {
		return "DESC"
	}
	return "ASC"
}
// buildKeysetCond returns a keyset WHERE fragment and an ORDER BY fragment.
//
// - forward=true: items after the cursor in the sort order (standard next-page)
// - forward=false: items before the cursor (previous-page); ORDER BY is reversed,
//   caller must reverse the result slice after fetching
// - incl=true: include the cursor file itself (anchor case; uses ≤ / ≥)
//
// n is the 1-based index of the next free $N placeholder; args is the
// parameter slice built so far. The function appends its own parameters and
// returns the advanced index (nextN) and extended slice (outArgs).
//
// All user values are bound as parameters — no SQL injection possible.
func buildKeysetCond(
	sort, order string,
	forward, incl bool,
	cursorID uuid.UUID, cursorVal string,
	n int, args []any,
) (where, orderBy string, nextN int, outArgs []any) {
	// goDown=true → want smaller values → primary comparison is "<".
	// Applies for DESC+forward and ASC+backward.
	goDown := (order == "DESC") == forward
	var op, idOp string
	if goDown {
		op = "<"
		// Inclusive (anchor) keeps the boundary row itself.
		if incl {
			idOp = "<="
		} else {
			idOp = "<"
		}
	} else {
		op = ">"
		if incl {
			idOp = ">="
		} else {
			idOp = ">"
		}
	}
	// Effective ORDER BY direction: reversed for backward so the DB returns
	// the closest items first (the ones we keep after trimming the extra).
	dir := order
	if !forward {
		if order == "DESC" {
			dir = "ASC"
		} else {
			dir = "DESC"
		}
	}
	switch sort {
	case "created":
		// Single-column keyset: f.id (UUID v7, so ordering = chronological).
		where = fmt.Sprintf("f.id %s $%d", idOp, n)
		orderBy = fmt.Sprintf("f.id %s", dir)
		outArgs = append(args, cursorID)
		n++
	case "content_datetime":
		// Two-column keyset: (content_datetime, id).
		// $n is referenced twice in the SQL (< and =) but passed once in args —
		// PostgreSQL extended protocol allows multiple references to $N.
		// NOTE(review): the parse error is discarded; a cursor with a malformed
		// Val degrades to the zero time rather than failing. decodeCursor only
		// validates JSON shape, not Val content — confirm this is acceptable.
		t, _ := time.Parse(time.RFC3339Nano, cursorVal)
		where = fmt.Sprintf(
			"(f.content_datetime %s $%d OR (f.content_datetime = $%d AND f.id %s $%d))",
			op, n, n, idOp, n+1)
		orderBy = fmt.Sprintf("f.content_datetime %s, f.id %s", dir, dir)
		outArgs = append(args, t, cursorID)
		n += 2
	case "original_name":
		// COALESCE treats NULL names as '' for stable pagination.
		where = fmt.Sprintf(
			"(COALESCE(f.original_name,'') %s $%d OR (COALESCE(f.original_name,'') = $%d AND f.id %s $%d))",
			op, n, n, idOp, n+1)
		orderBy = fmt.Sprintf("COALESCE(f.original_name,'') %s, f.id %s", dir, dir)
		outArgs = append(args, cursorVal, cursorID)
		n += 2
	default: // "mime"
		// mt alias comes from the mime_types JOIN in the caller's query.
		where = fmt.Sprintf(
			"(mt.name %s $%d OR (mt.name = $%d AND f.id %s $%d))",
			op, n, n, idOp, n+1)
		orderBy = fmt.Sprintf("mt.name %s, f.id %s", dir, dir)
		outArgs = append(args, cursorVal, cursorID)
		n += 2
	}
	nextN = n
	return
}
// defaultOrderBy returns the natural ORDER BY for the first page (no cursor).
// Except for "created" (where f.id alone is total), f.id is appended as the
// tiebreaker so the ordering is deterministic.
func defaultOrderBy(sort, order string) string {
	var key string
	switch sort {
	case "created":
		return fmt.Sprintf("f.id %s", order)
	case "content_datetime":
		key = "f.content_datetime"
	case "original_name":
		key = "COALESCE(f.original_name,'')"
	default: // "mime"
		key = "mt.name"
	}
	return fmt.Sprintf("%s %s, f.id %s", key, order, order)
}
// ---------------------------------------------------------------------------
// FileRepo
// ---------------------------------------------------------------------------

// FileRepo implements port.FileRepo using PostgreSQL.
type FileRepo struct {
	pool *pgxpool.Pool // shared connection pool; per-call tx override via connOrTx
}

// NewFileRepo creates a FileRepo backed by pool.
func NewFileRepo(pool *pgxpool.Pool) *FileRepo {
	return &FileRepo{pool: pool}
}

// Compile-time interface conformance check.
var _ port.FileRepo = (*FileRepo)(nil)

// fileSelectCTE is the SELECT appended after a CTE named "r" that exposes
// all file columns (including mime_id). Used by Create, Update, and Restore
// to get the full denormalized record in a single round-trip.
const fileSelectCTE = `
SELECT r.id, r.original_name,
mt.name AS mime_type, mt.extension AS mime_extension,
r.content_datetime, r.notes, r.metadata, r.exif, r.phash,
r.creator_id, u.name AS creator_name,
r.is_public, r.is_deleted
FROM r
JOIN core.mime_types mt ON mt.id = r.mime_id
JOIN core.users u ON u.id = r.creator_id`
// ---------------------------------------------------------------------------
// Create
// ---------------------------------------------------------------------------

// Create inserts a new file record using the ID already set on f.
// The MIME type is resolved from f.MIMEType (name string) via a subquery;
// the CTE plus fileSelectCTE returns the fully denormalized record in a
// single round-trip.
func (r *FileRepo) Create(ctx context.Context, f *domain.File) (*domain.File, error) {
	const sqlStr = `
WITH r AS (
INSERT INTO data.files
(id, original_name, mime_id, content_datetime, notes, metadata, exif, phash, creator_id, is_public)
VALUES (
$1,
$2,
(SELECT id FROM core.mime_types WHERE name = $3),
$4, $5, $6, $7, $8, $9, $10
)
RETURNING id, original_name, mime_id, content_datetime, notes,
metadata, exif, phash, creator_id, is_public, is_deleted
)` + fileSelectCTE

	rows, err := connOrTx(ctx, r.pool).Query(ctx, sqlStr,
		f.ID, f.OriginalName, f.MIMEType, f.ContentDatetime,
		f.Notes, f.Metadata, f.EXIF, f.PHash, f.CreatorID, f.IsPublic)
	if err != nil {
		return nil, fmt.Errorf("FileRepo.Create: %w", err)
	}

	rec, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[fileRow])
	if err != nil {
		return nil, fmt.Errorf("FileRepo.Create scan: %w", err)
	}

	out := toFile(rec)
	return &out, nil
}
// ---------------------------------------------------------------------------
// GetByID
// ---------------------------------------------------------------------------

// GetByID loads one file (including soft-deleted ones) with its denormalized
// MIME/creator columns and its full tag list.
// Returns domain.ErrNotFound when no row matches id.
func (r *FileRepo) GetByID(ctx context.Context, id uuid.UUID) (*domain.File, error) {
	const sqlStr = `
SELECT f.id, f.original_name,
mt.name AS mime_type, mt.extension AS mime_extension,
f.content_datetime, f.notes, f.metadata, f.exif, f.phash,
f.creator_id, u.name AS creator_name,
f.is_public, f.is_deleted
FROM data.files f
JOIN core.mime_types mt ON mt.id = f.mime_id
JOIN core.users u ON u.id = f.creator_id
WHERE f.id = $1`

	rows, err := connOrTx(ctx, r.pool).Query(ctx, sqlStr, id)
	if err != nil {
		return nil, fmt.Errorf("FileRepo.GetByID: %w", err)
	}

	rec, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[fileRow])
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		return nil, domain.ErrNotFound
	case err != nil:
		return nil, fmt.Errorf("FileRepo.GetByID scan: %w", err)
	}

	result := toFile(rec)
	tags, err := r.ListTags(ctx, id)
	if err != nil {
		return nil, err
	}
	result.Tags = tags
	return &result, nil
}
// ---------------------------------------------------------------------------
// Update
// ---------------------------------------------------------------------------

// Update applies editable metadata fields. MIME type and EXIF are immutable.
// The updated record (with tags reloaded) is returned; domain.ErrNotFound
// when no row matches id.
func (r *FileRepo) Update(ctx context.Context, id uuid.UUID, f *domain.File) (*domain.File, error) {
	const sqlStr = `
WITH r AS (
UPDATE data.files
SET original_name = $2,
content_datetime = $3,
notes = $4,
metadata = $5,
is_public = $6
WHERE id = $1
RETURNING id, original_name, mime_id, content_datetime, notes,
metadata, exif, phash, creator_id, is_public, is_deleted
)` + fileSelectCTE

	rows, err := connOrTx(ctx, r.pool).Query(ctx, sqlStr,
		id, f.OriginalName, f.ContentDatetime, f.Notes, f.Metadata, f.IsPublic)
	if err != nil {
		return nil, fmt.Errorf("FileRepo.Update: %w", err)
	}

	rec, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[fileRow])
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		return nil, domain.ErrNotFound
	case err != nil:
		return nil, fmt.Errorf("FileRepo.Update scan: %w", err)
	}

	result := toFile(rec)
	tags, err := r.ListTags(ctx, id)
	if err != nil {
		return nil, err
	}
	result.Tags = tags
	return &result, nil
}
// ---------------------------------------------------------------------------
// SoftDelete / Restore / DeletePermanent
// ---------------------------------------------------------------------------

// SoftDelete moves a file to trash (is_deleted = true). Returns ErrNotFound
// if the file does not exist or is already in trash (the is_deleted = false
// guard makes the UPDATE match zero rows in both cases).
func (r *FileRepo) SoftDelete(ctx context.Context, id uuid.UUID) error {
	const sqlStr = `UPDATE data.files SET is_deleted = true WHERE id = $1 AND is_deleted = false`

	tag, err := connOrTx(ctx, r.pool).Exec(ctx, sqlStr, id)
	switch {
	case err != nil:
		return fmt.Errorf("FileRepo.SoftDelete: %w", err)
	case tag.RowsAffected() == 0:
		return domain.ErrNotFound
	default:
		return nil
	}
}
// Restore moves a file out of trash (is_deleted = false) and returns the
// restored record with tags reloaded. Returns ErrNotFound if the file does
// not exist or is not in trash.
func (r *FileRepo) Restore(ctx context.Context, id uuid.UUID) (*domain.File, error) {
	const sqlStr = `
WITH r AS (
UPDATE data.files
SET is_deleted = false
WHERE id = $1 AND is_deleted = true
RETURNING id, original_name, mime_id, content_datetime, notes,
metadata, exif, phash, creator_id, is_public, is_deleted
)` + fileSelectCTE

	rows, err := connOrTx(ctx, r.pool).Query(ctx, sqlStr, id)
	if err != nil {
		return nil, fmt.Errorf("FileRepo.Restore: %w", err)
	}

	rec, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[fileRow])
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		return nil, domain.ErrNotFound
	case err != nil:
		return nil, fmt.Errorf("FileRepo.Restore scan: %w", err)
	}

	result := toFile(rec)
	tags, err := r.ListTags(ctx, id)
	if err != nil {
		return nil, err
	}
	result.Tags = tags
	return &result, nil
}
// DeletePermanent removes a file record permanently. Only allowed when the
// file is already in trash (is_deleted = true); otherwise — or when the id
// does not exist — ErrNotFound is returned.
func (r *FileRepo) DeletePermanent(ctx context.Context, id uuid.UUID) error {
	const sqlStr = `DELETE FROM data.files WHERE id = $1 AND is_deleted = true`

	tag, err := connOrTx(ctx, r.pool).Exec(ctx, sqlStr, id)
	switch {
	case err != nil:
		return fmt.Errorf("FileRepo.DeletePermanent: %w", err)
	case tag.RowsAffected() == 0:
		return domain.ErrNotFound
	default:
		return nil
	}
}
// ---------------------------------------------------------------------------
// ListTags / SetTags
// ---------------------------------------------------------------------------

// ListTags returns all tags assigned to a file, ordered by tag name.
// Implemented as a single-ID call to loadTagsBatch, which guarantees a
// non-nil slice for every requested ID.
func (r *FileRepo) ListTags(ctx context.Context, fileID uuid.UUID) ([]domain.Tag, error) {
	byFile, err := r.loadTagsBatch(ctx, []uuid.UUID{fileID})
	if err != nil {
		return nil, err
	}
	return byFile[fileID], nil
}
// SetTags replaces all tags on a file (full replace semantics): existing
// assignments are deleted, then tagIDs are inserted with ONE multi-row
// INSERT via unnest(uuid[]) instead of one round-trip per tag.
//
// Callers that need the delete+insert pair to be atomic should run this
// inside a transaction (picked up automatically via connOrTx).
func (r *FileRepo) SetTags(ctx context.Context, fileID uuid.UUID, tagIDs []uuid.UUID) error {
	q := connOrTx(ctx, r.pool)

	const del = `DELETE FROM data.file_tag WHERE file_id = $1`
	if _, err := q.Exec(ctx, del, fileID); err != nil {
		return fmt.Errorf("FileRepo.SetTags delete: %w", err)
	}
	if len(tagIDs) == 0 {
		return nil
	}

	// unnest expands the uuid[] parameter into one row per tag ID;
	// the explicit cast keeps the parameter type unambiguous for pgx.
	const ins = `INSERT INTO data.file_tag (file_id, tag_id)
SELECT $1, unnest($2::uuid[])`
	if _, err := q.Exec(ctx, ins, fileID, tagIDs); err != nil {
		return fmt.Errorf("FileRepo.SetTags insert: %w", err)
	}
	return nil
}
// ---------------------------------------------------------------------------
// List
// ---------------------------------------------------------------------------

// List returns a cursor-paginated page of files.
//
// Pagination is keyset-based for stable performance on large tables.
// Cursor encodes the sort position; the caller provides direction.
// Anchor mode centres the result around a specific file UUID.
//
// Errors: domain.ErrValidation for a malformed cursor or filter DSL;
// domain.ErrNotFound when the anchor file does not exist.
func (r *FileRepo) List(ctx context.Context, params domain.FileListParams) (*domain.FilePage, error) {
	sort := normSort(params.Sort)
	order := normOrder(params.Order)
	forward := params.Direction != "backward"
	// Page size bounds: db.ClampLimit(params.Limit, 50, 200) — presumably
	// default 50 / max 200; confirm against db.ClampLimit.
	limit := db.ClampLimit(params.Limit, 50, 200)
	// --- resolve cursor / anchor ---
	var (
		cursorID  uuid.UUID
		cursorVal string
		hasCursor bool
		isAnchor  bool
	)
	switch {
	case params.Cursor != "":
		cur, err := decodeCursor(params.Cursor)
		if err != nil {
			return nil, fmt.Errorf("%w: %v", domain.ErrValidation, err)
		}
		id, err := uuid.Parse(cur.ID)
		if err != nil {
			return nil, domain.ErrValidation
		}
		// Lock in the sort/order encoded in the cursor so changing query
		// parameters mid-session doesn't corrupt pagination.
		sort = normSort(cur.Sort)
		order = normOrder(cur.Order)
		cursorID = id
		cursorVal = cur.Val
		hasCursor = true
	case params.Anchor != nil:
		// Anchor mode: look up the anchor's sort-column values so the keyset
		// condition can include the anchor row itself (incl=true below).
		av, err := r.fetchAnchorVals(ctx, *params.Anchor)
		if err != nil {
			return nil, err
		}
		cursorID = *params.Anchor
		switch sort {
		case "content_datetime":
			cursorVal = av.ContentDatetime.UTC().Format(time.RFC3339Nano)
		case "original_name":
			cursorVal = av.OriginalName
		case "mime":
			cursorVal = av.MIMEType
			// "created": cursorVal stays ""; cursorID is the sort key.
		}
		hasCursor = true
		isAnchor = true
	}
	// Without a cursor there is no meaningful "backward" direction.
	if !hasCursor {
		forward = true
	}
	// --- build WHERE and ORDER BY ---
	var conds []string
	args := make([]any, 0, 8)
	n := 1
	// Trash toggle: is_deleted must equal params.Trash (false = normal view).
	conds = append(conds, fmt.Sprintf("f.is_deleted = $%d", n))
	args = append(args, params.Trash)
	n++
	if params.Search != "" {
		// Case-insensitive substring match on the original filename.
		conds = append(conds, fmt.Sprintf("f.original_name ILIKE $%d", n))
		args = append(args, "%"+params.Search+"%")
		n++
	}
	if params.Filter != "" {
		filterSQL, nextN, filterArgs, err := ParseFilter(params.Filter, n)
		if err != nil {
			return nil, fmt.Errorf("%w: %v", domain.ErrValidation, err)
		}
		if filterSQL != "" {
			conds = append(conds, filterSQL)
			n = nextN
			args = append(args, filterArgs...)
		}
	}
	var orderBy string
	if hasCursor {
		ksWhere, ksOrder, nextN, ksArgs := buildKeysetCond(
			sort, order, forward, isAnchor, cursorID, cursorVal, n, args)
		conds = append(conds, ksWhere)
		n = nextN
		args = ksArgs
		orderBy = ksOrder
	} else {
		orderBy = defaultOrderBy(sort, order)
	}
	where := ""
	if len(conds) > 0 {
		where = "WHERE " + strings.Join(conds, " AND ")
	}
	// Fetch one extra row to detect whether more items exist beyond this page.
	args = append(args, limit+1)
	sqlStr := fmt.Sprintf(`
SELECT f.id, f.original_name,
mt.name AS mime_type, mt.extension AS mime_extension,
f.content_datetime, f.notes, f.metadata, f.exif, f.phash,
f.creator_id, u.name AS creator_name,
f.is_public, f.is_deleted
FROM data.files f
JOIN core.mime_types mt ON mt.id = f.mime_id
JOIN core.users u ON u.id = f.creator_id
%s
ORDER BY %s
LIMIT $%d`, where, orderBy, n)
	q := connOrTx(ctx, r.pool)
	rows, err := q.Query(ctx, sqlStr, args...)
	if err != nil {
		return nil, fmt.Errorf("FileRepo.List: %w", err)
	}
	collected, err := pgx.CollectRows(rows, pgx.RowToStructByName[fileRow])
	if err != nil {
		return nil, fmt.Errorf("FileRepo.List scan: %w", err)
	}
	// --- trim extra row and reverse for backward ---
	hasMore := len(collected) > limit
	if hasMore {
		collected = collected[:limit]
	}
	if !forward {
		// Results were fetched in reversed ORDER BY; invert to restore the
		// natural sort order expected by the caller.
		for i, j := 0, len(collected)-1; i < j; i, j = i+1, j-1 {
			collected[i], collected[j] = collected[j], collected[i]
		}
	}
	// --- assemble page ---
	page := &domain.FilePage{
		Items: make([]domain.File, len(collected)),
	}
	for i, row := range collected {
		page.Items[i] = toFile(row)
	}
	// --- set next/prev cursors ---
	// next_cursor: navigate further in the forward direction.
	// prev_cursor: navigate further in the backward direction.
	if len(collected) > 0 {
		firstCur := encodeCursor(makeCursor(collected[0], sort, order))
		lastCur := encodeCursor(makeCursor(collected[len(collected)-1], sort, order))
		if forward {
			// We only know a prev page exists if we arrived via cursor.
			if hasCursor {
				page.PrevCursor = &firstCur
			}
			if hasMore {
				page.NextCursor = &lastCur
			}
		} else {
			// Backward: last item (after reversal) is closest to original cursor.
			if hasCursor {
				page.NextCursor = &lastCur
			}
			if hasMore {
				page.PrevCursor = &firstCur
			}
		}
	}
	// --- batch-load tags ---
	if len(page.Items) > 0 {
		fileIDs := make([]uuid.UUID, len(page.Items))
		for i, f := range page.Items {
			fileIDs[i] = f.ID
		}
		tagMap, err := r.loadTagsBatch(ctx, fileIDs)
		if err != nil {
			return nil, err
		}
		for i, f := range page.Items {
			page.Items[i].Tags = tagMap[f.ID] // nil becomes []domain.Tag{} via loadTagsBatch
		}
	}
	return page, nil
}
// ---------------------------------------------------------------------------
// Internal helpers
// ---------------------------------------------------------------------------

// fetchAnchorVals returns the sort-column values for the given file, used to
// set up a cursor when the caller provides an anchor UUID.
// Returns domain.ErrNotFound when the anchor file does not exist.
func (r *FileRepo) fetchAnchorVals(ctx context.Context, fileID uuid.UUID) (*anchorValRow, error) {
	const sqlStr = `
SELECT f.content_datetime,
COALESCE(f.original_name, '') AS original_name,
mt.name AS mime_type
FROM data.files f
JOIN core.mime_types mt ON mt.id = f.mime_id
WHERE f.id = $1`

	rows, err := connOrTx(ctx, r.pool).Query(ctx, sqlStr, fileID)
	if err != nil {
		return nil, fmt.Errorf("FileRepo.fetchAnchorVals: %w", err)
	}

	rec, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[anchorValRow])
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		return nil, domain.ErrNotFound
	case err != nil:
		return nil, fmt.Errorf("FileRepo.fetchAnchorVals scan: %w", err)
	}
	return &rec, nil
}
// loadTagsBatch fetches tags for multiple files in a single query and returns
// them as a map keyed by file ID. Every requested file ID appears as a key
// (with an empty, non-nil slice when the file has no tags).
//
// The ID list is bound as one uuid[] parameter with "= ANY($1::uuid[])",
// replacing the previous hand-built $1,$2,… placeholder list. The SQL text
// is now constant regardless of page size, which is simpler and lets the
// server cache one prepared statement instead of one per list length.
func (r *FileRepo) loadTagsBatch(ctx context.Context, fileIDs []uuid.UUID) (map[uuid.UUID][]domain.Tag, error) {
	if len(fileIDs) == 0 {
		return nil, nil
	}
	const sqlStr = `
SELECT ft.file_id,
t.id, t.name, t.notes, t.color,
t.category_id,
c.name AS category_name,
c.color AS category_color,
t.metadata, t.creator_id, u.name AS creator_name, t.is_public
FROM data.file_tag ft
JOIN data.tags t ON t.id = ft.tag_id
JOIN core.users u ON u.id = t.creator_id
LEFT JOIN data.categories c ON c.id = t.category_id
WHERE ft.file_id = ANY($1::uuid[])
ORDER BY ft.file_id, t.name`
	q := connOrTx(ctx, r.pool)
	rows, err := q.Query(ctx, sqlStr, fileIDs)
	if err != nil {
		return nil, fmt.Errorf("FileRepo.loadTagsBatch: %w", err)
	}
	collected, err := pgx.CollectRows(rows, pgx.RowToStructByName[fileTagRow])
	if err != nil {
		return nil, fmt.Errorf("FileRepo.loadTagsBatch scan: %w", err)
	}
	result := make(map[uuid.UUID][]domain.Tag, len(fileIDs))
	for _, fid := range fileIDs {
		result[fid] = []domain.Tag{} // guarantee every key has a non-nil slice
	}
	for _, row := range collected {
		result[row.FileID] = append(result[row.FileID], toTagFromFileTag(row))
	}
	return result, nil
}

View File

@ -0,0 +1,286 @@
package postgres
import (
"fmt"
"strconv"
"strings"
"github.com/google/uuid"
)
// ---------------------------------------------------------------------------
// Token types
// ---------------------------------------------------------------------------

// filterTokenKind enumerates the DSL token kinds produced by lexFilter.
type filterTokenKind int

const (
	ftkAnd filterTokenKind = iota
	ftkOr
	ftkNot
	ftkLParen
	ftkRParen
	ftkTag       // t=<uuid>
	ftkMimeExact // m=<int>
	ftkMimeLike  // m~<pattern>
)

// filterToken is a single lexed DSL token; only the field matching kind is
// meaningful, the rest stay zero-valued.
type filterToken struct {
	kind     filterTokenKind
	tagID    uuid.UUID // ftkTag
	untagged bool      // ftkTag with zero UUID → "file has no tags"
	mimeID   int16     // ftkMimeExact
	pattern  string    // ftkMimeLike
}
// ---------------------------------------------------------------------------
// AST nodes
// ---------------------------------------------------------------------------

// filterNode produces a parameterized SQL fragment.
// n is the index of the next available positional parameter ($n).
// Returns the fragment, the updated n, and the extended args slice.
type filterNode interface {
	toSQL(n int, args []any) (string, int, []any)
}

// Binary/unary operator nodes and the terminal (tag/MIME) leaf.
type andNode struct{ left, right filterNode }
type orNode struct{ left, right filterNode }
type notNode struct{ child filterNode }
type leafNode struct{ tok filterToken }
// toSQL renders both operands and combines them with AND, parenthesized to
// preserve precedence in the emitted SQL.
func (a *andNode) toSQL(n int, args []any) (string, int, []any) {
	left, n, args := a.left.toSQL(n, args)
	right, n, args := a.right.toSQL(n, args)
	return fmt.Sprintf("(%s AND %s)", left, right), n, args
}

// toSQL renders both operands and combines them with OR, parenthesized.
func (o *orNode) toSQL(n int, args []any) (string, int, []any) {
	left, n, args := o.left.toSQL(n, args)
	right, n, args := o.right.toSQL(n, args)
	return fmt.Sprintf("(%s OR %s)", left, right), n, args
}

// toSQL negates the child fragment.
func (no *notNode) toSQL(n int, args []any) (string, int, []any) {
	child, n, args := no.child.toSQL(n, args)
	return fmt.Sprintf("(NOT %s)", child), n, args
}

// toSQL renders a terminal predicate; every user value is bound as $n.
func (l *leafNode) toSQL(n int, args []any) (string, int, []any) {
	switch l.tok.kind {
	case ftkTag:
		if l.tok.untagged {
			// Purely structural predicate — no parameter needed.
			return "NOT EXISTS (SELECT 1 FROM data.file_tag ft WHERE ft.file_id = f.id)", n, args
		}
		cond := fmt.Sprintf(
			"EXISTS (SELECT 1 FROM data.file_tag ft WHERE ft.file_id = f.id AND ft.tag_id = $%d)", n)
		return cond, n + 1, append(args, l.tok.tagID)
	case ftkMimeExact:
		return fmt.Sprintf("f.mime_id = $%d", n), n + 1, append(args, l.tok.mimeID)
	case ftkMimeLike:
		// mt alias comes from the JOIN in the main file query (always present).
		return fmt.Sprintf("mt.name LIKE $%d", n), n + 1, append(args, l.tok.pattern)
	default:
		panic("filterNode.toSQL: unknown leaf kind")
	}
}
// ---------------------------------------------------------------------------
// Lexer
// ---------------------------------------------------------------------------

// lexFilter tokenises the DSL string {a,b,c,...} into filterTokens.
// Single-character operators are resolved via a lookup table; value tokens
// (t=, m=, m~) are validated individually.
func lexFilter(dsl string) ([]filterToken, error) {
	dsl = strings.TrimSpace(dsl)
	if !strings.HasPrefix(dsl, "{") || !strings.HasSuffix(dsl, "}") {
		return nil, fmt.Errorf("filter DSL must be wrapped in braces: {…}")
	}
	inner := strings.TrimSpace(dsl[1 : len(dsl)-1])
	if inner == "" {
		return nil, nil
	}

	operators := map[string]filterTokenKind{
		"&": ftkAnd,
		"|": ftkOr,
		"!": ftkNot,
		"(": ftkLParen,
		")": ftkRParen,
	}

	parts := strings.Split(inner, ",")
	tokens := make([]filterToken, 0, len(parts))
	for _, raw := range parts {
		p := strings.TrimSpace(raw)
		if kind, isOp := operators[p]; isOp {
			tokens = append(tokens, filterToken{kind: kind})
			continue
		}
		switch {
		case strings.HasPrefix(p, "t="):
			id, err := uuid.Parse(p[2:])
			if err != nil {
				return nil, fmt.Errorf("filter: invalid tag UUID %q", p[2:])
			}
			tokens = append(tokens, filterToken{kind: ftkTag, tagID: id, untagged: id == uuid.Nil})
		case strings.HasPrefix(p, "m="):
			v, err := strconv.ParseInt(p[2:], 10, 16)
			if err != nil {
				return nil, fmt.Errorf("filter: invalid MIME ID %q", p[2:])
			}
			tokens = append(tokens, filterToken{kind: ftkMimeExact, mimeID: int16(v)})
		case strings.HasPrefix(p, "m~"):
			// The pattern value is bound as a query parameter, so no SQL injection risk.
			tokens = append(tokens, filterToken{kind: ftkMimeLike, pattern: p[2:]})
		default:
			return nil, fmt.Errorf("filter: unknown token %q", p)
		}
	}
	return tokens, nil
}
// ---------------------------------------------------------------------------
// Recursive-descent parser
// ---------------------------------------------------------------------------

// filterParser walks a token slice with a single lookahead position.
type filterParser struct {
	tokens []filterToken
	pos    int // index of the next unconsumed token
}

// peek returns the next token without consuming it; ok is false at end of input.
func (p *filterParser) peek() (filterToken, bool) {
	if p.pos >= len(p.tokens) {
		return filterToken{}, false
	}
	return p.tokens[p.pos], true
}

// next consumes and returns the next token. Callers must have verified via
// peek that a token remains; otherwise this indexes out of range.
func (p *filterParser) next() filterToken {
	t := p.tokens[p.pos]
	p.pos++
	return t
}
// Grammar (standard NOT > AND > OR precedence):
//
//	expr     := or_expr
//	or_expr  := and_expr ('|' and_expr)*
//	and_expr := not_expr ('&' not_expr)*
//	not_expr := '!' not_expr | atom
//	atom     := '(' expr ')' | leaf

// parseExpr parses a full expression (lowest precedence level).
func (p *filterParser) parseExpr() (filterNode, error) { return p.parseOr() }

// parseOr: and_expr ('|' and_expr)* — left-associative.
func (p *filterParser) parseOr() (filterNode, error) {
	left, err := p.parseAnd()
	if err != nil {
		return nil, err
	}
	for t, ok := p.peek(); ok && t.kind == ftkOr; t, ok = p.peek() {
		p.next()
		right, err := p.parseAnd()
		if err != nil {
			return nil, err
		}
		left = &orNode{left, right}
	}
	return left, nil
}

// parseAnd: not_expr ('&' not_expr)* — left-associative.
func (p *filterParser) parseAnd() (filterNode, error) {
	left, err := p.parseNot()
	if err != nil {
		return nil, err
	}
	for t, ok := p.peek(); ok && t.kind == ftkAnd; t, ok = p.peek() {
		p.next()
		right, err := p.parseNot()
		if err != nil {
			return nil, err
		}
		left = &andNode{left, right}
	}
	return left, nil
}

// parseNot: '!' not_expr | atom.
func (p *filterParser) parseNot() (filterNode, error) {
	if t, ok := p.peek(); ok && t.kind == ftkNot {
		p.next()
		child, err := p.parseNot() // right-recursive to allow !!x
		if err != nil {
			return nil, err
		}
		return &notNode{child}, nil
	}
	return p.parseAtom()
}

// parseAtom: '(' expr ')' | leaf.
func (p *filterParser) parseAtom() (filterNode, error) {
	t, ok := p.peek()
	if !ok {
		return nil, fmt.Errorf("filter: unexpected end of expression")
	}
	switch t.kind {
	case ftkLParen:
		p.next()
		expr, err := p.parseExpr()
		if err != nil {
			return nil, err
		}
		if rp, ok := p.peek(); !ok || rp.kind != ftkRParen {
			return nil, fmt.Errorf("filter: expected ')'")
		}
		p.next()
		return expr, nil
	case ftkTag, ftkMimeExact, ftkMimeLike:
		p.next()
		return &leafNode{t}, nil
	default:
		return nil, fmt.Errorf("filter: unexpected token at position %d", p.pos)
	}
}
// ---------------------------------------------------------------------------
// Public entry point
// ---------------------------------------------------------------------------

// ParseFilter parses a filter DSL string into a parameterized SQL fragment.
//
// argStart is the 1-based index for the first $N placeholder, letting the
// caller interleave filter parameters with its own query parameters. The
// returned nextN is the first index still free after the fragment's params.
//
// Returns ("", argStart, nil, nil) for an empty or trivial DSL.
// SQL injection is structurally impossible: every user-supplied value is
// bound as a query parameter ($N), never interpolated into the SQL string.
func ParseFilter(dsl string, argStart int) (sql string, nextN int, args []any, err error) {
	dsl = strings.TrimSpace(dsl)
	if dsl == "" || dsl == "{}" {
		return "", argStart, nil, nil
	}

	toks, err := lexFilter(dsl)
	switch {
	case err != nil:
		return "", argStart, nil, err
	case len(toks) == 0:
		return "", argStart, nil, nil
	}

	parser := &filterParser{tokens: toks}
	node, err := parser.parseExpr()
	if err != nil {
		return "", argStart, nil, err
	}
	// Reject input with leftovers after a valid prefix (e.g. "a , b" without
	// an operator between them).
	if parser.pos != len(parser.tokens) {
		return "", argStart, nil, fmt.Errorf("filter: trailing tokens at position %d", parser.pos)
	}

	sql, nextN, args = node.toSQL(argStart, nil)
	return sql, nextN, args, nil
}

View File

@ -0,0 +1,97 @@
package postgres
import (
"context"
"errors"
"fmt"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// mimeRow mirrors one row of core.mime_types.
type mimeRow struct {
	ID        int16  `db:"id"`
	Name      string `db:"name"`      // MIME name, e.g. "image/png"
	Extension string `db:"extension"` // canonical file extension
}

// toMIMEType maps a scanned mimeRow onto the domain value.
func toMIMEType(r mimeRow) domain.MIMEType {
	return domain.MIMEType{
		ID:        r.ID,
		Name:      r.Name,
		Extension: r.Extension,
	}
}

// MimeRepo implements port.MimeRepo using PostgreSQL.
type MimeRepo struct {
	pool *pgxpool.Pool // shared connection pool; per-call tx override via connOrTx
}

// NewMimeRepo creates a MimeRepo backed by pool.
func NewMimeRepo(pool *pgxpool.Pool) *MimeRepo {
	return &MimeRepo{pool: pool}
}

// Compile-time interface conformance check.
var _ port.MimeRepo = (*MimeRepo)(nil)
// List returns every registered MIME type ordered by name.
func (r *MimeRepo) List(ctx context.Context) ([]domain.MIMEType, error) {
	const query = `SELECT id, name, extension FROM core.mime_types ORDER BY name`

	rows, err := connOrTx(ctx, r.pool).Query(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("MimeRepo.List: %w", err)
	}

	scanned, err := pgx.CollectRows(rows, pgx.RowToStructByName[mimeRow])
	if err != nil {
		return nil, fmt.Errorf("MimeRepo.List scan: %w", err)
	}

	out := make([]domain.MIMEType, len(scanned))
	for i, rec := range scanned {
		out[i] = toMIMEType(rec)
	}
	return out, nil
}
// GetByID looks up a MIME type by its numeric ID.
// Returns domain.ErrNotFound when the ID is unknown.
func (r *MimeRepo) GetByID(ctx context.Context, id int16) (*domain.MIMEType, error) {
	const query = `SELECT id, name, extension FROM core.mime_types WHERE id = $1`

	rows, err := connOrTx(ctx, r.pool).Query(ctx, query, id)
	if err != nil {
		return nil, fmt.Errorf("MimeRepo.GetByID: %w", err)
	}

	rec, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[mimeRow])
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		return nil, domain.ErrNotFound
	case err != nil:
		return nil, fmt.Errorf("MimeRepo.GetByID scan: %w", err)
	}

	out := toMIMEType(rec)
	return &out, nil
}
// GetByName looks up a MIME type by its name (e.g. "image/png").
// An unknown name maps to domain.ErrUnsupportedMIME (not ErrNotFound),
// because callers use this to validate uploads.
func (r *MimeRepo) GetByName(ctx context.Context, name string) (*domain.MIMEType, error) {
	const query = `SELECT id, name, extension FROM core.mime_types WHERE name = $1`

	rows, err := connOrTx(ctx, r.pool).Query(ctx, query, name)
	if err != nil {
		return nil, fmt.Errorf("MimeRepo.GetByName: %w", err)
	}

	rec, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[mimeRow])
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		return nil, domain.ErrUnsupportedMIME
	case err != nil:
		return nil, fmt.Errorf("MimeRepo.GetByName scan: %w", err)
	}

	out := toMIMEType(rec)
	return &out, nil
}

View File

@ -0,0 +1,665 @@
package postgres
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/google/uuid"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"tanabata/backend/internal/db"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// ---------------------------------------------------------------------------
// Row structs
// ---------------------------------------------------------------------------

// poolRow mirrors the columns produced by the pool SELECTs: pool columns plus
// the joined creator name and an aggregated file count.
type poolRow struct {
	ID          uuid.UUID `db:"id"`
	Name        string    `db:"name"`
	Notes       *string   `db:"notes"`    // nullable
	Metadata    []byte    `db:"metadata"` // raw JSON; may be empty or the literal "null"
	CreatorID   int16     `db:"creator_id"`
	CreatorName string    `db:"creator_name"` // joined from core.users
	IsPublic    bool      `db:"is_public"`
	FileCount   int       `db:"file_count"` // COALESCE'd count from data.file_pool
}

// poolRowWithTotal adds the COUNT(*) OVER() window total used by List.
type poolRowWithTotal struct {
	poolRow
	Total int `db:"total"`
}

// poolFileRow is a flat struct combining all file columns plus pool position.
type poolFileRow struct {
	ID              uuid.UUID       `db:"id"`
	OriginalName    *string         `db:"original_name"`
	MIMEType        string          `db:"mime_type"`      // joined from core.mime_types
	MIMEExtension   string          `db:"mime_extension"` // joined from core.mime_types
	ContentDatetime time.Time       `db:"content_datetime"`
	Notes           *string         `db:"notes"`
	Metadata        json.RawMessage `db:"metadata"`
	EXIF            json.RawMessage `db:"exif"`
	PHash           *int64          `db:"phash"` // nullable perceptual hash
	CreatorID       int16           `db:"creator_id"`
	CreatorName     string          `db:"creator_name"` // joined from core.users
	IsPublic        bool            `db:"is_public"`
	IsDeleted       bool            `db:"is_deleted"`
	Position        int             `db:"position"` // ordering key within the pool
}
// ---------------------------------------------------------------------------
// Converters
// ---------------------------------------------------------------------------

// toPool converts a scanned poolRow into a domain.Pool. CreatedAt is derived
// from the UUID; empty or SQL-null metadata is left as nil.
func toPool(r poolRow) domain.Pool {
	out := domain.Pool{
		ID:          r.ID,
		Name:        r.Name,
		Notes:       r.Notes,
		CreatorID:   r.CreatorID,
		CreatorName: r.CreatorName,
		IsPublic:    r.IsPublic,
		FileCount:   r.FileCount,
		CreatedAt:   domain.UUIDCreatedAt(r.ID),
	}
	if meta := r.Metadata; len(meta) > 0 && string(meta) != "null" {
		out.Metadata = json.RawMessage(meta)
	}
	return out
}
// toPoolFile converts a scanned poolFileRow into a domain.PoolFile, pairing
// the full file record with its position inside the pool.
func toPoolFile(r poolFileRow) domain.PoolFile {
	f := domain.File{
		ID:              r.ID,
		OriginalName:    r.OriginalName,
		MIMEType:        r.MIMEType,
		MIMEExtension:   r.MIMEExtension,
		ContentDatetime: r.ContentDatetime,
		Notes:           r.Notes,
		Metadata:        r.Metadata,
		EXIF:            r.EXIF,
		PHash:           r.PHash,
		CreatorID:       r.CreatorID,
		CreatorName:     r.CreatorName,
		IsPublic:        r.IsPublic,
		IsDeleted:       r.IsDeleted,
		CreatedAt:       domain.UUIDCreatedAt(r.ID),
	}
	return domain.PoolFile{File: f, Position: r.Position}
}
// ---------------------------------------------------------------------------
// Cursor
// ---------------------------------------------------------------------------

// poolFileCursor is the opaque keyset-pagination token for pool file listings:
// the (position, file_id) pair of the last row returned.
type poolFileCursor struct {
	Position int    `json:"p"`
	FileID   string `json:"id"`
}

// encodePoolCursor serializes c as compact JSON in unpadded URL-safe base64.
func encodePoolCursor(c poolFileCursor) string {
	payload, _ := json.Marshal(c)
	return base64.RawURLEncoding.EncodeToString(payload)
}

// decodePoolCursor is the inverse of encodePoolCursor. Malformed input yields
// a generic error message; the underlying cause is deliberately not wrapped.
func decodePoolCursor(s string) (poolFileCursor, error) {
	raw, err := base64.RawURLEncoding.DecodeString(s)
	if err != nil {
		return poolFileCursor{}, fmt.Errorf("cursor: invalid encoding")
	}
	var out poolFileCursor
	if jsonErr := json.Unmarshal(raw, &out); jsonErr != nil {
		return poolFileCursor{}, fmt.Errorf("cursor: invalid format")
	}
	return out, nil
}
// ---------------------------------------------------------------------------
// Shared SQL
// ---------------------------------------------------------------------------

// poolCountSubquery computes per-pool file counts.
const poolCountSubquery = `(SELECT pool_id, COUNT(*) AS cnt FROM data.file_pool GROUP BY pool_id)`

// poolSelectFrom is the shared SELECT/FROM/JOIN prefix for single-pool
// queries; callers append their own WHERE clause. Column order must match
// poolRow for pgx.RowToStructByName.
const poolSelectFrom = `
SELECT p.id, p.name, p.notes, p.metadata,
p.creator_id, u.name AS creator_name, p.is_public,
COALESCE(fc.cnt, 0) AS file_count
FROM data.pools p
JOIN core.users u ON u.id = p.creator_id
LEFT JOIN ` + poolCountSubquery + ` fc ON fc.pool_id = p.id`
// poolSortColumn maps an API sort key onto a whitelisted ORDER BY column, so
// user input never reaches the SQL directly. Anything other than "name" falls
// back to p.id (creation order, since IDs are time-ordered UUIDs).
func poolSortColumn(s string) string {
	switch s {
	case "name":
		return "p.name"
	default: // "created" and any unrecognized value
		return "p.id"
	}
}
// ---------------------------------------------------------------------------
// PoolRepo
// ---------------------------------------------------------------------------

// PoolRepo implements port.PoolRepo using PostgreSQL.
type PoolRepo struct {
	pool *pgxpool.Pool // all queries go through connOrTx, so they honor an active tx
}

// Compile-time check that PoolRepo satisfies the port interface.
var _ port.PoolRepo = (*PoolRepo)(nil)

// NewPoolRepo creates a PoolRepo backed by pool.
func NewPoolRepo(pool *pgxpool.Pool) *PoolRepo {
	return &PoolRepo{pool: pool}
}
// ---------------------------------------------------------------------------
// List
// ---------------------------------------------------------------------------

// List returns one offset page of pools, each enriched with the creator name
// and file count, plus the total number of matching rows.
//
//   - Sort: mapped through poolSortColumn (whitelist); Order accepts "desc"
//     case-insensitively, anything else sorts ascending.
//   - Search: case-insensitive substring match on the pool name.
//   - Limit defaults to 50 when non-positive; negative offsets clamp to 0.
//
// NOTE(review): Total comes from COUNT(*) OVER(); when the offset is past the
// last matching row the page is empty and Total reads as 0 even though
// matches exist — confirm callers tolerate this.
func (r *PoolRepo) List(ctx context.Context, params port.OffsetParams) (*domain.PoolOffsetPage, error) {
	order := "ASC"
	if strings.ToLower(params.Order) == "desc" {
		order = "DESC"
	}
	sortCol := poolSortColumn(params.Sort)
	args := []any{}
	n := 1 // next positional placeholder number
	var conditions []string
	if params.Search != "" {
		conditions = append(conditions, fmt.Sprintf("lower(p.name) LIKE lower($%d)", n))
		args = append(args, "%"+params.Search+"%")
		n++
	}
	where := ""
	if len(conditions) > 0 {
		where = "WHERE " + strings.Join(conditions, " AND ")
	}
	limit := params.Limit
	if limit <= 0 {
		limit = 50
	}
	offset := params.Offset
	if offset < 0 {
		offset = 0
	}
	query := fmt.Sprintf(`
SELECT p.id, p.name, p.notes, p.metadata,
p.creator_id, u.name AS creator_name, p.is_public,
COALESCE(fc.cnt, 0) AS file_count,
COUNT(*) OVER() AS total
FROM data.pools p
JOIN core.users u ON u.id = p.creator_id
LEFT JOIN %s fc ON fc.pool_id = p.id
%s
ORDER BY %s %s NULLS LAST, p.id ASC
LIMIT $%d OFFSET $%d`, poolCountSubquery, where, sortCol, order, n, n+1)
	args = append(args, limit, offset)
	q := connOrTx(ctx, r.pool)
	rows, err := q.Query(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.List query: %w", err)
	}
	collected, err := pgx.CollectRows(rows, pgx.RowToStructByName[poolRowWithTotal])
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.List scan: %w", err)
	}
	items := make([]domain.Pool, len(collected))
	total := 0
	// Every row carries the same window total; keep the last seen value.
	for i, row := range collected {
		items[i] = toPool(row.poolRow)
		total = row.Total
	}
	return &domain.PoolOffsetPage{
		Items:  items,
		Total:  total,
		Offset: offset,
		Limit:  limit,
	}, nil
}
// ---------------------------------------------------------------------------
// GetByID
// ---------------------------------------------------------------------------

// GetByID fetches a single pool (with creator name and file count).
// Returns domain.ErrNotFound when the pool does not exist.
func (r *PoolRepo) GetByID(ctx context.Context, id uuid.UUID) (*domain.Pool, error) {
	query := poolSelectFrom + `
WHERE p.id = $1`
	res, err := connOrTx(ctx, r.pool).Query(ctx, query, id)
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.GetByID: %w", err)
	}
	rec, err := pgx.CollectOneRow(res, pgx.RowToStructByName[poolRow])
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		return nil, domain.ErrNotFound
	case err != nil:
		return nil, fmt.Errorf("PoolRepo.GetByID scan: %w", err)
	}
	p := toPool(rec)
	return &p, nil
}
// ---------------------------------------------------------------------------
// Create
// ---------------------------------------------------------------------------

// Create inserts a new pool and returns it enriched with the creator name and
// a zero file count. Returns domain.ErrConflict on a unique violation.
func (r *PoolRepo) Create(ctx context.Context, p *domain.Pool) (*domain.Pool, error) {
	const query = `
WITH ins AS (
INSERT INTO data.pools (name, notes, metadata, creator_id, is_public)
VALUES ($1, $2, $3, $4, $5)
RETURNING *
)
SELECT ins.id, ins.name, ins.notes, ins.metadata,
ins.creator_id, u.name AS creator_name, ins.is_public,
0 AS file_count
FROM ins
JOIN core.users u ON u.id = ins.creator_id`
	// Empty metadata is sent as SQL NULL rather than an empty byte slice.
	var meta any
	if len(p.Metadata) > 0 {
		meta = p.Metadata
	}
	res, err := connOrTx(ctx, r.pool).Query(ctx, query, p.Name, p.Notes, meta, p.CreatorID, p.IsPublic)
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.Create: %w", err)
	}
	rec, err := pgx.CollectOneRow(res, pgx.RowToStructByName[poolRow])
	switch {
	case isPgUniqueViolation(err):
		return nil, domain.ErrConflict
	case err != nil:
		return nil, fmt.Errorf("PoolRepo.Create scan: %w", err)
	}
	created := toPool(rec)
	return &created, nil
}
// ---------------------------------------------------------------------------
// Update
// ---------------------------------------------------------------------------

// Update replaces name, notes and is_public for the pool, and merges metadata:
// COALESCE($4, metadata) keeps the stored value when the caller supplies no
// metadata, so metadata cannot be cleared through this method.
// Returns domain.ErrNotFound for an unknown id and domain.ErrConflict on a
// unique-constraint violation.
func (r *PoolRepo) Update(ctx context.Context, id uuid.UUID, p *domain.Pool) (*domain.Pool, error) {
	const query = `
WITH upd AS (
UPDATE data.pools SET
name = $2,
notes = $3,
metadata = COALESCE($4, metadata),
is_public = $5
WHERE id = $1
RETURNING *
)
SELECT upd.id, upd.name, upd.notes, upd.metadata,
upd.creator_id, u.name AS creator_name, upd.is_public,
COALESCE(fc.cnt, 0) AS file_count
FROM upd
JOIN core.users u ON u.id = upd.creator_id
LEFT JOIN (SELECT pool_id, COUNT(*) AS cnt FROM data.file_pool WHERE pool_id = $1 GROUP BY pool_id) fc
ON fc.pool_id = upd.id`
	// Empty metadata becomes SQL NULL so the COALESCE above preserves the
	// existing value.
	var meta any
	if len(p.Metadata) > 0 {
		meta = p.Metadata
	}
	q := connOrTx(ctx, r.pool)
	rows, err := q.Query(ctx, query, id, p.Name, p.Notes, meta, p.IsPublic)
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.Update: %w", err)
	}
	row, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[poolRow])
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			return nil, domain.ErrNotFound
		}
		if isPgUniqueViolation(err) {
			return nil, domain.ErrConflict
		}
		return nil, fmt.Errorf("PoolRepo.Update scan: %w", err)
	}
	updated := toPool(row)
	return &updated, nil
}
// ---------------------------------------------------------------------------
// Delete
// ---------------------------------------------------------------------------

// Delete removes a pool row. Returns domain.ErrNotFound when no row with the
// given id exists.
func (r *PoolRepo) Delete(ctx context.Context, id uuid.UUID) error {
	tag, err := connOrTx(ctx, r.pool).Exec(ctx, `DELETE FROM data.pools WHERE id = $1`, id)
	if err != nil {
		return fmt.Errorf("PoolRepo.Delete: %w", err)
	}
	if tag.RowsAffected() == 0 {
		return domain.ErrNotFound
	}
	return nil
}
// ---------------------------------------------------------------------------
// ListFiles
// ---------------------------------------------------------------------------

// fileSelectForPool is the column list for pool file queries (without position).
// Aliases mt/u must match the joins in ListFiles; column names must match the
// db tags of poolFileRow for pgx.RowToStructByName.
const fileSelectForPool = `
f.id, f.original_name,
mt.name AS mime_type, mt.extension AS mime_extension,
f.content_datetime, f.notes, f.metadata, f.exif, f.phash,
f.creator_id, u.name AS creator_name,
f.is_public, f.is_deleted`
// ListFiles returns one cursor page of non-deleted files in the pool, ordered
// by (position, file_id). An optional user filter expression is compiled via
// ParseFilter; syntax errors surface as domain.ErrValidation. When more rows
// exist, NextCursor encodes the (position, file_id) of the last returned row.
// Tags for the returned files are batch-loaded in a second query.
func (r *PoolRepo) ListFiles(ctx context.Context, poolID uuid.UUID, params port.PoolFileListParams) (*domain.PoolFilePage, error) {
	// Page-size clamp; presumably 50 is the default and 200 the maximum —
	// semantics live in db.ClampLimit, confirm there.
	limit := db.ClampLimit(params.Limit, 50, 200)
	args := []any{poolID}
	n := 2 // next placeholder number ($1 is poolID)
	var conds []string
	conds = append(conds, "fp.pool_id = $1")
	conds = append(conds, "f.is_deleted = false")
	if params.Filter != "" {
		// Compile the filter into SQL starting at placeholder $n; an empty
		// compiled string adds no condition.
		filterSQL, nextN, filterArgs, err := ParseFilter(params.Filter, n)
		if err != nil {
			return nil, fmt.Errorf("%w: %v", domain.ErrValidation, err)
		}
		if filterSQL != "" {
			conds = append(conds, filterSQL)
			n = nextN
			args = append(args, filterArgs...)
		}
	}
	// Cursor condition.
	var orderBy string
	if params.Cursor != "" {
		// Keyset pagination: resume strictly after (position, file_id).
		cur, err := decodePoolCursor(params.Cursor)
		if err != nil {
			return nil, fmt.Errorf("%w: %v", domain.ErrValidation, err)
		}
		fileID, err := uuid.Parse(cur.FileID)
		if err != nil {
			return nil, domain.ErrValidation
		}
		conds = append(conds, fmt.Sprintf(
			"(fp.position > $%d OR (fp.position = $%d AND fp.file_id > $%d))",
			n, n, n+1))
		args = append(args, cur.Position, fileID)
		n += 2
	}
	orderBy = "fp.position ASC, fp.file_id ASC"
	where := "WHERE " + strings.Join(conds, " AND ")
	// Fetch one extra row to detect whether another page exists.
	args = append(args, limit+1)
	sqlStr := fmt.Sprintf(`
SELECT %s, fp.position
FROM data.file_pool fp
JOIN data.files f ON f.id = fp.file_id
JOIN core.mime_types mt ON mt.id = f.mime_id
JOIN core.users u ON u.id = f.creator_id
%s
ORDER BY %s
LIMIT $%d`, fileSelectForPool, where, orderBy, n)
	q := connOrTx(ctx, r.pool)
	rows, err := q.Query(ctx, sqlStr, args...)
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.ListFiles query: %w", err)
	}
	collected, err := pgx.CollectRows(rows, pgx.RowToStructByName[poolFileRow])
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.ListFiles scan: %w", err)
	}
	// The extra row, if present, only signals "has more" and is dropped.
	hasMore := len(collected) > limit
	if hasMore {
		collected = collected[:limit]
	}
	items := make([]domain.PoolFile, len(collected))
	for i, row := range collected {
		items[i] = toPoolFile(row)
	}
	page := &domain.PoolFilePage{Items: items}
	if hasMore && len(collected) > 0 {
		// Cursor points at the last row actually returned.
		last := collected[len(collected)-1]
		cur := encodePoolCursor(poolFileCursor{
			Position: last.Position,
			FileID:   last.ID.String(),
		})
		page.NextCursor = &cur
	}
	// Batch-load tags.
	if len(items) > 0 {
		fileIDs := make([]uuid.UUID, len(items))
		for i, pf := range items {
			fileIDs[i] = pf.File.ID
		}
		tagMap, err := r.loadPoolTagsBatch(ctx, fileIDs)
		if err != nil {
			return nil, err
		}
		for i, pf := range items {
			page.Items[i].File.Tags = tagMap[pf.File.ID]
		}
	}
	return page, nil
}
// loadPoolTagsBatch loads the tags for every given file in a single query and
// returns them keyed by file ID. Every requested ID is present in the result
// map (with an empty slice when the file has no tags), so callers can index
// it without nil checks. Returns (nil, nil) for an empty input slice.
//
// The IDs are passed as one uuid[] parameter via ANY($1) instead of a
// generated "$1,$2,…" placeholder list — pgx encodes []uuid.UUID as a
// PostgreSQL array natively, which keeps the SQL constant and avoids building
// the placeholder string per call.
func (r *PoolRepo) loadPoolTagsBatch(ctx context.Context, fileIDs []uuid.UUID) (map[uuid.UUID][]domain.Tag, error) {
	if len(fileIDs) == 0 {
		return nil, nil
	}
	const sqlStr = `
SELECT ft.file_id,
t.id, t.name, t.notes, t.color,
t.category_id,
c.name AS category_name,
c.color AS category_color,
t.metadata, t.creator_id, u.name AS creator_name, t.is_public
FROM data.file_tag ft
JOIN data.tags t ON t.id = ft.tag_id
JOIN core.users u ON u.id = t.creator_id
LEFT JOIN data.categories c ON c.id = t.category_id
WHERE ft.file_id = ANY($1)
ORDER BY ft.file_id, t.name`
	q := connOrTx(ctx, r.pool)
	rows, err := q.Query(ctx, sqlStr, fileIDs)
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.loadPoolTagsBatch: %w", err)
	}
	collected, err := pgx.CollectRows(rows, pgx.RowToStructByName[fileTagRow])
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.loadPoolTagsBatch scan: %w", err)
	}
	// Pre-seed every requested ID so files without tags map to an empty slice.
	result := make(map[uuid.UUID][]domain.Tag, len(fileIDs))
	for _, fid := range fileIDs {
		result[fid] = []domain.Tag{}
	}
	for _, row := range collected {
		result[row.FileID] = append(result[row.FileID], toTagFromFileTag(row))
	}
	return result, nil
}
// ---------------------------------------------------------------------------
// AddFiles
// ---------------------------------------------------------------------------

// AddFiles inserts files into the pool. When position is nil, files are
// appended after the last existing file (MAX(position) + 1000 * i).
// When position is provided (0-indexed), files are inserted at that index
// and all pool positions are reassigned in one shot.
//
// NOTE(review): the MAX(position) read and the subsequent inserts are separate
// statements; concurrent appends to the same pool can interleave unless the
// caller wraps this in Transactor.WithTx — confirm callers do.
func (r *PoolRepo) AddFiles(ctx context.Context, poolID uuid.UUID, fileIDs []uuid.UUID, position *int) error {
	// Adding zero files is a no-op.
	if len(fileIDs) == 0 {
		return nil
	}
	q := connOrTx(ctx, r.pool)
	if position == nil {
		// Append: get current max position, then bulk-insert.
		var maxPos int
		row := q.QueryRow(ctx, `SELECT COALESCE(MAX(position), 0) FROM data.file_pool WHERE pool_id = $1`, poolID)
		if err := row.Scan(&maxPos); err != nil {
			return fmt.Errorf("PoolRepo.AddFiles maxPos: %w", err)
		}
		// ON CONFLICT DO NOTHING makes re-adding an already-present file a no-op.
		const ins = `INSERT INTO data.file_pool (file_id, pool_id, position) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING`
		for i, fid := range fileIDs {
			if _, err := q.Exec(ctx, ins, fid, poolID, maxPos+1000*(i+1)); err != nil {
				return fmt.Errorf("PoolRepo.AddFiles insert: %w", err)
			}
		}
		return nil
	}
	// Positional insert: rebuild the full ordered list and reassign.
	return r.insertAtPosition(ctx, q, poolID, fileIDs, *position)
}
// insertAtPosition fetches the current ordered file list, splices in the new
// IDs at index pos (0-indexed, clamped to [0, len]), then does a full position
// reassign. IDs already present in the pool are skipped; if nothing remains to
// add, the pool is left untouched.
//
// NOTE(review): fetch + full rewrite is not atomic on its own; run under
// Transactor.WithTx to avoid racing concurrent pool mutations.
func (r *PoolRepo) insertAtPosition(ctx context.Context, q db.Querier, poolID uuid.UUID, newIDs []uuid.UUID, pos int) error {
	// 1. Fetch current order.
	rows, err := q.Query(ctx, `SELECT file_id FROM data.file_pool WHERE pool_id = $1 ORDER BY position ASC, file_id ASC`, poolID)
	if err != nil {
		return fmt.Errorf("PoolRepo.insertAtPosition fetch: %w", err)
	}
	var current []uuid.UUID
	for rows.Next() {
		var fid uuid.UUID
		if err := rows.Scan(&fid); err != nil {
			rows.Close()
			return fmt.Errorf("PoolRepo.insertAtPosition scan: %w", err)
		}
		current = append(current, fid)
	}
	rows.Close()
	if err := rows.Err(); err != nil {
		return fmt.Errorf("PoolRepo.insertAtPosition rows: %w", err)
	}
	// 2. Build new ordered list, skipping already-present IDs from newIDs.
	present := make(map[uuid.UUID]bool, len(current))
	for _, fid := range current {
		present[fid] = true
	}
	toAdd := make([]uuid.UUID, 0, len(newIDs))
	for _, fid := range newIDs {
		if !present[fid] {
			toAdd = append(toAdd, fid)
		}
	}
	if len(toAdd) == 0 {
		return nil // all already present
	}
	// Clamp the insertion index into the valid range.
	if pos < 0 {
		pos = 0
	}
	if pos > len(current) {
		pos = len(current)
	}
	ordered := make([]uuid.UUID, 0, len(current)+len(toAdd))
	ordered = append(ordered, current[:pos]...)
	ordered = append(ordered, toAdd...)
	ordered = append(ordered, current[pos:]...)
	// 3. Full replace.
	return r.reassignPositions(ctx, q, poolID, ordered)
}
// reassignPositions rewrites the pool's membership: it deletes every row for
// poolID and re-inserts the ordered IDs with positions 1000, 2000, 3000, …
// (gaps leave room for future positional inserts). An empty slice empties the
// pool.
func (r *PoolRepo) reassignPositions(ctx context.Context, q db.Querier, poolID uuid.UUID, ordered []uuid.UUID) error {
	if _, err := q.Exec(ctx, `DELETE FROM data.file_pool WHERE pool_id = $1`, poolID); err != nil {
		return fmt.Errorf("PoolRepo.reassignPositions delete: %w", err)
	}
	const ins = `INSERT INTO data.file_pool (file_id, pool_id, position) VALUES ($1, $2, $3)`
	for idx, fileID := range ordered {
		pos := 1000 * (idx + 1)
		if _, err := q.Exec(ctx, ins, fileID, poolID, pos); err != nil {
			return fmt.Errorf("PoolRepo.reassignPositions insert: %w", err)
		}
	}
	return nil
}
// ---------------------------------------------------------------------------
// RemoveFiles
// ---------------------------------------------------------------------------

// RemoveFiles detaches the given files from the pool. IDs not in the pool are
// ignored by the DELETE; removing zero IDs is a no-op. Positions of the
// remaining files are left untouched.
//
// The IDs are passed as a single uuid[] parameter via ANY($2) rather than a
// generated IN ($2,$3,…) placeholder list — pgx encodes []uuid.UUID as a
// PostgreSQL array natively, so the SQL stays constant.
func (r *PoolRepo) RemoveFiles(ctx context.Context, poolID uuid.UUID, fileIDs []uuid.UUID) error {
	if len(fileIDs) == 0 {
		return nil
	}
	const query = `DELETE FROM data.file_pool WHERE pool_id = $1 AND file_id = ANY($2)`
	q := connOrTx(ctx, r.pool)
	if _, err := q.Exec(ctx, query, poolID, fileIDs); err != nil {
		return fmt.Errorf("PoolRepo.RemoveFiles: %w", err)
	}
	return nil
}
// ---------------------------------------------------------------------------
// Reorder
// ---------------------------------------------------------------------------

// Reorder replaces the full ordered sequence with positions 1000, 2000, …
// Only file IDs already in the pool are allowed; unknown IDs are silently
// skipped to avoid integrity violations. Files currently in the pool but
// absent from fileIDs are removed (full-replace semantics).
//
// NOTE(review): run under Transactor.WithTx — the membership read and the
// rewrite are separate statements.
func (r *PoolRepo) Reorder(ctx context.Context, poolID uuid.UUID, fileIDs []uuid.UUID) error {
	q := connOrTx(ctx, r.pool)
	// Fetch current membership so IDs not in the pool can be skipped, as
	// documented; previously the list was passed straight to
	// reassignPositions, which would have inserted unknown IDs.
	rows, err := q.Query(ctx, `SELECT file_id FROM data.file_pool WHERE pool_id = $1`, poolID)
	if err != nil {
		return fmt.Errorf("PoolRepo.Reorder fetch: %w", err)
	}
	member := make(map[uuid.UUID]bool)
	for rows.Next() {
		var fid uuid.UUID
		if err := rows.Scan(&fid); err != nil {
			rows.Close()
			return fmt.Errorf("PoolRepo.Reorder scan: %w", err)
		}
		member[fid] = true
	}
	rows.Close()
	if err := rows.Err(); err != nil {
		return fmt.Errorf("PoolRepo.Reorder rows: %w", err)
	}
	ordered := make([]uuid.UUID, 0, len(fileIDs))
	for _, fid := range fileIDs {
		if member[fid] {
			ordered = append(ordered, fid)
		}
	}
	return r.reassignPositions(ctx, q, poolID, ordered)
}

View File

@ -0,0 +1,67 @@
// Package postgres provides the PostgreSQL implementations of the port interfaces.
package postgres
import (
"context"
"fmt"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"tanabata/backend/internal/db"
)
// NewPool creates and validates a *pgxpool.Pool from the given connection URL.
// The pool is ready to use; the caller is responsible for closing it.
func NewPool(ctx context.Context, url string) (*pgxpool.Pool, error) {
	p, err := pgxpool.New(ctx, url)
	if err != nil {
		return nil, fmt.Errorf("pgxpool.New: %w", err)
	}
	// Fail fast on an unreachable database instead of at the first query.
	if pingErr := p.Ping(ctx); pingErr != nil {
		p.Close()
		return nil, fmt.Errorf("postgres ping: %w", pingErr)
	}
	return p, nil
}
// Transactor implements port.Transactor using a pgxpool.Pool.
type Transactor struct {
	pool *pgxpool.Pool // transactions are started from this pool
}

// NewTransactor creates a Transactor backed by pool.
func NewTransactor(pool *pgxpool.Pool) *Transactor {
	return &Transactor{pool: pool}
}
// WithTx begins a transaction, stores it in ctx, and calls fn. If fn returns
// an error the transaction is rolled back; otherwise it is committed.
//
// The rollback is deferred so the transaction is also released when fn
// panics (the original code leaked the tx in that case); after a successful
// Commit the deferred Rollback is a harmless no-op (pgx reports ErrTxClosed,
// which is ignored).
func (t *Transactor) WithTx(ctx context.Context, fn func(ctx context.Context) error) error {
	tx, err := t.pool.BeginTx(ctx, pgx.TxOptions{})
	if err != nil {
		return fmt.Errorf("begin tx: %w", err)
	}
	defer func() { _ = tx.Rollback(ctx) }()
	txCtx := db.ContextWithTx(ctx, tx)
	if err := fn(txCtx); err != nil {
		return err
	}
	if err := tx.Commit(ctx); err != nil {
		return fmt.Errorf("commit tx: %w", err)
	}
	return nil
}
// connOrTx returns the pgx.Tx stored in ctx by WithTx, or the pool itself when
// no transaction is active. The returned value satisfies db.Querier and can be
// used directly for queries and commands.
func connOrTx(ctx context.Context, pool *pgxpool.Pool) db.Querier {
	tx, inTx := db.TxFromContext(ctx)
	if !inTx {
		return pool
	}
	return tx
}

View File

@ -0,0 +1,162 @@
package postgres
import (
"context"
"errors"
"fmt"
"time"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// sessionRow matches the columns stored in activity.sessions.
// IsCurrent is a service-layer concern and is not stored in the database.
type sessionRow struct {
	ID           int        `db:"id"`         // primary key
	TokenHash    string     `db:"token_hash"` // hashed refresh token (lookup key)
	UserID       int16      `db:"user_id"`
	UserAgent    string     `db:"user_agent"`
	StartedAt    time.Time  `db:"started_at"`
	ExpiresAt    *time.Time `db:"expires_at"` // nullable: NULL means no expiry recorded
	LastActivity time.Time  `db:"last_activity"`
}

// sessionRowWithTotal extends sessionRow with a window-function count for ListByUser.
type sessionRowWithTotal struct {
	sessionRow
	Total int `db:"total"` // COUNT(*) OVER() — identical on every row of a result set
}
// toSession maps a scanned sessionRow onto the domain model. IsCurrent is
// left at its zero value; the service layer is responsible for setting it.
func toSession(r sessionRow) domain.Session {
	var s domain.Session
	s.ID = r.ID
	s.TokenHash = r.TokenHash
	s.UserID = r.UserID
	s.UserAgent = r.UserAgent
	s.StartedAt = r.StartedAt
	s.ExpiresAt = r.ExpiresAt
	s.LastActivity = r.LastActivity
	return s
}
// SessionRepo implements port.SessionRepo using PostgreSQL.
type SessionRepo struct {
	pool *pgxpool.Pool // all queries go through connOrTx, so they honor an active tx
}

// NewSessionRepo creates a SessionRepo backed by pool.
func NewSessionRepo(pool *pgxpool.Pool) *SessionRepo {
	return &SessionRepo{pool: pool}
}

// Compile-time check that SessionRepo satisfies the port interface.
var _ port.SessionRepo = (*SessionRepo)(nil)
// Create inserts a new session row and returns it with the database-assigned
// id and timestamps.
func (r *SessionRepo) Create(ctx context.Context, s *domain.Session) (*domain.Session, error) {
	const query = `
INSERT INTO activity.sessions (token_hash, user_id, user_agent, expires_at)
VALUES ($1, $2, $3, $4)
RETURNING id, token_hash, user_id, user_agent, started_at, expires_at, last_activity`
	res, err := connOrTx(ctx, r.pool).Query(ctx, query, s.TokenHash, s.UserID, s.UserAgent, s.ExpiresAt)
	if err != nil {
		return nil, fmt.Errorf("SessionRepo.Create: %w", err)
	}
	rec, err := pgx.CollectOneRow(res, pgx.RowToStructByName[sessionRow])
	if err != nil {
		return nil, fmt.Errorf("SessionRepo.Create scan: %w", err)
	}
	out := toSession(rec)
	return &out, nil
}
// GetByTokenHash fetches the active session for the given token hash.
// Inactive or unknown hashes map to domain.ErrNotFound.
func (r *SessionRepo) GetByTokenHash(ctx context.Context, hash string) (*domain.Session, error) {
	const query = `
SELECT id, token_hash, user_id, user_agent, started_at, expires_at, last_activity
FROM activity.sessions
WHERE token_hash = $1 AND is_active = true`
	res, err := connOrTx(ctx, r.pool).Query(ctx, query, hash)
	if err != nil {
		return nil, fmt.Errorf("SessionRepo.GetByTokenHash: %w", err)
	}
	rec, err := pgx.CollectOneRow(res, pgx.RowToStructByName[sessionRow])
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		return nil, domain.ErrNotFound
	case err != nil:
		return nil, fmt.Errorf("SessionRepo.GetByTokenHash scan: %w", err)
	}
	out := toSession(rec)
	return &out, nil
}
// ListByUser returns every active session of the user, newest first, plus the
// total count (taken from a window function on the same query).
func (r *SessionRepo) ListByUser(ctx context.Context, userID int16) (*domain.SessionList, error) {
	const query = `
SELECT id, token_hash, user_id, user_agent, started_at, expires_at, last_activity,
COUNT(*) OVER() AS total
FROM activity.sessions
WHERE user_id = $1 AND is_active = true
ORDER BY started_at DESC`
	res, err := connOrTx(ctx, r.pool).Query(ctx, query, userID)
	if err != nil {
		return nil, fmt.Errorf("SessionRepo.ListByUser: %w", err)
	}
	scanned, err := pgx.CollectRows(res, pgx.RowToStructByName[sessionRowWithTotal])
	if err != nil {
		return nil, fmt.Errorf("SessionRepo.ListByUser scan: %w", err)
	}
	out := &domain.SessionList{Items: make([]domain.Session, len(scanned))}
	for i, rec := range scanned {
		out.Items[i] = toSession(rec.sessionRow)
		out.Total = rec.Total // identical on every row
	}
	return out, nil
}
// UpdateLastActivity stamps the session's last_activity column with t.
// Returns domain.ErrNotFound when no session with the given id exists.
func (r *SessionRepo) UpdateLastActivity(ctx context.Context, id int, t time.Time) error {
	const query = `UPDATE activity.sessions SET last_activity = $2 WHERE id = $1`
	tag, err := connOrTx(ctx, r.pool).Exec(ctx, query, id, t)
	if err != nil {
		return fmt.Errorf("SessionRepo.UpdateLastActivity: %w", err)
	}
	if tag.RowsAffected() == 0 {
		return domain.ErrNotFound
	}
	return nil
}
// Delete soft-deletes one session by flipping is_active to false.
// Returns domain.ErrNotFound when the session is unknown or already inactive.
func (r *SessionRepo) Delete(ctx context.Context, id int) error {
	const query = `UPDATE activity.sessions SET is_active = false WHERE id = $1 AND is_active = true`
	tag, err := connOrTx(ctx, r.pool).Exec(ctx, query, id)
	if err != nil {
		return fmt.Errorf("SessionRepo.Delete: %w", err)
	}
	if tag.RowsAffected() == 0 {
		return domain.ErrNotFound
	}
	return nil
}
// DeleteByUserID soft-deletes every active session of the user. Unlike
// Delete, affecting zero rows is not an error.
func (r *SessionRepo) DeleteByUserID(ctx context.Context, userID int16) error {
	const query = `UPDATE activity.sessions SET is_active = false WHERE user_id = $1 AND is_active = true`
	if _, err := connOrTx(ctx, r.pool).Exec(ctx, query, userID); err != nil {
		return fmt.Errorf("SessionRepo.DeleteByUserID: %w", err)
	}
	return nil
}

View File

@ -0,0 +1,634 @@
package postgres
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/google/uuid"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// ---------------------------------------------------------------------------
// Row structs — use pgx-scannable types
// ---------------------------------------------------------------------------

// tagRow mirrors the columns of the tag SELECTs, including the LEFT-joined
// category columns (all nullable) and the joined creator name.
type tagRow struct {
	ID            uuid.UUID  `db:"id"`
	Name          string     `db:"name"`
	Notes         *string    `db:"notes"` // nullable
	Color         *string    `db:"color"` // nullable
	CategoryID    *uuid.UUID `db:"category_id"`
	CategoryName  *string    `db:"category_name"`  // nil when the tag has no category
	CategoryColor *string    `db:"category_color"` // nil when the tag has no category
	Metadata      []byte     `db:"metadata"`       // raw JSON; may be empty or "null"
	CreatorID     int16      `db:"creator_id"`
	CreatorName   string     `db:"creator_name"` // joined from core.users
	IsPublic      bool       `db:"is_public"`
}

// tagRowWithTotal adds the COUNT(*) OVER() window total used by listTags.
type tagRowWithTotal struct {
	tagRow
	Total int `db:"total"`
}

// tagRuleRow mirrors a when→then tag implication row.
type tagRuleRow struct {
	WhenTagID   uuid.UUID `db:"when_tag_id"`
	ThenTagID   uuid.UUID `db:"then_tag_id"`
	ThenTagName string    `db:"then_tag_name"`
	IsActive    bool      `db:"is_active"`
}
// ---------------------------------------------------------------------------
// Converters
// ---------------------------------------------------------------------------

// toTag converts a scanned tagRow into a domain.Tag. CreatedAt is derived
// from the UUID; empty or SQL-null metadata stays nil.
func toTag(r tagRow) domain.Tag {
	out := domain.Tag{
		ID:            r.ID,
		Name:          r.Name,
		Notes:         r.Notes,
		Color:         r.Color,
		CategoryID:    r.CategoryID,
		CategoryName:  r.CategoryName,
		CategoryColor: r.CategoryColor,
		CreatorID:     r.CreatorID,
		CreatorName:   r.CreatorName,
		IsPublic:      r.IsPublic,
		CreatedAt:     domain.UUIDCreatedAt(r.ID),
	}
	if meta := r.Metadata; len(meta) > 0 && string(meta) != "null" {
		out.Metadata = json.RawMessage(meta)
	}
	return out
}
// toTagRule maps a scanned tagRuleRow onto its domain representation.
func toTagRule(r tagRuleRow) domain.TagRule {
	var rule domain.TagRule
	rule.WhenTagID = r.WhenTagID
	rule.ThenTagID = r.ThenTagID
	rule.ThenTagName = r.ThenTagName
	rule.IsActive = r.IsActive
	return rule
}
// ---------------------------------------------------------------------------
// Shared SQL fragments
// ---------------------------------------------------------------------------

// tagSelectFrom is the shared SELECT/FROM/JOIN prefix for single-tag queries;
// callers append their own WHERE clause. The category join is a LEFT JOIN so
// uncategorized tags still match; column aliases must line up with tagRow's
// db tags for pgx.RowToStructByName.
const tagSelectFrom = `
SELECT
t.id,
t.name,
t.notes,
t.color,
t.category_id,
c.name AS category_name,
c.color AS category_color,
t.metadata,
t.creator_id,
u.name AS creator_name,
t.is_public
FROM data.tags t
LEFT JOIN data.categories c ON c.id = t.category_id
JOIN core.users u ON u.id = t.creator_id`
// tagSortColumn maps an API sort key onto a whitelisted ORDER BY column, so
// user input never reaches the SQL directly. Unrecognized keys (including
// "created") fall back to t.id.
func tagSortColumn(s string) string {
	columns := map[string]string{
		"name":          "t.name",
		"color":         "t.color",
		"category_name": "c.name",
	}
	if col, ok := columns[s]; ok {
		return col
	}
	return "t.id" // "created" and anything unrecognized
}
// isPgUniqueViolation reports whether err is a PostgreSQL unique-constraint
// error (SQLSTATE 23505).
func isPgUniqueViolation(err error) bool {
	var pgErr *pgconn.PgError
	if !errors.As(err, &pgErr) {
		return false
	}
	return pgErr.Code == "23505"
}
// ---------------------------------------------------------------------------
// TagRepo — implements port.TagRepo
// ---------------------------------------------------------------------------

// TagRepo handles tag CRUD and file—tag relations.
type TagRepo struct {
	pool *pgxpool.Pool // all queries go through connOrTx, so they honor an active tx
}

// Compile-time check that TagRepo satisfies the port interface.
var _ port.TagRepo = (*TagRepo)(nil)

// NewTagRepo creates a TagRepo backed by pool.
func NewTagRepo(pool *pgxpool.Pool) *TagRepo {
	return &TagRepo{pool: pool}
}
// ---------------------------------------------------------------------------
// List / ListByCategory
// ---------------------------------------------------------------------------

// List returns one offset page of all tags; see listTags for the paging and
// sorting rules.
func (r *TagRepo) List(ctx context.Context, params port.OffsetParams) (*domain.TagOffsetPage, error) {
	return r.listTags(ctx, params, nil)
}

// ListByCategory behaves like List but restricts results to one category.
func (r *TagRepo) ListByCategory(ctx context.Context, categoryID uuid.UUID, params port.OffsetParams) (*domain.TagOffsetPage, error) {
	return r.listTags(ctx, params, &categoryID)
}
// listTags is the shared implementation behind List and ListByCategory.
//
//   - Sort: mapped through tagSortColumn (whitelist); Order accepts "desc"
//     case-insensitively, anything else sorts ascending.
//   - Search: case-insensitive substring match on the tag name.
//   - categoryID, when non-nil, restricts results to that category.
//   - Limit defaults to 50 when non-positive; negative offsets clamp to 0.
//
// NOTE(review): Total comes from COUNT(*) OVER(); an offset past the last row
// yields an empty page with Total 0 even when matches exist.
func (r *TagRepo) listTags(ctx context.Context, params port.OffsetParams, categoryID *uuid.UUID) (*domain.TagOffsetPage, error) {
	order := "ASC"
	if strings.ToLower(params.Order) == "desc" {
		order = "DESC"
	}
	sortCol := tagSortColumn(params.Sort)
	args := []any{}
	n := 1 // next positional placeholder number
	var conditions []string
	if params.Search != "" {
		conditions = append(conditions, fmt.Sprintf("lower(t.name) LIKE lower($%d)", n))
		args = append(args, "%"+params.Search+"%")
		n++
	}
	if categoryID != nil {
		conditions = append(conditions, fmt.Sprintf("t.category_id = $%d", n))
		args = append(args, *categoryID)
		n++
	}
	where := ""
	if len(conditions) > 0 {
		where = "WHERE " + strings.Join(conditions, " AND ")
	}
	limit := params.Limit
	if limit <= 0 {
		limit = 50
	}
	offset := params.Offset
	if offset < 0 {
		offset = 0
	}
	query := fmt.Sprintf(`
SELECT
t.id, t.name, t.notes, t.color,
t.category_id,
c.name AS category_name,
c.color AS category_color,
t.metadata, t.creator_id,
u.name AS creator_name,
t.is_public,
COUNT(*) OVER() AS total
FROM data.tags t
LEFT JOIN data.categories c ON c.id = t.category_id
JOIN core.users u ON u.id = t.creator_id
%s
ORDER BY %s %s NULLS LAST, t.id ASC
LIMIT $%d OFFSET $%d`, where, sortCol, order, n, n+1)
	args = append(args, limit, offset)
	q := connOrTx(ctx, r.pool)
	rows, err := q.Query(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("TagRepo.List query: %w", err)
	}
	collected, err := pgx.CollectRows(rows, pgx.RowToStructByName[tagRowWithTotal])
	if err != nil {
		return nil, fmt.Errorf("TagRepo.List scan: %w", err)
	}
	items := make([]domain.Tag, len(collected))
	total := 0
	// Every row carries the same window total; keep the last seen value.
	for i, row := range collected {
		items[i] = toTag(row.tagRow)
		total = row.Total
	}
	return &domain.TagOffsetPage{
		Items:  items,
		Total:  total,
		Offset: offset,
		Limit:  limit,
	}, nil
}
// ---------------------------------------------------------------------------
// GetByID
// ---------------------------------------------------------------------------

// GetByID fetches one tag (with category and creator info) by primary key.
// Returns domain.ErrNotFound when the tag does not exist.
func (r *TagRepo) GetByID(ctx context.Context, id uuid.UUID) (*domain.Tag, error) {
	const query = tagSelectFrom + `
WHERE t.id = $1`
	res, err := connOrTx(ctx, r.pool).Query(ctx, query, id)
	if err != nil {
		return nil, fmt.Errorf("TagRepo.GetByID: %w", err)
	}
	rec, err := pgx.CollectOneRow(res, pgx.RowToStructByName[tagRow])
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		return nil, domain.ErrNotFound
	case err != nil:
		return nil, fmt.Errorf("TagRepo.GetByID scan: %w", err)
	}
	tag := toTag(rec)
	return &tag, nil
}
// ---------------------------------------------------------------------------
// Create
// ---------------------------------------------------------------------------

// Create inserts a new tag and returns it enriched with the category and
// creator columns (same shape as GetByID). Empty metadata is sent as SQL NULL.
// Returns domain.ErrConflict on a unique-constraint violation.
func (r *TagRepo) Create(ctx context.Context, t *domain.Tag) (*domain.Tag, error) {
	const query = `
WITH ins AS (
INSERT INTO data.tags (name, notes, color, category_id, metadata, creator_id, is_public)
VALUES ($1, $2, $3, $4, $5, $6, $7)
RETURNING *
)
SELECT
ins.id, ins.name, ins.notes, ins.color,
ins.category_id,
c.name AS category_name,
c.color AS category_color,
ins.metadata, ins.creator_id,
u.name AS creator_name,
ins.is_public
FROM ins
LEFT JOIN data.categories c ON c.id = ins.category_id
JOIN core.users u ON u.id = ins.creator_id`
	// Empty metadata becomes SQL NULL rather than an empty byte slice.
	var meta any
	if len(t.Metadata) > 0 {
		meta = t.Metadata
	}
	q := connOrTx(ctx, r.pool)
	rows, err := q.Query(ctx, query,
		t.Name, t.Notes, t.Color, t.CategoryID, meta, t.CreatorID, t.IsPublic)
	if err != nil {
		return nil, fmt.Errorf("TagRepo.Create: %w", err)
	}
	row, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[tagRow])
	if err != nil {
		if isPgUniqueViolation(err) {
			return nil, domain.ErrConflict
		}
		return nil, fmt.Errorf("TagRepo.Create scan: %w", err)
	}
	created := toTag(row)
	return &created, nil
}
// ---------------------------------------------------------------------------
// Update
// ---------------------------------------------------------------------------
// Update replaces all mutable fields of the tag identified by id. The caller
// must merge current values with the patch (read-then-write) before calling
// this; metadata is only overwritten when a non-empty value is supplied
// (COALESCE keeps the stored JSON otherwise).
// Returns domain.ErrNotFound for a missing row and domain.ErrConflict on a
// unique-constraint violation.
func (r *TagRepo) Update(ctx context.Context, id uuid.UUID, t *domain.Tag) (*domain.Tag, error) {
	const query = `
	WITH upd AS (
		UPDATE data.tags SET
			name = $2,
			notes = $3,
			color = $4,
			category_id = $5,
			metadata = COALESCE($6, metadata),
			is_public = $7
		WHERE id = $1
		RETURNING *
	)
	SELECT
		upd.id, upd.name, upd.notes, upd.color,
		upd.category_id,
		c.name AS category_name,
		c.color AS category_color,
		upd.metadata, upd.creator_id,
		u.name AS creator_name,
		upd.is_public
	FROM upd
	LEFT JOIN data.categories c ON c.id = upd.category_id
	JOIN core.users u ON u.id = upd.creator_id`

	// NULL metadata leaves the stored value untouched via COALESCE above.
	var meta any
	if len(t.Metadata) > 0 {
		meta = t.Metadata
	}

	rows, err := connOrTx(ctx, r.pool).Query(ctx, query,
		id, t.Name, t.Notes, t.Color, t.CategoryID, meta, t.IsPublic)
	if err != nil {
		return nil, fmt.Errorf("TagRepo.Update: %w", err)
	}
	row, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[tagRow])
	if err != nil {
		switch {
		case errors.Is(err, pgx.ErrNoRows):
			return nil, domain.ErrNotFound
		case isPgUniqueViolation(err):
			return nil, domain.ErrConflict
		}
		return nil, fmt.Errorf("TagRepo.Update scan: %w", err)
	}
	updated := toTag(row)
	return &updated, nil
}
// ---------------------------------------------------------------------------
// Delete
// ---------------------------------------------------------------------------
// Delete removes a tag by ID. Returns domain.ErrNotFound when no row was
// deleted.
func (r *TagRepo) Delete(ctx context.Context, id uuid.UUID) error {
	ct, err := connOrTx(ctx, r.pool).Exec(ctx,
		`DELETE FROM data.tags WHERE id = $1`, id)
	switch {
	case err != nil:
		return fmt.Errorf("TagRepo.Delete: %w", err)
	case ct.RowsAffected() == 0:
		return domain.ErrNotFound
	}
	return nil
}
// ---------------------------------------------------------------------------
// Filetag operations
// ---------------------------------------------------------------------------
// ListByFile returns every tag attached to the given file, ordered by name.
// An untagged file yields an empty (non-nil) slice.
func (r *TagRepo) ListByFile(ctx context.Context, fileID uuid.UUID) ([]domain.Tag, error) {
	const query = tagSelectFrom + `
		JOIN data.file_tag ft ON ft.tag_id = t.id
		WHERE ft.file_id = $1
		ORDER BY t.name`

	rows, err := connOrTx(ctx, r.pool).Query(ctx, query, fileID)
	if err != nil {
		return nil, fmt.Errorf("TagRepo.ListByFile: %w", err)
	}
	collected, err := pgx.CollectRows(rows, pgx.RowToStructByName[tagRow])
	if err != nil {
		return nil, fmt.Errorf("TagRepo.ListByFile scan: %w", err)
	}
	out := make([]domain.Tag, 0, len(collected))
	for _, row := range collected {
		out = append(out, toTag(row))
	}
	return out, nil
}
// AddFileTag links tagID to fileID. Re-adding an existing link is a no-op
// (ON CONFLICT DO NOTHING), so the call is idempotent.
func (r *TagRepo) AddFileTag(ctx context.Context, fileID, tagID uuid.UUID) error {
	const query = `
	INSERT INTO data.file_tag (file_id, tag_id) VALUES ($1, $2)
	ON CONFLICT DO NOTHING`
	_, err := connOrTx(ctx, r.pool).Exec(ctx, query, fileID, tagID)
	if err != nil {
		return fmt.Errorf("TagRepo.AddFileTag: %w", err)
	}
	return nil
}
// RemoveFileTag unlinks tagID from fileID. Removing a link that does not
// exist is not an error.
func (r *TagRepo) RemoveFileTag(ctx context.Context, fileID, tagID uuid.UUID) error {
	_, err := connOrTx(ctx, r.pool).Exec(ctx,
		`DELETE FROM data.file_tag WHERE file_id = $1 AND tag_id = $2`, fileID, tagID)
	if err != nil {
		return fmt.Errorf("TagRepo.RemoveFileTag: %w", err)
	}
	return nil
}
// SetFileTags replaces the complete tag set of a file: all existing links
// are deleted, then the given tags are inserted with a single multi-row
// INSERT. Wrap the call in a transaction (via connOrTx's context) when the
// delete+insert must be atomic.
func (r *TagRepo) SetFileTags(ctx context.Context, fileID uuid.UUID, tagIDs []uuid.UUID) error {
	q := connOrTx(ctx, r.pool)
	if _, err := q.Exec(ctx,
		`DELETE FROM data.file_tag WHERE file_id = $1`, fileID); err != nil {
		return fmt.Errorf("TagRepo.SetFileTags delete: %w", err)
	}
	if len(tagIDs) == 0 {
		return nil
	}

	// Build "($1, $2), ($1, $3), ..." — $1 is the file, $2.. are the tags.
	values := make([]string, 0, len(tagIDs))
	args := make([]any, 0, len(tagIDs)+1)
	args = append(args, fileID)
	for i, tagID := range tagIDs {
		values = append(values, fmt.Sprintf("($1, $%d)", i+2))
		args = append(args, tagID)
	}
	ins := `INSERT INTO data.file_tag (file_id, tag_id) VALUES ` +
		strings.Join(values, ", ") + ` ON CONFLICT DO NOTHING`
	if _, err := q.Exec(ctx, ins, args...); err != nil {
		return fmt.Errorf("TagRepo.SetFileTags insert: %w", err)
	}
	return nil
}
// CommonTagsForFiles returns the tags present on every one of the given
// files ("=" in the HAVING comparison). An empty input yields an empty slice.
func (r *TagRepo) CommonTagsForFiles(ctx context.Context, fileIDs []uuid.UUID) ([]domain.Tag, error) {
	if len(fileIDs) > 0 {
		return r.queryTagsByPresence(ctx, fileIDs, "=")
	}
	return []domain.Tag{}, nil
}
// PartialTagsForFiles returns tags present on some but not all of the given
// files ("<" in the HAVING comparison). An empty input yields an empty slice.
func (r *TagRepo) PartialTagsForFiles(ctx context.Context, fileIDs []uuid.UUID) ([]domain.Tag, error) {
	if len(fileIDs) > 0 {
		return r.queryTagsByPresence(ctx, fileIDs, "<")
	}
	return []domain.Tag{}, nil
}
// queryTagsByPresence returns tags attached to the given files, filtered by
// how many of those files carry each tag. op is the SQL comparison placed in
// the HAVING clause: "=" keeps tags present on every file (see
// CommonTagsForFiles), "<" keeps tags present on only a strict subset (see
// PartialTagsForFiles). op comes only from those two callers, never from
// user input, so interpolating it into the query is safe.
func (r *TagRepo) queryTagsByPresence(ctx context.Context, fileIDs []uuid.UUID, op string) ([]domain.Tag, error) {
	// Placeholders $1..$len(fileIDs) feed the IN list; the final parameter
	// $n is the file count compared against COUNT(DISTINCT ft.file_id).
	placeholders := make([]string, len(fileIDs))
	args := make([]any, len(fileIDs)+1)
	for i, id := range fileIDs {
		placeholders[i] = fmt.Sprintf("$%d", i+1)
		args[i] = id
	}
	args[len(fileIDs)] = len(fileIDs)
	n := len(fileIDs) + 1
	query := fmt.Sprintf(`
	SELECT
		t.id, t.name, t.notes, t.color,
		t.category_id,
		c.name AS category_name,
		c.color AS category_color,
		t.metadata, t.creator_id,
		u.name AS creator_name,
		t.is_public
	FROM data.tags t
	JOIN data.file_tag ft ON ft.tag_id = t.id
	LEFT JOIN data.categories c ON c.id = t.category_id
	JOIN core.users u ON u.id = t.creator_id
	WHERE ft.file_id IN (%s)
	GROUP BY t.id, c.id, u.id
	HAVING COUNT(DISTINCT ft.file_id) %s $%d
	ORDER BY t.name`,
		strings.Join(placeholders, ", "), op, n)
	q := connOrTx(ctx, r.pool)
	rows, err := q.Query(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("TagRepo.queryTagsByPresence: %w", err)
	}
	collected, err := pgx.CollectRows(rows, pgx.RowToStructByName[tagRow])
	if err != nil {
		return nil, fmt.Errorf("TagRepo.queryTagsByPresence scan: %w", err)
	}
	tags := make([]domain.Tag, len(collected))
	for i, row := range collected {
		tags[i] = toTag(row)
	}
	return tags, nil
}
// ---------------------------------------------------------------------------
// TagRuleRepo — implements port.TagRuleRepo (separate type to avoid method collision)
// ---------------------------------------------------------------------------
// TagRuleRepo handles tag-rule CRUD. It is a separate type from TagRepo to
// avoid method-name collisions (both have Create/Delete).
type TagRuleRepo struct {
	// pool is the shared pgx connection pool; per-call, connOrTx may swap in
	// a transaction carried on the context.
	pool *pgxpool.Pool
}

// Compile-time check that TagRuleRepo satisfies the port interface.
var _ port.TagRuleRepo = (*TagRuleRepo)(nil)

// NewTagRuleRepo creates a TagRuleRepo backed by pool.
func NewTagRuleRepo(pool *pgxpool.Pool) *TagRuleRepo {
	return &TagRuleRepo{pool: pool}
}
// ListByTag returns all rules whose trigger ("when") tag is tagID, with the
// consequent tag's name denormalized, ordered by that name.
func (r *TagRuleRepo) ListByTag(ctx context.Context, tagID uuid.UUID) ([]domain.TagRule, error) {
	const query = `
	SELECT
		tr.when_tag_id,
		tr.then_tag_id,
		t.name AS then_tag_name,
		tr.is_active
	FROM data.tag_rules tr
	JOIN data.tags t ON t.id = tr.then_tag_id
	WHERE tr.when_tag_id = $1
	ORDER BY t.name`

	rows, err := connOrTx(ctx, r.pool).Query(ctx, query, tagID)
	if err != nil {
		return nil, fmt.Errorf("TagRuleRepo.ListByTag: %w", err)
	}
	collected, err := pgx.CollectRows(rows, pgx.RowToStructByName[tagRuleRow])
	if err != nil {
		return nil, fmt.Errorf("TagRuleRepo.ListByTag scan: %w", err)
	}
	out := make([]domain.TagRule, 0, len(collected))
	for _, row := range collected {
		out = append(out, toTagRule(row))
	}
	return out, nil
}
// Create inserts a tag rule and returns it with the then-tag name resolved.
// A duplicate (when_tag_id, then_tag_id) pair maps to domain.ErrConflict.
func (r *TagRuleRepo) Create(ctx context.Context, rule domain.TagRule) (*domain.TagRule, error) {
	const query = `
	WITH ins AS (
		INSERT INTO data.tag_rules (when_tag_id, then_tag_id, is_active)
		VALUES ($1, $2, $3)
		RETURNING *
	)
	SELECT ins.when_tag_id, ins.then_tag_id, t.name AS then_tag_name, ins.is_active
	FROM ins
	JOIN data.tags t ON t.id = ins.then_tag_id`

	rows, err := connOrTx(ctx, r.pool).Query(ctx, query,
		rule.WhenTagID, rule.ThenTagID, rule.IsActive)
	if err != nil {
		return nil, fmt.Errorf("TagRuleRepo.Create: %w", err)
	}
	row, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[tagRuleRow])
	if err != nil {
		if isPgUniqueViolation(err) {
			return nil, domain.ErrConflict
		}
		return nil, fmt.Errorf("TagRuleRepo.Create scan: %w", err)
	}
	out := toTagRule(row)
	return &out, nil
}
// SetActive toggles a rule's is_active flag. When the rule is being enabled
// and applyToExisting is true, it also retroactively tags all files that
// already carry whenTagID with the full transitive closure of thenTagID.
// Returns domain.ErrNotFound when the rule does not exist.
func (r *TagRuleRepo) SetActive(ctx context.Context, whenTagID, thenTagID uuid.UUID, active, applyToExisting bool) error {
	const updateQuery = `
	UPDATE data.tag_rules SET is_active = $3
	WHERE when_tag_id = $1 AND then_tag_id = $2`
	q := connOrTx(ctx, r.pool)
	ct, err := q.Exec(ctx, updateQuery, whenTagID, thenTagID, active)
	if err != nil {
		return fmt.Errorf("TagRuleRepo.SetActive: %w", err)
	}
	if ct.RowsAffected() == 0 {
		return domain.ErrNotFound
	}
	// Retroactive application only makes sense when enabling the rule.
	if !active || !applyToExisting {
		return nil
	}
	// Retroactively apply the full transitive expansion of thenTagID to all
	// files that already carry whenTagID. The recursive CTE walks active rules
	// starting from thenTagID (mirrors the Go expandTagSet BFS).
	// UNION (not UNION ALL) deduplicates and therefore terminates on cycles.
	const retroQuery = `
	WITH RECURSIVE expansion(tag_id) AS (
		SELECT $2::uuid
		UNION
		SELECT r.then_tag_id
		FROM data.tag_rules r
		JOIN expansion e ON r.when_tag_id = e.tag_id
		WHERE r.is_active = true
	)
	INSERT INTO data.file_tag (file_id, tag_id)
	SELECT ft.file_id, e.tag_id
	FROM data.file_tag ft
	CROSS JOIN expansion e
	WHERE ft.tag_id = $1
	ON CONFLICT DO NOTHING`
	if _, err := q.Exec(ctx, retroQuery, whenTagID, thenTagID); err != nil {
		return fmt.Errorf("TagRuleRepo.SetActive retroactive apply: %w", err)
	}
	return nil
}
// Delete removes the rule identified by the (when, then) tag pair.
// Returns domain.ErrNotFound when no rule was deleted.
func (r *TagRuleRepo) Delete(ctx context.Context, whenTagID, thenTagID uuid.UUID) error {
	const query = `
	DELETE FROM data.tag_rules
	WHERE when_tag_id = $1 AND then_tag_id = $2`
	ct, err := connOrTx(ctx, r.pool).Exec(ctx, query, whenTagID, thenTagID)
	switch {
	case err != nil:
		return fmt.Errorf("TagRuleRepo.Delete: %w", err)
	case ct.RowsAffected() == 0:
		return domain.ErrNotFound
	}
	return nil
}

View File

@ -0,0 +1,196 @@
package postgres
import (
"context"
"errors"
"fmt"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"tanabata/backend/internal/db"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// userRow matches the columns returned by every user SELECT; field tags map
// to column names for pgx.RowToStructByName scanning.
type userRow struct {
	ID        int16  `db:"id"`
	Name      string `db:"name"`
	Password  string `db:"password"` // bcrypt hash column, returned as-is
	IsAdmin   bool   `db:"is_admin"`
	CanCreate bool   `db:"can_create"`
	IsBlocked bool   `db:"is_blocked"`
}

// userRowWithTotal extends userRow with a window-function total for List.
// Every row of a page carries the same COUNT(*) OVER() value.
type userRowWithTotal struct {
	userRow
	Total int `db:"total"`
}
// toUser converts a scanned userRow into the domain model, field by field.
func toUser(r userRow) domain.User {
	var u domain.User
	u.ID = r.ID
	u.Name = r.Name
	u.Password = r.Password
	u.IsAdmin = r.IsAdmin
	u.CanCreate = r.CanCreate
	u.IsBlocked = r.IsBlocked
	return u
}
// userSortColumn whitelists valid sort keys to prevent SQL injection: the
// user-supplied sort parameter is only ever mapped through this table before
// being interpolated into ORDER BY (see UserRepo.List).
var userSortColumn = map[string]string{
	"name": "name",
	"id":   "id",
}
// UserRepo implements port.UserRepo using PostgreSQL.
type UserRepo struct {
	// pool is the shared pgx connection pool; connOrTx may substitute a
	// context-carried transaction per call.
	pool *pgxpool.Pool
}

// NewUserRepo creates a UserRepo backed by pool.
func NewUserRepo(pool *pgxpool.Pool) *UserRepo {
	return &UserRepo{pool: pool}
}

// Compile-time check that UserRepo satisfies the port interface.
var _ port.UserRepo = (*UserRepo)(nil)
// List returns one offset page of users together with the total row count
// (computed with COUNT(*) OVER()). Sort keys are whitelisted through
// userSortColumn; unknown keys fall back to id, unknown orders to ASC.
func (r *UserRepo) List(ctx context.Context, params port.OffsetParams) (*domain.UserPage, error) {
	col, ok := userSortColumn[params.Sort]
	if !ok {
		col = "id"
	}
	ord := "ASC"
	if params.Order == "desc" {
		ord = "DESC"
	}
	limit := db.ClampLimit(params.Limit, 50, 200)
	offset := db.ClampOffset(params.Offset)

	// col and ord come only from the whitelist above — safe to interpolate.
	sql := fmt.Sprintf(`
	SELECT id, name, password, is_admin, can_create, is_blocked,
		COUNT(*) OVER() AS total
	FROM core.users
	ORDER BY %s %s
	LIMIT $1 OFFSET $2`, col, ord)

	rows, err := connOrTx(ctx, r.pool).Query(ctx, sql, limit, offset)
	if err != nil {
		return nil, fmt.Errorf("UserRepo.List: %w", err)
	}
	collected, err := pgx.CollectRows(rows, pgx.RowToStructByName[userRowWithTotal])
	if err != nil {
		return nil, fmt.Errorf("UserRepo.List scan: %w", err)
	}

	items := make([]domain.User, len(collected))
	total := 0
	for i, row := range collected {
		items[i] = toUser(row.userRow)
		total = row.Total // identical on every row of the page
	}
	return &domain.UserPage{
		Items:  items,
		Total:  total,
		Offset: offset,
		Limit:  limit,
	}, nil
}
// GetByID loads a single user by primary key.
// Returns domain.ErrNotFound when no row matches.
func (r *UserRepo) GetByID(ctx context.Context, id int16) (*domain.User, error) {
	const sql = `
	SELECT id, name, password, is_admin, can_create, is_blocked
	FROM core.users WHERE id = $1`

	rows, err := connOrTx(ctx, r.pool).Query(ctx, sql, id)
	if err != nil {
		return nil, fmt.Errorf("UserRepo.GetByID: %w", err)
	}
	row, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[userRow])
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		return nil, domain.ErrNotFound
	case err != nil:
		return nil, fmt.Errorf("UserRepo.GetByID scan: %w", err)
	}
	u := toUser(row)
	return &u, nil
}
// GetByName loads a single user by (unique) name — used by the login path.
// Returns domain.ErrNotFound when no row matches.
func (r *UserRepo) GetByName(ctx context.Context, name string) (*domain.User, error) {
	const sql = `
	SELECT id, name, password, is_admin, can_create, is_blocked
	FROM core.users WHERE name = $1`

	rows, err := connOrTx(ctx, r.pool).Query(ctx, sql, name)
	if err != nil {
		return nil, fmt.Errorf("UserRepo.GetByName: %w", err)
	}
	row, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[userRow])
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		return nil, domain.ErrNotFound
	case err != nil:
		return nil, fmt.Errorf("UserRepo.GetByName scan: %w", err)
	}
	u := toUser(row)
	return &u, nil
}
// Create inserts a new user and returns the stored row.
// Consistent with TagRepo.Create, a unique-constraint violation (duplicate
// user name — core.users.name is expected to be unique, since GetByName
// performs point lookups on it) maps to domain.ErrConflict instead of
// leaking a raw database error to callers.
func (r *UserRepo) Create(ctx context.Context, u *domain.User) (*domain.User, error) {
	const sql = `
	INSERT INTO core.users (name, password, is_admin, can_create)
	VALUES ($1, $2, $3, $4)
	RETURNING id, name, password, is_admin, can_create, is_blocked`
	q := connOrTx(ctx, r.pool)
	rows, err := q.Query(ctx, sql, u.Name, u.Password, u.IsAdmin, u.CanCreate)
	if err != nil {
		return nil, fmt.Errorf("UserRepo.Create: %w", err)
	}
	row, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[userRow])
	if err != nil {
		// Duplicate name → stable domain error, as the tag/category repos do.
		if isPgUniqueViolation(err) {
			return nil, domain.ErrConflict
		}
		return nil, fmt.Errorf("UserRepo.Create scan: %w", err)
	}
	created := toUser(row)
	return &created, nil
}
// Update overwrites every mutable column of the user identified by id and
// returns the stored result. The caller must merge current values with the
// patch before calling.
// Maps pgx.ErrNoRows to domain.ErrNotFound and — consistent with
// TagRepo.Update — a unique-constraint violation (renaming onto an existing
// user name) to domain.ErrConflict instead of a raw database error.
func (r *UserRepo) Update(ctx context.Context, id int16, u *domain.User) (*domain.User, error) {
	const sql = `
	UPDATE core.users
	SET name = $2, password = $3, is_admin = $4, can_create = $5, is_blocked = $6
	WHERE id = $1
	RETURNING id, name, password, is_admin, can_create, is_blocked`
	q := connOrTx(ctx, r.pool)
	rows, err := q.Query(ctx, sql, id, u.Name, u.Password, u.IsAdmin, u.CanCreate, u.IsBlocked)
	if err != nil {
		return nil, fmt.Errorf("UserRepo.Update: %w", err)
	}
	row, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[userRow])
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			return nil, domain.ErrNotFound
		}
		// Rename collided with an existing user name.
		if isPgUniqueViolation(err) {
			return nil, domain.ErrConflict
		}
		return nil, fmt.Errorf("UserRepo.Update scan: %w", err)
	}
	updated := toUser(row)
	return &updated, nil
}
// Delete removes a user by primary key.
// Returns domain.ErrNotFound when no row was deleted.
func (r *UserRepo) Delete(ctx context.Context, id int16) error {
	ct, err := connOrTx(ctx, r.pool).Exec(ctx,
		`DELETE FROM core.users WHERE id = $1`, id)
	switch {
	case err != nil:
		return fmt.Errorf("UserRepo.Delete: %w", err)
	case ct.RowsAffected() == 0:
		return domain.ErrNotFound
	}
	return nil
}

View File

@ -0,0 +1,19 @@
package domain
import "github.com/google/uuid"
// ObjectType is a reference entity (file, tag, category, pool); rows live in
// core.object_types and are referenced by Permission.ObjectTypeID.
type ObjectType struct {
	ID   int16
	Name string
}

// Permission represents a per-object access entry for a user: whether the
// user may view and/or edit the object identified by (ObjectTypeID, ObjectID).
type Permission struct {
	UserID       int16
	UserName     string // denormalized
	ObjectTypeID int16
	ObjectID     uuid.UUID
	CanView      bool
	CanEdit      bool
}

View File

@ -0,0 +1,46 @@
package domain
import (
"encoding/json"
"time"
"github.com/google/uuid"
)
// ActionType is a reference entity for auditable user actions.
type ActionType struct {
	ID   int16
	Name string
}

// AuditEntry is a single audit log record describing who did what to which
// object and when.
type AuditEntry struct {
	ID          int64
	UserID      int16
	UserName    string // denormalized
	Action      string // action type name, e.g. "file_create"
	ObjectType  *string
	ObjectID    *uuid.UUID
	Details     json.RawMessage // free-form action payload; nil when absent
	PerformedAt time.Time
}

// AuditPage is an offset-based page of audit log entries.
type AuditPage struct {
	Items  []AuditEntry
	Total  int
	Offset int
	Limit  int
}

// AuditFilter holds filter parameters for querying the audit log.
// Nil pointers / empty strings mean "no filter on this field".
type AuditFilter struct {
	UserID     *int16
	Action     string
	ObjectType string
	ObjectID   *uuid.UUID
	From       *time.Time
	To         *time.Time
	Offset     int
	Limit      int
}

View File

@ -0,0 +1,29 @@
package domain
import (
"encoding/json"
"time"
"github.com/google/uuid"
)
// Category is a logical grouping of tags.
type Category struct {
	ID          uuid.UUID
	Name        string
	Notes       *string
	Color       *string // 6-char hex
	Metadata    json.RawMessage
	CreatorID   int16
	CreatorName string // denormalized
	IsPublic    bool
	CreatedAt   time.Time // extracted from UUID v7 via UUIDCreatedAt
}

// CategoryOffsetPage is an offset-based page of categories.
type CategoryOffsetPage struct {
	Items  []Category
	Total  int
	Offset int
	Limit  int
}

View File

@ -0,0 +1,32 @@
package domain
import "context"
type ctxKey int
const userKey ctxKey = iota
type contextUser struct {
ID int16
IsAdmin bool
SessionID int
}
// WithUser stores user identity and current session ID in ctx.
func WithUser(ctx context.Context, userID int16, isAdmin bool, sessionID int) context.Context {
return context.WithValue(ctx, userKey, contextUser{
ID: userID,
IsAdmin: isAdmin,
SessionID: sessionID,
})
}
// UserFromContext retrieves user identity from ctx.
// Returns zero values if no user is stored.
func UserFromContext(ctx context.Context) (userID int16, isAdmin bool, sessionID int) {
u, ok := ctx.Value(userKey).(contextUser)
if !ok {
return 0, false, 0
}
return u.ID, u.IsAdmin, u.SessionID
}

View File

@ -1,65 +1,21 @@
package domain package domain
import "fmt" // DomainError is a typed domain error with a stable machine-readable code.
// Handlers map these codes to HTTP status codes.
type ErrorCode string
const (
// File errors
ErrCodeFileNotFound ErrorCode = "FILE_NOT_FOUND"
ErrCodeMIMENotSupported ErrorCode = "MIME_NOT_SUPPORTED"
// Tag errors
ErrCodeTagNotFound ErrorCode = "TAG_NOT_FOUND"
// General errors
ErrCodeBadRequest ErrorCode = "BAD_REQUEST"
ErrCodeInternal ErrorCode = "INTERNAL_SERVER_ERROR"
)
type DomainError struct { type DomainError struct {
Err error `json:"-"` code string
Code ErrorCode `json:"code"` message string
Message string `json:"message"`
Details []any `json:"-"`
} }
func (e *DomainError) Wrap(err error) *DomainError { func (e *DomainError) Error() string { return e.message }
e.Err = err func (e *DomainError) Code() string { return e.code }
return e
}
func NewErrorFileNotFound(file_id string) *DomainError { // Sentinel domain errors. Use errors.Is(err, domain.ErrNotFound) for matching.
return &DomainError{ var (
Code: ErrCodeFileNotFound, ErrNotFound = &DomainError{"not_found", "not found"}
Message: fmt.Sprintf("File not found: %q", file_id), ErrForbidden = &DomainError{"forbidden", "forbidden"}
} ErrUnauthorized = &DomainError{"unauthorized", "unauthorized"}
} ErrConflict = &DomainError{"conflict", "conflict"}
ErrValidation = &DomainError{"validation_error", "validation error"}
func NewErrorMIMENotSupported(mime string) *DomainError { ErrUnsupportedMIME = &DomainError{"unsupported_mime", "unsupported MIME type"}
return &DomainError{ )
Code: ErrCodeMIMENotSupported,
Message: fmt.Sprintf("MIME not supported: %q", mime),
}
}
func NewErrorTagNotFound(tag_id string) *DomainError {
return &DomainError{
Code: ErrCodeTagNotFound,
Message: fmt.Sprintf("Tag not found: %q", tag_id),
}
}
func NewErrorBadRequest(message string) *DomainError {
return &DomainError{
Code: ErrCodeBadRequest,
Message: message,
}
}
func NewErrorUnexpected() *DomainError {
return &DomainError{
Code: ErrCodeInternal,
Message: "An unexpected error occured",
}
}

View File

@ -0,0 +1,67 @@
package domain
import (
"encoding/json"
"time"
"github.com/google/uuid"
)
// MIMEType holds MIME whitelist data (rows from core.mime_types).
type MIMEType struct {
	ID        int16
	Name      string
	Extension string
}

// File represents a managed file record.
type File struct {
	ID              uuid.UUID
	OriginalName    *string
	MIMEType        string // denormalized from core.mime_types
	MIMEExtension   string // denormalized from core.mime_types
	ContentDatetime time.Time
	Notes           *string
	Metadata        json.RawMessage
	EXIF            json.RawMessage
	PHash           *int64 // perceptual hash; nil when not computed
	CreatorID       int16
	CreatorName     string // denormalized from core.users
	IsPublic        bool
	IsDeleted       bool      // soft-delete flag; see FileListParams.Trash
	CreatedAt       time.Time // extracted from UUID v7 via UUIDCreatedAt
	Tags            []Tag     // loaded with the file
}

// FileListParams holds all parameters for listing/filtering files.
type FileListParams struct {
	// Pagination
	Cursor    string
	Direction string // "forward" or "backward"
	Anchor    *uuid.UUID
	Limit     int
	// Sorting
	Sort  string // "content_datetime" | "created" | "original_name" | "mime"
	Order string // "asc" | "desc"
	// Filtering
	Filter string // filter DSL expression
	Search string // substring match on original_name
	Trash  bool   // if true, return only soft-deleted files
}

// FilePage is the result of a cursor-based file listing. A nil cursor means
// there is no further page in that direction.
type FilePage struct {
	Items      []File
	NextCursor *string
	PrevCursor *string
}
// UUIDCreatedAt extracts the creation timestamp embedded in a UUID v7.
// A v7 UUID stores Unix milliseconds, big-endian, in its first 48 bits.
func UUIDCreatedAt(id uuid.UUID) time.Time {
	var ms int64
	for _, b := range id[:6] {
		ms = ms<<8 | int64(b)
	}
	return time.Unix(ms/1000, (ms%1000)*int64(time.Millisecond)).UTC()
}

View File

@ -0,0 +1,41 @@
package domain
import (
"encoding/json"
"time"
"github.com/google/uuid"
)
// Pool is an ordered collection of files.
type Pool struct {
	ID          uuid.UUID
	Name        string
	Notes       *string
	Metadata    json.RawMessage
	CreatorID   int16
	CreatorName string // denormalized
	IsPublic    bool
	FileCount   int       // number of files currently in the pool
	CreatedAt   time.Time // extracted from UUID v7 via UUIDCreatedAt
}

// PoolFile is a File with its ordering position within a pool.
type PoolFile struct {
	File
	Position int
}

// PoolFilePage is the result of a cursor-based pool file listing.
// A nil NextCursor means there is no further page.
type PoolFilePage struct {
	Items      []PoolFile
	NextCursor *string
}

// PoolOffsetPage is an offset-based page of pools.
type PoolOffsetPage struct {
	Items  []Pool
	Total  int
	Offset int
	Limit  int
}

View File

@ -1,17 +0,0 @@
package domain

import (
	"context"
	"encoding/json"
	"time"
)

// FileRepository is the legacy file-store port: access checks, CRUD and tag
// listing, keyed by string file IDs and returning *DomainError results.
type FileRepository interface {
	// GetAccess reports whether user_id may view and/or edit file_id.
	GetAccess(ctx context.Context, user_id int, file_id string) (canView, canEdit bool, domainErr *DomainError)
	// GetSlice returns a filtered, sorted, limit/offset page of file items.
	GetSlice(ctx context.Context, user_id int, filter, sort string, limit, offset int) (files Slice[FileItem], domainErr *DomainError)
	// Get returns the full record for a single file.
	Get(ctx context.Context, user_id int, file_id string) (file FileFull, domainErr *DomainError)
	// Add creates a new file record from the given attributes.
	Add(ctx context.Context, user_id int, name, mime string, datetime time.Time, notes string, metadata json.RawMessage) (file FileCore, domainErr *DomainError)
	// Update applies a partial column→value patch to a file.
	Update(ctx context.Context, file_id string, updates map[string]interface{}) (domainErr *DomainError)
	// Delete removes a file record.
	Delete(ctx context.Context, file_id string) (domainErr *DomainError)
	// GetTags lists the tags attached to a file.
	GetTags(ctx context.Context, user_id int, file_id string) (tags []TagItem, domainErr *DomainError)
}

View File

@ -0,0 +1,41 @@
package domain
import (
"encoding/json"
"time"
"github.com/google/uuid"
)
// Tag represents a file label.
type Tag struct {
	ID            uuid.UUID
	Name          string
	Notes         *string
	Color         *string // 6-char hex, e.g. "5DCAA5"
	CategoryID    *uuid.UUID
	CategoryName  *string // denormalized
	CategoryColor *string // denormalized
	Metadata      json.RawMessage
	CreatorID     int16
	CreatorName   string // denormalized
	IsPublic      bool
	CreatedAt     time.Time // extracted from UUID v7 via UUIDCreatedAt
}

// TagRule defines an auto-tagging rule: when WhenTagID is applied,
// ThenTagID is automatically applied as well.
type TagRule struct {
	WhenTagID   uuid.UUID
	ThenTagID   uuid.UUID
	ThenTagName string // denormalized
	IsActive    bool   // inactive rules are skipped during expansion
}

// TagOffsetPage is an offset-based page of tags.
type TagOffsetPage struct {
	Items  []Tag
	Total  int
	Offset int
	Limit  int
}

View File

@ -0,0 +1,39 @@
package domain
import "time"
// User is an application user.
type User struct {
	ID        int16
	Name      string
	Password  string // bcrypt hash; only populated when needed for auth
	IsAdmin   bool
	CanCreate bool
	IsBlocked bool
}

// Session is an active user session.
type Session struct {
	ID           int
	TokenHash    string // hash of the refresh token; raw token never stored
	UserID       int16
	UserAgent    string
	StartedAt    time.Time
	ExpiresAt    *time.Time // nil means no fixed expiry
	LastActivity time.Time
	IsCurrent    bool // true when this session matches the caller's token
}

// UserPage is an offset-based page of users.
type UserPage struct {
	Items  []User
	Total  int
	Offset int
	Limit  int
}

// SessionList is a list of sessions with a total count.
type SessionList struct {
	Items []Session
	Total int
}

View File

@ -0,0 +1,144 @@
package handler
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/service"
)
// objectTypeIDs maps the URL segment to the object_type PK in core.object_types.
// Row order matches 007_seed_data.sql: file=1, tag=2, category=3, pool=4.
// Also acts as the whitelist of valid :object_type values (see parseACLPath).
var objectTypeIDs = map[string]int16{
	"file":     1,
	"tag":      2,
	"category": 3,
	"pool":     4,
}
// ACLHandler handles GET/PUT /acl/:object_type/:object_id.
type ACLHandler struct {
	// aclSvc performs the actual permission reads/writes.
	aclSvc *service.ACLService
}

// NewACLHandler creates an ACLHandler.
func NewACLHandler(aclSvc *service.ACLService) *ACLHandler {
	return &ACLHandler{aclSvc: aclSvc}
}
// ---------------------------------------------------------------------------
// Response type
// ---------------------------------------------------------------------------
// permissionJSON is the wire shape of a single per-user permission entry.
type permissionJSON struct {
	UserID   int16  `json:"user_id"`
	UserName string `json:"user_name"`
	CanView  bool   `json:"can_view"`
	CanEdit  bool   `json:"can_edit"`
}

// toPermissionJSON maps a domain permission onto its wire shape.
func toPermissionJSON(p domain.Permission) permissionJSON {
	var out permissionJSON
	out.UserID = p.UserID
	out.UserName = p.UserName
	out.CanView = p.CanView
	out.CanEdit = p.CanEdit
	return out
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
// parseACLPath reads :object_type and :object_id from the URL, translating
// the type name through the objectTypeIDs whitelist. On any parse failure it
// writes a validation-error response and reports ok=false, so callers can
// simply return.
func parseACLPath(c *gin.Context) (objectTypeID int16, objectID uuid.UUID, ok bool) {
	typeID, known := objectTypeIDs[c.Param("object_type")]
	if !known {
		respondError(c, domain.ErrValidation)
		return 0, uuid.UUID{}, false
	}
	oid, err := uuid.Parse(c.Param("object_id"))
	if err != nil {
		respondError(c, domain.ErrValidation)
		return 0, uuid.UUID{}, false
	}
	return typeID, oid, true
}
// ---------------------------------------------------------------------------
// GET /acl/:object_type/:object_id
// ---------------------------------------------------------------------------
// GetPermissions handles GET /acl/:object_type/:object_id: it returns the
// per-user permission entries stored for the object.
func (h *ACLHandler) GetPermissions(c *gin.Context) {
	objectTypeID, objectID, ok := parseACLPath(c)
	if !ok {
		return // parseACLPath already responded
	}
	perms, err := h.aclSvc.GetPermissions(c.Request.Context(), objectTypeID, objectID)
	if err != nil {
		respondError(c, err)
		return
	}
	out := make([]permissionJSON, 0, len(perms))
	for _, p := range perms {
		out = append(out, toPermissionJSON(p))
	}
	respondJSON(c, http.StatusOK, out)
}
// ---------------------------------------------------------------------------
// PUT /acl/:object_type/:object_id
// ---------------------------------------------------------------------------
// SetPermissions handles PUT /acl/:object_type/:object_id: it replaces the
// object's permission list with the submitted entries, then re-reads and
// returns the stored state (so user names come back denormalized).
func (h *ACLHandler) SetPermissions(c *gin.Context) {
	objectTypeID, objectID, ok := parseACLPath(c)
	if !ok {
		return // parseACLPath already responded
	}

	var body struct {
		Permissions []struct {
			UserID  int16 `json:"user_id" binding:"required"`
			CanView bool  `json:"can_view"`
			CanEdit bool  `json:"can_edit"`
		} `json:"permissions" binding:"required"`
	}
	if err := c.ShouldBindJSON(&body); err != nil {
		respondError(c, domain.ErrValidation)
		return
	}

	perms := make([]domain.Permission, 0, len(body.Permissions))
	for _, p := range body.Permissions {
		perms = append(perms, domain.Permission{
			UserID:  p.UserID,
			CanView: p.CanView,
			CanEdit: p.CanEdit,
		})
	}
	if err := h.aclSvc.SetPermissions(c.Request.Context(), objectTypeID, objectID, perms); err != nil {
		respondError(c, err)
		return
	}

	// Re-read to return the stored permissions (with UserName denormalized).
	stored, err := h.aclSvc.GetPermissions(c.Request.Context(), objectTypeID, objectID)
	if err != nil {
		respondError(c, err)
		return
	}
	out := make([]permissionJSON, 0, len(stored))
	for _, p := range stored {
		out = append(out, toPermissionJSON(p))
	}
	respondJSON(c, http.StatusOK, out)
}

View File

@ -0,0 +1,120 @@
package handler
import (
"net/http"
"strconv"
"time"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/service"
)
// AuditHandler handles GET /audit.
type AuditHandler struct {
	// auditSvc executes the filtered audit-log query.
	auditSvc *service.AuditService
}

// NewAuditHandler creates an AuditHandler.
func NewAuditHandler(auditSvc *service.AuditService) *AuditHandler {
	return &AuditHandler{auditSvc: auditSvc}
}
// ---------------------------------------------------------------------------
// Response type
// ---------------------------------------------------------------------------
// auditEntryJSON is the wire shape of one audit log record.
// Note: the Details payload is intentionally not exposed here.
type auditEntryJSON struct {
	ID          int64   `json:"id"`
	UserID      int16   `json:"user_id"`
	UserName    string  `json:"user_name"`
	Action      string  `json:"action"`
	ObjectType  *string `json:"object_type"`
	ObjectID    *string `json:"object_id"`     // stringified UUID, null when absent
	PerformedAt string  `json:"performed_at"` // RFC 3339, UTC
}
// toAuditEntryJSON converts a domain audit entry to its wire shape,
// stringifying the optional object UUID and formatting the timestamp as
// RFC 3339 in UTC.
func toAuditEntryJSON(e domain.AuditEntry) auditEntryJSON {
	var objectID *string
	if e.ObjectID != nil {
		s := e.ObjectID.String()
		objectID = &s
	}
	return auditEntryJSON{
		ID:          e.ID,
		UserID:      e.UserID,
		UserName:    e.UserName,
		Action:      e.Action,
		ObjectType:  e.ObjectType,
		ObjectID:    objectID,
		PerformedAt: e.PerformedAt.UTC().Format(time.RFC3339),
	}
}
// ---------------------------------------------------------------------------
// GET /audit (admin)
// ---------------------------------------------------------------------------
// List handles GET /audit (admin only). Every query parameter is optional;
// values that fail to parse are silently ignored (the filter field is simply
// left unset), matching lenient query-string handling elsewhere.
func (h *AuditHandler) List(c *gin.Context) {
	if !requireAdmin(c) {
		return
	}

	var filter domain.AuditFilter

	// Pagination.
	if n, err := strconv.Atoi(c.Query("limit")); err == nil && c.Query("limit") != "" {
		filter.Limit = n
	}
	if n, err := strconv.Atoi(c.Query("offset")); err == nil && c.Query("offset") != "" {
		filter.Offset = n
	}

	// Entity filters.
	if s := c.Query("user_id"); s != "" {
		if n, err := strconv.ParseInt(s, 10, 16); err == nil {
			id := int16(n)
			filter.UserID = &id
		}
	}
	filter.Action = c.Query("action")
	filter.ObjectType = c.Query("object_type")
	if s := c.Query("object_id"); s != "" {
		if id, err := uuid.Parse(s); err == nil {
			filter.ObjectID = &id
		}
	}

	// Time range.
	if s := c.Query("from"); s != "" {
		if t, err := time.Parse(time.RFC3339, s); err == nil {
			filter.From = &t
		}
	}
	if s := c.Query("to"); s != "" {
		if t, err := time.Parse(time.RFC3339, s); err == nil {
			filter.To = &t
		}
	}

	page, err := h.auditSvc.Query(c.Request.Context(), filter)
	if err != nil {
		respondError(c, err)
		return
	}
	items := make([]auditEntryJSON, 0, len(page.Items))
	for _, e := range page.Items {
		items = append(items, toAuditEntryJSON(e))
	}
	respondJSON(c, http.StatusOK, gin.H{
		"items":  items,
		"total":  page.Total,
		"offset": page.Offset,
		"limit":  page.Limit,
	})
}

View File

@ -0,0 +1,140 @@
package handler
import (
"net/http"
"strconv"
"github.com/gin-gonic/gin"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/service"
)
// AuthHandler handles all /auth endpoints (login, refresh, logout, sessions).
type AuthHandler struct {
	// authSvc implements credential checks and session lifecycle.
	authSvc *service.AuthService
}

// NewAuthHandler creates an AuthHandler backed by authSvc.
func NewAuthHandler(authSvc *service.AuthService) *AuthHandler {
	return &AuthHandler{authSvc: authSvc}
}
// Login handles POST /auth/login: it validates the credentials and returns a
// fresh access/refresh token pair. The User-Agent header is recorded on the
// created session.
func (h *AuthHandler) Login(c *gin.Context) {
	var req struct {
		Name     string `json:"name" binding:"required"`
		Password string `json:"password" binding:"required"`
	}
	if err := c.ShouldBindJSON(&req); err != nil {
		respondError(c, domain.ErrValidation)
		return
	}

	pair, err := h.authSvc.Login(c.Request.Context(), req.Name, req.Password, c.GetHeader("User-Agent"))
	if err != nil {
		respondError(c, err)
		return
	}
	resp := gin.H{
		"access_token":  pair.AccessToken,
		"refresh_token": pair.RefreshToken,
		"expires_in":    pair.ExpiresIn,
	}
	respondJSON(c, http.StatusOK, resp)
}
// Refresh handles POST /auth/refresh.
//
// Exchanges a valid refresh token for a fresh token pair.
func (h *AuthHandler) Refresh(c *gin.Context) {
	type refreshRequest struct {
		RefreshToken string `json:"refresh_token" binding:"required"`
	}
	var body refreshRequest
	if bindErr := c.ShouldBindJSON(&body); bindErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	ua := c.GetHeader("User-Agent")
	pair, err := h.authSvc.Refresh(c.Request.Context(), body.RefreshToken, ua)
	if err != nil {
		respondError(c, err)
		return
	}
	resp := gin.H{
		"access_token":  pair.AccessToken,
		"refresh_token": pair.RefreshToken,
		"expires_in":    pair.ExpiresIn,
	}
	respondJSON(c, http.StatusOK, resp)
}
// Logout handles POST /auth/logout. Requires authentication.
//
// Terminates the caller's current session (taken from the request context)
// and responds 204 on success.
func (h *AuthHandler) Logout(c *gin.Context) {
	ctx := c.Request.Context()
	_, _, sessionID := domain.UserFromContext(ctx)
	err := h.authSvc.Logout(ctx, sessionID)
	if err != nil {
		respondError(c, err)
		return
	}
	c.Status(http.StatusNoContent)
}
// ListSessions handles GET /auth/sessions. Requires authentication.
//
// Returns the caller's sessions; expires_at is JSON null for sessions with
// no expiry (ExpiresAt == nil).
func (h *AuthHandler) ListSessions(c *gin.Context) {
	userID, _, sessionID := domain.UserFromContext(c.Request.Context())
	list, err := h.authSvc.ListSessions(c.Request.Context(), userID, sessionID)
	if err != nil {
		respondError(c, err)
		return
	}
	// RFC 3339 layout, hoisted so the three timestamp fields below cannot
	// drift out of sync (previously the literal was repeated per field).
	const rfc3339 = "2006-01-02T15:04:05Z07:00"
	type sessionItem struct {
		ID           int    `json:"id"`
		UserAgent    string `json:"user_agent"`
		StartedAt    string `json:"started_at"`
		ExpiresAt    any    `json:"expires_at"`
		LastActivity string `json:"last_activity"`
		IsCurrent    bool   `json:"is_current"`
	}
	items := make([]sessionItem, len(list.Items))
	for i, s := range list.Items {
		var expiresAt any
		if s.ExpiresAt != nil {
			expiresAt = s.ExpiresAt.Format(rfc3339)
		}
		items[i] = sessionItem{
			ID:           s.ID,
			UserAgent:    s.UserAgent,
			StartedAt:    s.StartedAt.Format(rfc3339),
			ExpiresAt:    expiresAt,
			LastActivity: s.LastActivity.Format(rfc3339),
			IsCurrent:    s.IsCurrent,
		}
	}
	respondJSON(c, http.StatusOK, gin.H{
		"items": items,
		"total": list.Total,
	})
}
// TerminateSession handles DELETE /auth/sessions/:id. Requires authentication.
//
// The auth service enforces ownership (or admin override) of the session.
func (h *AuthHandler) TerminateSession(c *gin.Context) {
	sessionID, convErr := strconv.Atoi(c.Param("id"))
	if convErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	userID, isAdmin, _ := domain.UserFromContext(c.Request.Context())
	err := h.authSvc.TerminateSession(c.Request.Context(), userID, isAdmin, sessionID)
	if err != nil {
		respondError(c, err)
		return
	}
	c.Status(http.StatusNoContent)
}

View File

@ -0,0 +1,235 @@
package handler
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/service"
)
// CategoryHandler handles all /categories endpoints, including the nested
// /categories/:category_id/tags listing. Business rules live in the
// wrapped CategoryService.
type CategoryHandler struct {
// categorySvc implements category CRUD and tag listing.
categorySvc *service.CategoryService
}
// NewCategoryHandler constructs a CategoryHandler around categorySvc.
func NewCategoryHandler(categorySvc *service.CategoryService) *CategoryHandler {
	handler := &CategoryHandler{categorySvc: categorySvc}
	return handler
}
// ---------------------------------------------------------------------------
// Response types
// ---------------------------------------------------------------------------
// categoryJSON is the wire representation of a domain.Category.
// Notes and Color serialize as JSON null when unset.
type categoryJSON struct {
ID string `json:"id"`
Name string `json:"name"`
Notes *string `json:"notes"`
Color *string `json:"color"`
CreatorID int16 `json:"creator_id"`
CreatorName string `json:"creator_name"`
IsPublic bool `json:"is_public"`
// CreatedAt is rendered as a UTC RFC 3339 timestamp (see toCategoryJSON).
CreatedAt string `json:"created_at"`
}
// toCategoryJSON converts a domain.Category to its wire representation.
// The timestamp is normalized to UTC and formatted as RFC 3339.
func toCategoryJSON(cat domain.Category) categoryJSON {
	out := categoryJSON{
		ID:          cat.ID.String(),
		Name:        cat.Name,
		IsPublic:    cat.IsPublic,
		CreatorID:   cat.CreatorID,
		CreatorName: cat.CreatorName,
		Notes:       cat.Notes,
		Color:       cat.Color,
	}
	out.CreatedAt = cat.CreatedAt.UTC().Format("2006-01-02T15:04:05Z")
	return out
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
// parseCategoryID extracts and validates the :category_id path parameter.
// On failure it writes a validation error response and returns ok=false.
func parseCategoryID(c *gin.Context) (uuid.UUID, bool) {
	id, parseErr := uuid.Parse(c.Param("category_id"))
	if parseErr == nil {
		return id, true
	}
	respondError(c, domain.ErrValidation)
	return uuid.UUID{}, false
}
// ---------------------------------------------------------------------------
// GET /categories
// ---------------------------------------------------------------------------
// List handles GET /categories with offset pagination (default sort: created).
func (h *CategoryHandler) List(c *gin.Context) {
	page, err := h.categorySvc.List(c.Request.Context(), parseOffsetParams(c, "created"))
	if err != nil {
		respondError(c, err)
		return
	}
	out := make([]categoryJSON, 0, len(page.Items))
	for _, cat := range page.Items {
		out = append(out, toCategoryJSON(cat))
	}
	respondJSON(c, http.StatusOK, gin.H{
		"items":  out,
		"total":  page.Total,
		"offset": page.Offset,
		"limit":  page.Limit,
	})
}
// ---------------------------------------------------------------------------
// POST /categories
// ---------------------------------------------------------------------------
// Create handles POST /categories. Name is required; notes, color, and
// is_public are optional.
func (h *CategoryHandler) Create(c *gin.Context) {
	type createRequest struct {
		Name     string  `json:"name" binding:"required"`
		Notes    *string `json:"notes"`
		Color    *string `json:"color"`
		IsPublic *bool   `json:"is_public"`
	}
	var req createRequest
	if bindErr := c.ShouldBindJSON(&req); bindErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	params := service.CategoryParams{
		Name:     req.Name,
		Notes:    req.Notes,
		Color:    req.Color,
		IsPublic: req.IsPublic,
	}
	created, err := h.categorySvc.Create(c.Request.Context(), params)
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusCreated, toCategoryJSON(*created))
}
// ---------------------------------------------------------------------------
// GET /categories/:category_id
// ---------------------------------------------------------------------------
// Get handles GET /categories/:category_id.
func (h *CategoryHandler) Get(c *gin.Context) {
	id, ok := parseCategoryID(c)
	if !ok {
		return
	}
	category, err := h.categorySvc.Get(c.Request.Context(), id)
	if err == nil {
		respondJSON(c, http.StatusOK, toCategoryJSON(*category))
		return
	}
	respondError(c, err)
}
// ---------------------------------------------------------------------------
// PATCH /categories/:category_id
// ---------------------------------------------------------------------------
// Update handles PATCH /categories/:category_id.
//
// The body is decoded into a raw map so that a field explicitly set to
// JSON null ("clear this value") can be distinguished from an absent field
// ("leave unchanged").
func (h *CategoryHandler) Update(c *gin.Context) {
	id, ok := parseCategoryID(c)
	if !ok {
		return
	}
	var raw map[string]any
	if err := c.ShouldBindJSON(&raw); err != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	// optString resolves an optional nullable string field:
	//   absent        -> nil (unchanged)
	//   JSON null     -> pointer to "" (clear)
	//   string value  -> pointer to it
	// Any other JSON type is ignored, matching the previous behavior.
	// Previously this logic was duplicated for "notes" and "color".
	optString := func(key string) *string {
		v, present := raw[key]
		if !present {
			return nil
		}
		if v == nil {
			empty := ""
			return &empty
		}
		if s, ok := v.(string); ok {
			return &s
		}
		return nil
	}
	params := service.CategoryParams{
		Notes: optString("notes"),
		Color: optString("color"),
	}
	if v, ok := raw["name"]; ok {
		if s, ok := v.(string); ok {
			params.Name = s
		}
	}
	if v, ok := raw["is_public"]; ok {
		if b, ok := v.(bool); ok {
			params.IsPublic = &b
		}
	}
	updated, err := h.categorySvc.Update(c.Request.Context(), id, params)
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusOK, toCategoryJSON(*updated))
}
// ---------------------------------------------------------------------------
// DELETE /categories/:category_id
// ---------------------------------------------------------------------------
// Delete handles DELETE /categories/:category_id; responds 204 on success.
func (h *CategoryHandler) Delete(c *gin.Context) {
	id, ok := parseCategoryID(c)
	if !ok {
		return
	}
	err := h.categorySvc.Delete(c.Request.Context(), id)
	if err != nil {
		respondError(c, err)
		return
	}
	c.Status(http.StatusNoContent)
}
// ---------------------------------------------------------------------------
// GET /categories/:category_id/tags
// ---------------------------------------------------------------------------
// ListTags handles GET /categories/:category_id/tags with offset pagination.
func (h *CategoryHandler) ListTags(c *gin.Context) {
	categoryID, ok := parseCategoryID(c)
	if !ok {
		return
	}
	page, err := h.categorySvc.ListTags(c.Request.Context(), categoryID, parseOffsetParams(c, "created"))
	if err != nil {
		respondError(c, err)
		return
	}
	out := make([]tagJSON, 0, len(page.Items))
	for _, tag := range page.Items {
		out = append(out, toTagJSON(tag))
	}
	respondJSON(c, http.StatusOK, gin.H{
		"items":  out,
		"total":  page.Total,
		"offset": page.Offset,
		"limit":  page.Limit,
	})
}

View File

@ -0,0 +1,645 @@
package handler
import (
	"encoding/json"
	"fmt"
	"io"
	"mime"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/gabriel-vasile/mimetype"
	"github.com/gin-gonic/gin"
	"github.com/google/uuid"

	"tanabata/backend/internal/domain"
	"tanabata/backend/internal/service"
)
// FileHandler handles all /files endpoints: listing, upload, metadata,
// content/thumbnail/preview streaming, trash operations, bulk actions, and
// server-side import.
type FileHandler struct {
// fileSvc implements file CRUD, content storage, and import.
fileSvc *service.FileService
// tagSvc implements bulk tag assignment and common-tag queries.
tagSvc *service.TagService
}
// NewFileHandler constructs a FileHandler from its two service dependencies.
func NewFileHandler(fileSvc *service.FileService, tagSvc *service.TagService) *FileHandler {
	handler := &FileHandler{
		fileSvc: fileSvc,
		tagSvc:  tagSvc,
	}
	return handler
}
// ---------------------------------------------------------------------------
// Response types
// ---------------------------------------------------------------------------
// tagJSON is the wire representation of a domain.Tag.
// Pointer fields serialize as JSON null when unset; the category fields are
// null for tags that have no category.
type tagJSON struct {
ID string `json:"id"`
Name string `json:"name"`
Notes *string `json:"notes"`
Color *string `json:"color"`
CategoryID *string `json:"category_id"`
CategoryName *string `json:"category_name"`
CategoryColor *string `json:"category_color"`
CreatorID int16 `json:"creator_id"`
CreatorName string `json:"creator_name"`
IsPublic bool `json:"is_public"`
// CreatedAt is an RFC 3339 timestamp (see toTagJSON).
CreatedAt string `json:"created_at"`
}
// fileJSON is the wire representation of a domain.File, including its tags.
type fileJSON struct {
ID string `json:"id"`
OriginalName *string `json:"original_name"`
MIMEType string `json:"mime_type"`
MIMEExtension string `json:"mime_extension"`
ContentDatetime string `json:"content_datetime"`
Notes *string `json:"notes"`
// Metadata is passed through verbatim; it may be nil and then serializes
// as JSON null. NOTE(review): EXIF is normalized to "{}" when nil (see
// toFileJSON) but Metadata is not — confirm whether that asymmetry is
// intentional.
Metadata json.RawMessage `json:"metadata"`
// EXIF is normalized to an empty JSON object when absent.
EXIF json.RawMessage `json:"exif"`
// PHash is the perceptual hash, null when not computed.
PHash *int64 `json:"phash"`
CreatorID int16 `json:"creator_id"`
CreatorName string `json:"creator_name"`
IsPublic bool `json:"is_public"`
IsDeleted bool `json:"is_deleted"`
CreatedAt string `json:"created_at"`
Tags []tagJSON `json:"tags"`
}
// toTagJSON converts a domain.Tag to its wire representation.
// CategoryID is stringified only when present; otherwise it stays nil
// (JSON null).
func toTagJSON(t domain.Tag) tagJSON {
	var categoryID *string
	if t.CategoryID != nil {
		s := t.CategoryID.String()
		categoryID = &s
	}
	return tagJSON{
		ID:            t.ID.String(),
		Name:          t.Name,
		Notes:         t.Notes,
		Color:         t.Color,
		CategoryID:    categoryID,
		CategoryName:  t.CategoryName,
		CategoryColor: t.CategoryColor,
		CreatorID:     t.CreatorID,
		CreatorName:   t.CreatorName,
		IsPublic:      t.IsPublic,
		CreatedAt:     t.CreatedAt.Format(time.RFC3339),
	}
}
// toFileJSON converts a domain.File to its wire representation.
// A nil EXIF payload is replaced with an empty JSON object so clients
// always receive an object for that field.
func toFileJSON(f domain.File) fileJSON {
	out := fileJSON{
		ID:              f.ID.String(),
		OriginalName:    f.OriginalName,
		MIMEType:        f.MIMEType,
		MIMEExtension:   f.MIMEExtension,
		ContentDatetime: f.ContentDatetime.Format(time.RFC3339),
		Notes:           f.Notes,
		Metadata:        f.Metadata,
		EXIF:            f.EXIF,
		PHash:           f.PHash,
		CreatorID:       f.CreatorID,
		CreatorName:     f.CreatorName,
		IsPublic:        f.IsPublic,
		IsDeleted:       f.IsDeleted,
		CreatedAt:       f.CreatedAt.Format(time.RFC3339),
	}
	if out.EXIF == nil {
		out.EXIF = json.RawMessage("{}")
	}
	out.Tags = make([]tagJSON, 0, len(f.Tags))
	for _, tag := range f.Tags {
		out.Tags = append(out.Tags, toTagJSON(tag))
	}
	return out
}
// ---------------------------------------------------------------------------
// Helper
// ---------------------------------------------------------------------------
// parseFileID extracts and validates the :id path parameter.
// On failure it writes a validation error response and returns ok=false.
func parseFileID(c *gin.Context) (uuid.UUID, bool) {
	id, parseErr := uuid.Parse(c.Param("id"))
	if parseErr == nil {
		return id, true
	}
	respondError(c, domain.ErrValidation)
	return uuid.UUID{}, false
}
// ---------------------------------------------------------------------------
// GET /files
// ---------------------------------------------------------------------------
// List handles GET /files with cursor pagination.
//
// Query parameters: cursor/direction/anchor (pagination), sort/order,
// filter/search, limit (1-200, default 50), and trash (true/1 to list
// soft-deleted files). An out-of-range or non-numeric limit, or a malformed
// anchor UUID, yields a validation error.
func (h *FileHandler) List(c *gin.Context) {
	params := domain.FileListParams{
		Cursor:    c.Query("cursor"),
		Direction: c.DefaultQuery("direction", "forward"),
		Sort:      c.DefaultQuery("sort", "created"),
		Order:     c.DefaultQuery("order", "desc"),
		Filter:    c.Query("filter"),
		Search:    c.Query("search"),
		Limit:     50,
	}
	if limitStr := c.Query("limit"); limitStr != "" {
		n, convErr := strconv.Atoi(limitStr)
		if convErr != nil || n < 1 || n > 200 {
			respondError(c, domain.ErrValidation)
			return
		}
		params.Limit = n
	}
	if anchorStr := c.Query("anchor"); anchorStr != "" {
		anchor, parseErr := uuid.Parse(anchorStr)
		if parseErr != nil {
			respondError(c, domain.ErrValidation)
			return
		}
		params.Anchor = &anchor
	}
	switch c.Query("trash") {
	case "true", "1":
		params.Trash = true
	}
	page, err := h.fileSvc.List(c.Request.Context(), params)
	if err != nil {
		respondError(c, err)
		return
	}
	out := make([]fileJSON, 0, len(page.Items))
	for _, f := range page.Items {
		out = append(out, toFileJSON(f))
	}
	respondJSON(c, http.StatusOK, gin.H{
		"items":       out,
		"next_cursor": page.NextCursor,
		"prev_cursor": page.PrevCursor,
	})
}
// ---------------------------------------------------------------------------
// POST /files (multipart upload)
// ---------------------------------------------------------------------------
// Upload handles POST /files (multipart upload).
//
// Form fields: "file" (required part), and optional "is_public", "notes",
// "metadata" (raw JSON string), "content_datetime" (RFC 3339), and
// "tag_ids" (comma-separated UUIDs). The MIME type is detected from the
// uploaded bytes; the client-supplied Content-Type is ignored.
func (h *FileHandler) Upload(c *gin.Context) {
fh, err := c.FormFile("file")
if err != nil {
respondError(c, domain.ErrValidation)
return
}
src, err := fh.Open()
if err != nil {
respondError(c, err)
return
}
defer src.Close()
// Detect MIME from actual bytes (ignore client-supplied Content-Type).
// DetectReader consumes a prefix of src, so the stream must be rewound
// before it is handed to the service below.
mt, err := mimetype.DetectReader(src)
if err != nil {
respondError(c, err)
return
}
// Rewind by reopening — FormFile gives a multipart.File which supports Seek.
if _, err := src.Seek(0, io.SeekStart); err != nil {
respondError(c, err)
return
}
// Keep only the media type; drop any parameters (e.g. "; charset=utf-8").
mimeStr := strings.SplitN(mt.String(), ";", 2)[0]
params := service.UploadParams{
Reader: src,
MIMEType: mimeStr,
IsPublic: c.PostForm("is_public") == "true",
}
if name := fh.Filename; name != "" {
params.OriginalName = &name
}
if notes := c.PostForm("notes"); notes != "" {
params.Notes = &notes
}
// Metadata is forwarded verbatim as raw JSON; it is not validated here.
if metaStr := c.PostForm("metadata"); metaStr != "" {
params.Metadata = json.RawMessage(metaStr)
}
if dtStr := c.PostForm("content_datetime"); dtStr != "" {
t, err := time.Parse(time.RFC3339, dtStr)
if err != nil {
respondError(c, domain.ErrValidation)
return
}
params.ContentDatetime = &t
}
// tag_ids: comma-separated list; blank entries are skipped, any malformed
// UUID rejects the whole request.
if tagIDsStr := c.PostForm("tag_ids"); tagIDsStr != "" {
for _, raw := range strings.Split(tagIDsStr, ",") {
raw = strings.TrimSpace(raw)
if raw == "" {
continue
}
id, err := uuid.Parse(raw)
if err != nil {
respondError(c, domain.ErrValidation)
return
}
params.TagIDs = append(params.TagIDs, id)
}
}
f, err := h.fileSvc.Upload(c.Request.Context(), params)
if err != nil {
respondError(c, err)
return
}
respondJSON(c, http.StatusCreated, toFileJSON(*f))
}
// ---------------------------------------------------------------------------
// GET /files/:id
// ---------------------------------------------------------------------------
// GetMeta handles GET /files/:id and returns the file's metadata record.
func (h *FileHandler) GetMeta(c *gin.Context) {
	id, ok := parseFileID(c)
	if !ok {
		return
	}
	file, err := h.fileSvc.Get(c.Request.Context(), id)
	if err == nil {
		respondJSON(c, http.StatusOK, toFileJSON(*file))
		return
	}
	respondError(c, err)
}
// ---------------------------------------------------------------------------
// PATCH /files/:id
// ---------------------------------------------------------------------------
// UpdateMeta handles PATCH /files/:id.
//
// All fields are optional; content_datetime, when present, must be RFC 3339.
func (h *FileHandler) UpdateMeta(c *gin.Context) {
	id, ok := parseFileID(c)
	if !ok {
		return
	}
	type patchRequest struct {
		OriginalName    *string         `json:"original_name"`
		ContentDatetime *string         `json:"content_datetime"`
		Notes           *string         `json:"notes"`
		Metadata        json.RawMessage `json:"metadata"`
		IsPublic        *bool           `json:"is_public"`
	}
	var req patchRequest
	if bindErr := c.ShouldBindJSON(&req); bindErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	params := service.UpdateParams{
		OriginalName: req.OriginalName,
		Notes:        req.Notes,
		Metadata:     req.Metadata,
		IsPublic:     req.IsPublic,
	}
	if req.ContentDatetime != nil {
		parsed, parseErr := time.Parse(time.RFC3339, *req.ContentDatetime)
		if parseErr != nil {
			respondError(c, domain.ErrValidation)
			return
		}
		params.ContentDatetime = &parsed
	}
	updated, err := h.fileSvc.Update(c.Request.Context(), id, params)
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusOK, toFileJSON(*updated))
}
// ---------------------------------------------------------------------------
// DELETE /files/:id (soft-delete)
// ---------------------------------------------------------------------------
// SoftDelete handles DELETE /files/:id (moves the file to trash).
func (h *FileHandler) SoftDelete(c *gin.Context) {
	id, ok := parseFileID(c)
	if !ok {
		return
	}
	err := h.fileSvc.Delete(c.Request.Context(), id)
	if err != nil {
		respondError(c, err)
		return
	}
	c.Status(http.StatusNoContent)
}
// ---------------------------------------------------------------------------
// GET /files/:id/content
// ---------------------------------------------------------------------------
// GetContent handles GET /files/:id/content and streams the raw bytes.
//
// The Content-Disposition header is built with mime.FormatMediaType so
// filenames containing quotes or non-ASCII characters are emitted using the
// RFC 2231/6266 extended syntax instead of a lossy %q-quoted string.
func (h *FileHandler) GetContent(c *gin.Context) {
	id, ok := parseFileID(c)
	if !ok {
		return
	}
	res, err := h.fileSvc.GetContent(c.Request.Context(), id)
	if err != nil {
		respondError(c, err)
		return
	}
	defer res.Body.Close()
	c.Header("Content-Type", res.MIMEType)
	if res.OriginalName != nil {
		cd := mime.FormatMediaType("attachment", map[string]string{"filename": *res.OriginalName})
		if cd == "" {
			// FormatMediaType returns "" for unrepresentable input; fall
			// back to the quoted form rather than dropping the header.
			cd = fmt.Sprintf("attachment; filename=%q", *res.OriginalName)
		}
		c.Header("Content-Disposition", cd)
	}
	c.Status(http.StatusOK)
	// Write errors here mean the client went away; nothing useful to do.
	io.Copy(c.Writer, res.Body) //nolint:errcheck
}
// ---------------------------------------------------------------------------
// PUT /files/:id/content (replace)
// ---------------------------------------------------------------------------
// ReplaceContent handles PUT /files/:id/content.
//
// Replaces the stored bytes with the uploaded "file" part. The MIME type is
// detected from the uploaded bytes, then the stream is rewound before being
// handed to the service (DetectReader consumes a prefix of it).
func (h *FileHandler) ReplaceContent(c *gin.Context) {
	id, ok := parseFileID(c)
	if !ok {
		return
	}
	part, formErr := c.FormFile("file")
	if formErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	src, openErr := part.Open()
	if openErr != nil {
		respondError(c, openErr)
		return
	}
	defer src.Close()
	detected, detectErr := mimetype.DetectReader(src)
	if detectErr != nil {
		respondError(c, detectErr)
		return
	}
	if _, seekErr := src.Seek(0, io.SeekStart); seekErr != nil {
		respondError(c, seekErr)
		return
	}
	// Keep only the media type; drop any parameters after ";".
	mediaType := strings.SplitN(detected.String(), ";", 2)[0]
	name := part.Filename
	replaced, err := h.fileSvc.Replace(c.Request.Context(), id, service.UploadParams{
		Reader:       src,
		MIMEType:     mediaType,
		OriginalName: &name,
	})
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusOK, toFileJSON(*replaced))
}
// ---------------------------------------------------------------------------
// GET /files/:id/thumbnail
// ---------------------------------------------------------------------------
// GetThumbnail handles GET /files/:id/thumbnail, streaming a JPEG thumbnail.
func (h *FileHandler) GetThumbnail(c *gin.Context) {
	id, ok := parseFileID(c)
	if !ok {
		return
	}
	body, err := h.fileSvc.GetThumbnail(c.Request.Context(), id)
	if err != nil {
		respondError(c, err)
		return
	}
	defer body.Close()
	c.Header("Content-Type", "image/jpeg")
	c.Status(http.StatusOK)
	// Write errors mean the client disconnected; nothing useful to do.
	io.Copy(c.Writer, body) //nolint:errcheck
}
// ---------------------------------------------------------------------------
// GET /files/:id/preview
// ---------------------------------------------------------------------------
// GetPreview handles GET /files/:id/preview, streaming a JPEG preview.
func (h *FileHandler) GetPreview(c *gin.Context) {
	id, ok := parseFileID(c)
	if !ok {
		return
	}
	body, err := h.fileSvc.GetPreview(c.Request.Context(), id)
	if err != nil {
		respondError(c, err)
		return
	}
	defer body.Close()
	c.Header("Content-Type", "image/jpeg")
	c.Status(http.StatusOK)
	// Write errors mean the client disconnected; nothing useful to do.
	io.Copy(c.Writer, body) //nolint:errcheck
}
// ---------------------------------------------------------------------------
// POST /files/:id/restore
// ---------------------------------------------------------------------------
// Restore handles POST /files/:id/restore (undoes a soft delete).
func (h *FileHandler) Restore(c *gin.Context) {
	id, ok := parseFileID(c)
	if !ok {
		return
	}
	restored, err := h.fileSvc.Restore(c.Request.Context(), id)
	if err == nil {
		respondJSON(c, http.StatusOK, toFileJSON(*restored))
		return
	}
	respondError(c, err)
}
// ---------------------------------------------------------------------------
// DELETE /files/:id/permanent
// ---------------------------------------------------------------------------
// PermanentDelete handles DELETE /files/:id/permanent (irreversible).
func (h *FileHandler) PermanentDelete(c *gin.Context) {
	id, ok := parseFileID(c)
	if !ok {
		return
	}
	err := h.fileSvc.PermanentDelete(c.Request.Context(), id)
	if err != nil {
		respondError(c, err)
		return
	}
	c.Status(http.StatusNoContent)
}
// ---------------------------------------------------------------------------
// POST /files/bulk/tags
// ---------------------------------------------------------------------------
// BulkSetTags handles POST /files/bulk/tags.
//
// Adds or removes ("action": "add" | "remove") the given tags on the given
// files and returns the tag IDs that were actually applied.
func (h *FileHandler) BulkSetTags(c *gin.Context) {
	type bulkTagsRequest struct {
		FileIDs []string `json:"file_ids" binding:"required"`
		Action  string   `json:"action" binding:"required"`
		TagIDs  []string `json:"tag_ids" binding:"required"`
	}
	var req bulkTagsRequest
	if bindErr := c.ShouldBindJSON(&req); bindErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	switch req.Action {
	case "add", "remove":
		// valid
	default:
		respondError(c, domain.ErrValidation)
		return
	}
	fileIDs, fileErr := parseUUIDs(req.FileIDs)
	if fileErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	tagIDs, tagErr := parseUUIDs(req.TagIDs)
	if tagErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	applied, err := h.tagSvc.BulkSetTags(c.Request.Context(), fileIDs, req.Action, tagIDs)
	if err != nil {
		respondError(c, err)
		return
	}
	appliedStrs := make([]string, 0, len(applied))
	for _, id := range applied {
		appliedStrs = append(appliedStrs, id.String())
	}
	respondJSON(c, http.StatusOK, gin.H{"applied_tag_ids": appliedStrs})
}
// ---------------------------------------------------------------------------
// POST /files/bulk/delete
// ---------------------------------------------------------------------------
// BulkDelete handles POST /files/bulk/delete (soft-deletes many files).
func (h *FileHandler) BulkDelete(c *gin.Context) {
	type bulkDeleteRequest struct {
		FileIDs []string `json:"file_ids" binding:"required"`
	}
	var req bulkDeleteRequest
	if bindErr := c.ShouldBindJSON(&req); bindErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	fileIDs, parseErr := parseUUIDs(req.FileIDs)
	if parseErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	err := h.fileSvc.BulkDelete(c.Request.Context(), fileIDs)
	if err != nil {
		respondError(c, err)
		return
	}
	c.Status(http.StatusNoContent)
}
// ---------------------------------------------------------------------------
// POST /files/bulk/common-tags
// ---------------------------------------------------------------------------
// CommonTags handles POST /files/bulk/common-tags.
//
// Returns tag IDs present on every given file ("common") and on some but
// not all of them ("partial").
func (h *FileHandler) CommonTags(c *gin.Context) {
	type commonTagsRequest struct {
		FileIDs []string `json:"file_ids" binding:"required"`
	}
	var req commonTagsRequest
	if bindErr := c.ShouldBindJSON(&req); bindErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	fileIDs, parseErr := parseUUIDs(req.FileIDs)
	if parseErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	common, partial, err := h.tagSvc.CommonTags(c.Request.Context(), fileIDs)
	if err != nil {
		respondError(c, err)
		return
	}
	idStrings := func(tags []domain.Tag) []string {
		out := make([]string, len(tags))
		for i := range tags {
			out[i] = tags[i].ID.String()
		}
		return out
	}
	respondJSON(c, http.StatusOK, gin.H{
		"common_tag_ids":  idStrings(common),
		"partial_tag_ids": idStrings(partial),
	})
}
// ---------------------------------------------------------------------------
// POST /files/import
// ---------------------------------------------------------------------------
// Import handles POST /files/import.
//
// The JSON body ({"path": ...}) is optional; when absent or malformed the
// service is invoked with an empty path (its configured default).
func (h *FileHandler) Import(c *gin.Context) {
	type importRequest struct {
		Path string `json:"path"`
	}
	var req importRequest
	// Body is optional; bind errors are deliberately ignored.
	_ = c.ShouldBindJSON(&req)
	result, err := h.fileSvc.Import(c.Request.Context(), req.Path)
	if err == nil {
		respondJSON(c, http.StatusOK, result)
		return
	}
	respondError(c, err)
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
// parseUUIDs parses every string into a UUID, failing fast on the first
// malformed entry.
func parseUUIDs(strs []string) ([]uuid.UUID, error) {
	out := make([]uuid.UUID, len(strs))
	for i := range strs {
		parsed, err := uuid.Parse(strs[i])
		if err != nil {
			return nil, err
		}
		out[i] = parsed
	}
	return out, nil
}

View File

@ -0,0 +1,52 @@
package handler
import (
"net/http"
"strings"
"github.com/gin-gonic/gin"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/service"
)
// AuthMiddleware validates Bearer JWTs and injects user identity into context.
// Token parsing/validation is delegated to the wrapped AuthService.
type AuthMiddleware struct {
// authSvc parses and validates access tokens.
authSvc *service.AuthService
}
// NewAuthMiddleware constructs an AuthMiddleware that validates tokens via
// authSvc.
func NewAuthMiddleware(authSvc *service.AuthService) *AuthMiddleware {
	m := &AuthMiddleware{authSvc: authSvc}
	return m
}
// Handle returns a Gin handler function that enforces authentication.
// On success it injects the user identity into the request context and calls
// c.Next(); on failure it aborts with a 401 JSON body.
func (m *AuthMiddleware) Handle() gin.HandlerFunc {
	// Per RFC 7235 the auth-scheme is case-insensitive, so "bearer xyz" is
	// as valid as "Bearer xyz"; match the prefix with EqualFold instead of
	// an exact HasPrefix. Headers that previously matched still match.
	const prefix = "Bearer "
	return func(c *gin.Context) {
		raw := c.GetHeader("Authorization")
		if len(raw) < len(prefix) || !strings.EqualFold(raw[:len(prefix)], prefix) {
			c.JSON(http.StatusUnauthorized, errorBody{
				Code:    domain.ErrUnauthorized.Code(),
				Message: "authorization header missing or malformed",
			})
			c.Abort()
			return
		}
		token := raw[len(prefix):]
		claims, err := m.authSvc.ParseAccessToken(token)
		if err != nil {
			c.JSON(http.StatusUnauthorized, errorBody{
				Code:    domain.ErrUnauthorized.Code(),
				Message: "invalid or expired token",
			})
			c.Abort()
			return
		}
		ctx := domain.WithUser(c.Request.Context(), claims.UserID, claims.IsAdmin, claims.SessionID)
		c.Request = c.Request.WithContext(ctx)
		c.Next()
	}
}

View File

@ -0,0 +1,356 @@
package handler
import (
"net/http"
"strconv"
"time"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
"tanabata/backend/internal/service"
)
// PoolHandler handles all /pools endpoints, including ordered file
// membership under /pools/:pool_id/files.
type PoolHandler struct {
// poolSvc implements pool CRUD and ordered membership operations.
poolSvc *service.PoolService
}
// NewPoolHandler constructs a PoolHandler around poolSvc.
func NewPoolHandler(poolSvc *service.PoolService) *PoolHandler {
	handler := &PoolHandler{poolSvc: poolSvc}
	return handler
}
// ---------------------------------------------------------------------------
// Response types
// ---------------------------------------------------------------------------
// poolJSON is the wire representation of a domain.Pool.
// Notes serializes as JSON null when unset.
type poolJSON struct {
ID string `json:"id"`
Name string `json:"name"`
Notes *string `json:"notes"`
CreatorID int16 `json:"creator_id"`
CreatorName string `json:"creator_name"`
IsPublic bool `json:"is_public"`
// FileCount is the number of files currently in the pool.
FileCount int `json:"file_count"`
// CreatedAt is a UTC RFC 3339 timestamp (see toPoolJSON).
CreatedAt string `json:"created_at"`
}
// poolFileJSON is a file entry within a pool: the full file representation
// (embedded, so its fields flatten into this object) plus the file's
// position in the pool's ordering.
type poolFileJSON struct {
fileJSON
Position int `json:"position"`
}
// toPoolJSON converts a domain.Pool to its wire representation.
// The timestamp is normalized to UTC and formatted as RFC 3339.
func toPoolJSON(pool domain.Pool) poolJSON {
	out := poolJSON{
		ID:          pool.ID.String(),
		Name:        pool.Name,
		Notes:       pool.Notes,
		IsPublic:    pool.IsPublic,
		FileCount:   pool.FileCount,
		CreatorID:   pool.CreatorID,
		CreatorName: pool.CreatorName,
	}
	out.CreatedAt = pool.CreatedAt.UTC().Format(time.RFC3339)
	return out
}
// toPoolFileJSON converts a domain.PoolFile (file + position) to its wire
// representation.
func toPoolFileJSON(pf domain.PoolFile) poolFileJSON {
	out := poolFileJSON{Position: pf.Position}
	out.fileJSON = toFileJSON(pf.File)
	return out
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
// parsePoolID extracts and validates the :pool_id path parameter.
// On failure it writes a validation error response and returns ok=false.
func parsePoolID(c *gin.Context) (uuid.UUID, bool) {
	id, parseErr := uuid.Parse(c.Param("pool_id"))
	if parseErr == nil {
		return id, true
	}
	respondError(c, domain.ErrValidation)
	return uuid.UUID{}, false
}
// parsePoolFileParams reads cursor/limit/filter query parameters for pool
// file listings. Limit defaults to 50 and is clamped to 1-200.
// NOTE(review): an invalid or out-of-range limit is silently ignored here
// (falls back to 50), whereas FileHandler.List rejects it with 400 —
// confirm whether this divergence is intentional.
func parsePoolFileParams(c *gin.Context) port.PoolFileListParams {
limit := 50
if s := c.Query("limit"); s != "" {
if n, err := strconv.Atoi(s); err == nil && n > 0 && n <= 200 {
limit = n
}
}
return port.PoolFileListParams{
Cursor: c.Query("cursor"),
Limit: limit,
Filter: c.Query("filter"),
}
}
// ---------------------------------------------------------------------------
// GET /pools
// ---------------------------------------------------------------------------
// List handles GET /pools with offset pagination (default sort: created).
func (h *PoolHandler) List(c *gin.Context) {
	page, err := h.poolSvc.List(c.Request.Context(), parseOffsetParams(c, "created"))
	if err != nil {
		respondError(c, err)
		return
	}
	out := make([]poolJSON, 0, len(page.Items))
	for _, pool := range page.Items {
		out = append(out, toPoolJSON(pool))
	}
	respondJSON(c, http.StatusOK, gin.H{
		"items":  out,
		"total":  page.Total,
		"offset": page.Offset,
		"limit":  page.Limit,
	})
}
// ---------------------------------------------------------------------------
// POST /pools
// ---------------------------------------------------------------------------
// Create handles POST /pools. Name is required; notes and is_public are
// optional.
func (h *PoolHandler) Create(c *gin.Context) {
	type createRequest struct {
		Name     string  `json:"name" binding:"required"`
		Notes    *string `json:"notes"`
		IsPublic *bool   `json:"is_public"`
	}
	var req createRequest
	if bindErr := c.ShouldBindJSON(&req); bindErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	params := service.PoolParams{
		Name:     req.Name,
		Notes:    req.Notes,
		IsPublic: req.IsPublic,
	}
	created, err := h.poolSvc.Create(c.Request.Context(), params)
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusCreated, toPoolJSON(*created))
}
// ---------------------------------------------------------------------------
// GET /pools/:pool_id
// ---------------------------------------------------------------------------
// Get handles GET /pools/:pool_id.
func (h *PoolHandler) Get(c *gin.Context) {
	id, ok := parsePoolID(c)
	if !ok {
		return
	}
	pool, err := h.poolSvc.Get(c.Request.Context(), id)
	if err == nil {
		respondJSON(c, http.StatusOK, toPoolJSON(*pool))
		return
	}
	respondError(c, err)
}
// ---------------------------------------------------------------------------
// PATCH /pools/:pool_id
// ---------------------------------------------------------------------------
// Update handles PATCH /pools/:pool_id.
//
// The body is decoded into a raw map so an explicit JSON null for "notes"
// ("clear the value") can be told apart from an absent field ("unchanged").
func (h *PoolHandler) Update(c *gin.Context) {
	id, ok := parsePoolID(c)
	if !ok {
		return
	}
	var patch map[string]any
	if err := c.ShouldBindJSON(&patch); err != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	var params service.PoolParams
	if name, isStr := patch["name"].(string); isStr {
		params.Name = name
	}
	if v, present := patch["notes"]; present {
		switch s := v.(type) {
		case nil:
			cleared := ""
			params.Notes = &cleared
		case string:
			params.Notes = &s
		}
	}
	if pub, isBool := patch["is_public"].(bool); isBool {
		params.IsPublic = &pub
	}
	updated, err := h.poolSvc.Update(c.Request.Context(), id, params)
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusOK, toPoolJSON(*updated))
}
// ---------------------------------------------------------------------------
// DELETE /pools/:pool_id
// ---------------------------------------------------------------------------
// Delete handles DELETE /pools/:pool_id; responds 204 on success.
func (h *PoolHandler) Delete(c *gin.Context) {
	id, ok := parsePoolID(c)
	if !ok {
		return
	}
	err := h.poolSvc.Delete(c.Request.Context(), id)
	if err != nil {
		respondError(c, err)
		return
	}
	c.Status(http.StatusNoContent)
}
// ---------------------------------------------------------------------------
// GET /pools/:pool_id/files
// ---------------------------------------------------------------------------
// ListFiles handles GET /pools/:pool_id/files with cursor pagination.
func (h *PoolHandler) ListFiles(c *gin.Context) {
	poolID, ok := parsePoolID(c)
	if !ok {
		return
	}
	page, err := h.poolSvc.ListFiles(c.Request.Context(), poolID, parsePoolFileParams(c))
	if err != nil {
		respondError(c, err)
		return
	}
	out := make([]poolFileJSON, 0, len(page.Items))
	for _, pf := range page.Items {
		out = append(out, toPoolFileJSON(pf))
	}
	respondJSON(c, http.StatusOK, gin.H{
		"items":       out,
		"next_cursor": page.NextCursor,
	})
}
// ---------------------------------------------------------------------------
// POST /pools/:pool_id/files
// ---------------------------------------------------------------------------
// AddFiles handles POST /pools/:pool_id/files.
//
// Adds the given files to the pool, optionally at a specific position
// (appended when position is omitted — behavior defined by the service).
func (h *PoolHandler) AddFiles(c *gin.Context) {
	poolID, ok := parsePoolID(c)
	if !ok {
		return
	}
	var body struct {
		FileIDs  []string `json:"file_ids" binding:"required"`
		Position *int     `json:"position"`
	}
	if err := c.ShouldBindJSON(&body); err != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	// Reuse the package-wide parseUUIDs helper (files.go) instead of an
	// inline loop so all bulk endpoints validate IDs identically.
	fileIDs, err := parseUUIDs(body.FileIDs)
	if err != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	if err := h.poolSvc.AddFiles(c.Request.Context(), poolID, fileIDs, body.Position); err != nil {
		respondError(c, err)
		return
	}
	c.Status(http.StatusCreated)
}
// ---------------------------------------------------------------------------
// POST /pools/:pool_id/files/remove
// ---------------------------------------------------------------------------
// RemoveFiles handles POST /pools/:pool_id/files/remove.
//
// Removes the given files from the pool; responds 204 on success.
func (h *PoolHandler) RemoveFiles(c *gin.Context) {
	poolID, ok := parsePoolID(c)
	if !ok {
		return
	}
	var body struct {
		FileIDs []string `json:"file_ids" binding:"required"`
	}
	if err := c.ShouldBindJSON(&body); err != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	// Reuse the package-wide parseUUIDs helper (files.go) instead of an
	// inline loop so all bulk endpoints validate IDs identically.
	fileIDs, err := parseUUIDs(body.FileIDs)
	if err != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	if err := h.poolSvc.RemoveFiles(c.Request.Context(), poolID, fileIDs); err != nil {
		respondError(c, err)
		return
	}
	c.Status(http.StatusNoContent)
}
// ---------------------------------------------------------------------------
// PUT /pools/:pool_id/files/reorder
// ---------------------------------------------------------------------------
// Reorder handles PUT /pools/:pool_id/files/reorder.
//
// Replaces the pool's file ordering with the given sequence; responds 204
// on success.
func (h *PoolHandler) Reorder(c *gin.Context) {
	poolID, ok := parsePoolID(c)
	if !ok {
		return
	}
	var body struct {
		FileIDs []string `json:"file_ids" binding:"required"`
	}
	if err := c.ShouldBindJSON(&body); err != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	// Reuse the package-wide parseUUIDs helper (files.go) instead of an
	// inline loop so all bulk endpoints validate IDs identically.
	fileIDs, err := parseUUIDs(body.FileIDs)
	if err != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	if err := h.poolSvc.Reorder(c.Request.Context(), poolID, fileIDs); err != nil {
		respondError(c, err)
		return
	}
	c.Status(http.StatusNoContent)
}

View File

@ -0,0 +1,55 @@
package handler
import (
"errors"
"net/http"
"github.com/gin-gonic/gin"
"tanabata/backend/internal/domain"
)
// errorBody is the JSON shape returned for all error responses:
//
//	{"code": "...", "message": "..."}
type errorBody struct {
	Code    string `json:"code"`    // machine-readable error code (e.g. "internal_error")
	Message string `json:"message"` // human-readable description
}
// respondJSON writes data as a JSON response with the given HTTP status.
// Thin wrapper so handlers share a single success-response entry point.
func respondJSON(c *gin.Context, status int, data any) {
	c.JSON(status, data)
}
// respondError translates err into an HTTP error response. Recognized
// *domain.DomainError values get their mapped status plus code/message;
// anything else is reported as an opaque 500 so internals never leak.
func respondError(c *gin.Context, err error) {
	var domainErr *domain.DomainError
	if !errors.As(err, &domainErr) {
		c.JSON(http.StatusInternalServerError, errorBody{
			Code:    "internal_error",
			Message: "internal server error",
		})
		return
	}
	c.JSON(domainStatus(domainErr), errorBody{Code: domainErr.Code(), Message: domainErr.Error()})
}
// domainStatus maps a DomainError sentinel to its HTTP status code per the
// error mapping table in docs/GO_PROJECT_STRUCTURE.md. Unrecognized sentinels
// fall back to 500.
func domainStatus(de *domain.DomainError) int {
	// Sentinel identity (pointer) lookup, equivalent to a switch on de.
	statuses := map[*domain.DomainError]int{
		domain.ErrNotFound:        http.StatusNotFound,
		domain.ErrForbidden:       http.StatusForbidden,
		domain.ErrUnauthorized:    http.StatusUnauthorized,
		domain.ErrConflict:        http.StatusConflict,
		domain.ErrValidation:      http.StatusBadRequest,
		domain.ErrUnsupportedMIME: http.StatusUnsupportedMediaType,
	}
	if status, ok := statuses[de]; ok {
		return status
	}
	return http.StatusInternalServerError
}

View File

@ -0,0 +1,167 @@
package handler
import (
"net/http"
"github.com/gin-gonic/gin"
)
// NewRouter builds and returns a configured Gin engine wiring all API
// handlers under /api/v1. Registration order inside several groups is
// load-bearing: static sub-paths must be registered before parameterized
// routes to avoid Gin param/wildcard conflicts (see inline comments).
func NewRouter(
	auth *AuthMiddleware,
	authHandler *AuthHandler,
	fileHandler *FileHandler,
	tagHandler *TagHandler,
	categoryHandler *CategoryHandler,
	poolHandler *PoolHandler,
	userHandler *UserHandler,
	aclHandler *ACLHandler,
	auditHandler *AuditHandler,
) *gin.Engine {
	// gin.New (not gin.Default) so the middleware stack is explicit.
	r := gin.New()
	r.Use(gin.Logger(), gin.Recovery())
	// Health check — no auth required.
	r.GET("/health", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"status": "ok"})
	})
	v1 := r.Group("/api/v1")
	// -------------------------------------------------------------------------
	// Auth — login/refresh are public; session management requires a token.
	// -------------------------------------------------------------------------
	authGroup := v1.Group("/auth")
	{
		authGroup.POST("/login", authHandler.Login)
		authGroup.POST("/refresh", authHandler.Refresh)
		protected := authGroup.Group("", auth.Handle())
		{
			protected.POST("/logout", authHandler.Logout)
			protected.GET("/sessions", authHandler.ListSessions)
			protected.DELETE("/sessions/:id", authHandler.TerminateSession)
		}
	}
	// -------------------------------------------------------------------------
	// Files (all require auth)
	// -------------------------------------------------------------------------
	files := v1.Group("/files", auth.Handle())
	{
		files.GET("", fileHandler.List)
		files.POST("", fileHandler.Upload)
		// Bulk + import routes registered before /:id to prevent param collision.
		files.POST("/bulk/tags", fileHandler.BulkSetTags)
		files.POST("/bulk/delete", fileHandler.BulkDelete)
		files.POST("/bulk/common-tags", fileHandler.CommonTags)
		files.POST("/import", fileHandler.Import)
		// Per-file routes.
		files.GET("/:id", fileHandler.GetMeta)
		files.PATCH("/:id", fileHandler.UpdateMeta)
		files.DELETE("/:id", fileHandler.SoftDelete)
		files.GET("/:id/content", fileHandler.GetContent)
		files.PUT("/:id/content", fileHandler.ReplaceContent)
		files.GET("/:id/thumbnail", fileHandler.GetThumbnail)
		files.GET("/:id/preview", fileHandler.GetPreview)
		files.POST("/:id/restore", fileHandler.Restore)
		files.DELETE("/:id/permanent", fileHandler.PermanentDelete)
		// File–tag relations — served by TagHandler for auto-rule support.
		files.GET("/:id/tags", tagHandler.FileListTags)
		files.PUT("/:id/tags", tagHandler.FileSetTags)
		files.PUT("/:id/tags/:tag_id", tagHandler.FileAddTag)
		files.DELETE("/:id/tags/:tag_id", tagHandler.FileRemoveTag)
	}
	// -------------------------------------------------------------------------
	// Tags (all require auth)
	// -------------------------------------------------------------------------
	tags := v1.Group("/tags", auth.Handle())
	{
		tags.GET("", tagHandler.List)
		tags.POST("", tagHandler.Create)
		tags.GET("/:tag_id", tagHandler.Get)
		tags.PATCH("/:tag_id", tagHandler.Update)
		tags.DELETE("/:tag_id", tagHandler.Delete)
		tags.GET("/:tag_id/files", tagHandler.ListFiles)
		tags.GET("/:tag_id/rules", tagHandler.ListRules)
		tags.POST("/:tag_id/rules", tagHandler.CreateRule)
		tags.PATCH("/:tag_id/rules/:then_tag_id", tagHandler.PatchRule)
		tags.DELETE("/:tag_id/rules/:then_tag_id", tagHandler.DeleteRule)
	}
	// -------------------------------------------------------------------------
	// Categories (all require auth)
	// -------------------------------------------------------------------------
	categories := v1.Group("/categories", auth.Handle())
	{
		categories.GET("", categoryHandler.List)
		categories.POST("", categoryHandler.Create)
		categories.GET("/:category_id", categoryHandler.Get)
		categories.PATCH("/:category_id", categoryHandler.Update)
		categories.DELETE("/:category_id", categoryHandler.Delete)
		categories.GET("/:category_id/tags", categoryHandler.ListTags)
	}
	// -------------------------------------------------------------------------
	// Pools (all require auth)
	// -------------------------------------------------------------------------
	pools := v1.Group("/pools", auth.Handle())
	{
		pools.GET("", poolHandler.List)
		pools.POST("", poolHandler.Create)
		pools.GET("/:pool_id", poolHandler.Get)
		pools.PATCH("/:pool_id", poolHandler.Update)
		pools.DELETE("/:pool_id", poolHandler.Delete)
		// Sub-routes registered before /:pool_id/files to avoid param conflicts.
		pools.POST("/:pool_id/files/remove", poolHandler.RemoveFiles)
		pools.PUT("/:pool_id/files/reorder", poolHandler.Reorder)
		pools.GET("/:pool_id/files", poolHandler.ListFiles)
		pools.POST("/:pool_id/files", poolHandler.AddFiles)
	}
	// -------------------------------------------------------------------------
	// Users (auth required; admin checks enforced in handler)
	// -------------------------------------------------------------------------
	users := v1.Group("/users", auth.Handle())
	{
		// /users/me must be registered before /:user_id to avoid param capture.
		users.GET("/me", userHandler.GetMe)
		users.PATCH("/me", userHandler.UpdateMe)
		users.GET("", userHandler.List)
		users.POST("", userHandler.Create)
		users.GET("/:user_id", userHandler.Get)
		users.PATCH("/:user_id", userHandler.UpdateAdmin)
		users.DELETE("/:user_id", userHandler.Delete)
	}
	// -------------------------------------------------------------------------
	// ACL (auth required)
	// -------------------------------------------------------------------------
	acl := v1.Group("/acl", auth.Handle())
	{
		acl.GET("/:object_type/:object_id", aclHandler.GetPermissions)
		acl.PUT("/:object_type/:object_id", aclHandler.SetPermissions)
	}
	// -------------------------------------------------------------------------
	// Audit (auth required; admin check enforced in handler)
	// -------------------------------------------------------------------------
	v1.GET("/audit", auth.Handle(), auditHandler.List)
	return r
}

View File

@ -0,0 +1,532 @@
package handler
import (
"net/http"
"strconv"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
"tanabata/backend/internal/service"
)
// TagHandler handles all /tags endpoints, plus the file–tag relation routes
// under /files/:id/tags (registered against it in the router).
type TagHandler struct {
	tagSvc  *service.TagService  // tag CRUD, rules, and file-tag relations
	fileSvc *service.FileService // used by ListFiles to page files by tag
}

// NewTagHandler creates a TagHandler with its service dependencies.
func NewTagHandler(tagSvc *service.TagService, fileSvc *service.FileService) *TagHandler {
	return &TagHandler{tagSvc: tagSvc, fileSvc: fileSvc}
}
// ---------------------------------------------------------------------------
// Response types
// ---------------------------------------------------------------------------
// tagRuleJSON is the wire representation of a tag auto-rule:
// "when when_tag is applied, also apply then_tag".
type tagRuleJSON struct {
	WhenTagID   string `json:"when_tag_id"`   // trigger tag
	ThenTagID   string `json:"then_tag_id"`   // tag applied by the rule
	ThenTagName string `json:"then_tag_name"` // denormalized name for display
	IsActive    bool   `json:"is_active"`     // whether the rule is enabled
}
// toTagRuleJSON converts a domain.TagRule into its wire representation.
func toTagRuleJSON(r domain.TagRule) tagRuleJSON {
	var out tagRuleJSON
	out.WhenTagID = r.WhenTagID.String()
	out.ThenTagID = r.ThenTagID.String()
	out.ThenTagName = r.ThenTagName
	out.IsActive = r.IsActive
	return out
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
// parseTagID extracts and validates the :tag_id path parameter. On failure it
// writes a 400 validation error and returns ok=false so callers can bail out.
func parseTagID(c *gin.Context) (uuid.UUID, bool) {
	tagID, err := uuid.Parse(c.Param("tag_id"))
	if err == nil {
		return tagID, true
	}
	respondError(c, domain.ErrValidation)
	return uuid.Nil, false
}
// parseOffsetParams reads limit/offset/sort/order/search from the query
// string, applying defaults: limit 50 (accepted range 1..200), offset 0,
// order "desc", and the caller-supplied default sort key.
func parseOffsetParams(c *gin.Context, defaultSort string) port.OffsetParams {
	params := port.OffsetParams{
		Sort:   c.DefaultQuery("sort", defaultSort),
		Order:  c.DefaultQuery("order", "desc"),
		Search: c.Query("search"),
		Limit:  50,
	}
	// strconv.Atoi("") errors, so an absent parameter keeps the default.
	if n, err := strconv.Atoi(c.Query("limit")); err == nil && n > 0 && n <= 200 {
		params.Limit = n
	}
	if n, err := strconv.Atoi(c.Query("offset")); err == nil && n >= 0 {
		params.Offset = n
	}
	return params
}
// ---------------------------------------------------------------------------
// GET /tags
// ---------------------------------------------------------------------------
// List handles GET /tags: an offset-paginated, searchable tag listing
// defaulting to newest-first ("created" sort).
func (h *TagHandler) List(c *gin.Context) {
	page, err := h.tagSvc.List(c.Request.Context(), parseOffsetParams(c, "created"))
	if err != nil {
		respondError(c, err)
		return
	}
	items := make([]tagJSON, 0, len(page.Items))
	for _, tag := range page.Items {
		items = append(items, toTagJSON(tag))
	}
	respondJSON(c, http.StatusOK, gin.H{
		"items":  items,
		"total":  page.Total,
		"offset": page.Offset,
		"limit":  page.Limit,
	})
}
// ---------------------------------------------------------------------------
// POST /tags
// ---------------------------------------------------------------------------
// Create handles POST /tags: creates a tag from a JSON body. Only "name" is
// required; notes/color/category_id/is_public are optional.
func (h *TagHandler) Create(c *gin.Context) {
	var body struct {
		Name       string  `json:"name" binding:"required"`
		Notes      *string `json:"notes"`
		Color      *string `json:"color"`
		CategoryID *string `json:"category_id"`
		IsPublic   *bool   `json:"is_public"`
	}
	if c.ShouldBindJSON(&body) != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	params := service.TagParams{
		Name:     body.Name,
		Notes:    body.Notes,
		Color:    body.Color,
		IsPublic: body.IsPublic,
	}
	if body.CategoryID != nil {
		categoryID, err := uuid.Parse(*body.CategoryID)
		if err != nil {
			respondError(c, domain.ErrValidation)
			return
		}
		params.CategoryID = &categoryID
	}
	created, err := h.tagSvc.Create(c.Request.Context(), params)
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusCreated, toTagJSON(*created))
}
// ---------------------------------------------------------------------------
// GET /tags/:tag_id
// ---------------------------------------------------------------------------
// Get handles GET /tags/:tag_id: returns a single tag.
func (h *TagHandler) Get(c *gin.Context) {
	tagID, ok := parseTagID(c)
	if !ok {
		return
	}
	tag, err := h.tagSvc.Get(c.Request.Context(), tagID)
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusOK, toTagJSON(*tag))
}
// ---------------------------------------------------------------------------
// PATCH /tags/:tag_id
// ---------------------------------------------------------------------------
// Update handles PATCH /tags/:tag_id. Partial update: only fields present in
// the JSON body are changed; an explicit null clears the field.
func (h *TagHandler) Update(c *gin.Context) {
	id, ok := parseTagID(c)
	if !ok {
		return
	}
	// Use a raw map to distinguish "field absent" (no change) from
	// "field = null" (explicitly clear the value).
	var raw map[string]any
	if err := c.ShouldBindJSON(&raw); err != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	params := service.TagParams{}
	// Fields of the wrong JSON type are silently ignored (treated as absent).
	if v, ok := raw["name"]; ok {
		if s, ok := v.(string); ok {
			params.Name = s
		}
	}
	if _, ok := raw["notes"]; ok {
		if raw["notes"] == nil {
			// null → non-nil pointer to "" signals "clear notes" downstream.
			params.Notes = ptr("")
		} else if s, ok := raw["notes"].(string); ok {
			params.Notes = &s
		}
	}
	if _, ok := raw["color"]; ok {
		if raw["color"] == nil {
			// null → non-nil pointer to "" signals "clear color" downstream.
			nilStr := ""
			params.Color = &nilStr
		} else if s, ok := raw["color"].(string); ok {
			params.Color = &s
		}
	}
	if _, ok := raw["category_id"]; ok {
		if raw["category_id"] == nil {
			nilID := uuid.Nil
			params.CategoryID = &nilID // signals "unassign"
		} else if s, ok := raw["category_id"].(string); ok {
			cid, err := uuid.Parse(s)
			if err != nil {
				respondError(c, domain.ErrValidation)
				return
			}
			params.CategoryID = &cid
		}
	}
	if v, ok := raw["is_public"]; ok {
		if b, ok := v.(bool); ok {
			params.IsPublic = &b
		}
	}
	t, err := h.tagSvc.Update(c.Request.Context(), id, params)
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusOK, toTagJSON(*t))
}
// ---------------------------------------------------------------------------
// DELETE /tags/:tag_id
// ---------------------------------------------------------------------------
// Delete handles DELETE /tags/:tag_id; 204 on success.
func (h *TagHandler) Delete(c *gin.Context) {
	tagID, ok := parseTagID(c)
	if !ok {
		return
	}
	if delErr := h.tagSvc.Delete(c.Request.Context(), tagID); delErr != nil {
		respondError(c, delErr)
		return
	}
	c.Status(http.StatusNoContent)
}
// ---------------------------------------------------------------------------
// GET /tags/:tag_id/files
// ---------------------------------------------------------------------------
// ListFiles handles GET /tags/:tag_id/files: cursor-paginated files carrying
// the tag. Delegates to the file service with a tag filter expression.
func (h *TagHandler) ListFiles(c *gin.Context) {
	tagID, ok := parseTagID(c)
	if !ok {
		return
	}
	limit := 50
	if n, err := strconv.Atoi(c.Query("limit")); err == nil && n > 0 && n <= 200 {
		limit = n
	}
	page, err := h.fileSvc.List(c.Request.Context(), domain.FileListParams{
		Cursor:    c.Query("cursor"),
		Direction: "forward",
		Limit:     limit,
		Sort:      "created",
		Order:     "desc",
		Filter:    "{t=" + tagID.String() + "}", // filter DSL: files tagged with tagID
	})
	if err != nil {
		respondError(c, err)
		return
	}
	items := make([]fileJSON, 0, len(page.Items))
	for _, f := range page.Items {
		items = append(items, toFileJSON(f))
	}
	respondJSON(c, http.StatusOK, gin.H{
		"items":       items,
		"next_cursor": page.NextCursor,
		"prev_cursor": page.PrevCursor,
	})
}
// ---------------------------------------------------------------------------
// GET /tags/:tag_id/rules
// ---------------------------------------------------------------------------
// ListRules handles GET /tags/:tag_id/rules: all auto-rules triggered by the tag.
func (h *TagHandler) ListRules(c *gin.Context) {
	tagID, ok := parseTagID(c)
	if !ok {
		return
	}
	rules, err := h.tagSvc.ListRules(c.Request.Context(), tagID)
	if err != nil {
		respondError(c, err)
		return
	}
	out := make([]tagRuleJSON, 0, len(rules))
	for _, rule := range rules {
		out = append(out, toTagRuleJSON(rule))
	}
	respondJSON(c, http.StatusOK, out)
}
// ---------------------------------------------------------------------------
// POST /tags/:tag_id/rules
// ---------------------------------------------------------------------------
// CreateRule handles POST /tags/:tag_id/rules: creates a "when this tag, also
// apply that tag" rule. is_active and apply_to_existing default to true.
func (h *TagHandler) CreateRule(c *gin.Context) {
	whenTagID, ok := parseTagID(c)
	if !ok {
		return
	}
	var body struct {
		ThenTagID       string `json:"then_tag_id" binding:"required"`
		IsActive        *bool  `json:"is_active"`
		ApplyToExisting *bool  `json:"apply_to_existing"`
	}
	if c.ShouldBindJSON(&body) != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	thenTagID, err := uuid.Parse(body.ThenTagID)
	if err != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	// Optional flags default to true when omitted.
	isActive, applyToExisting := true, true
	if body.IsActive != nil {
		isActive = *body.IsActive
	}
	if body.ApplyToExisting != nil {
		applyToExisting = *body.ApplyToExisting
	}
	rule, err := h.tagSvc.CreateRule(c.Request.Context(), whenTagID, thenTagID, isActive, applyToExisting)
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusCreated, toTagRuleJSON(*rule))
}
// ---------------------------------------------------------------------------
// PATCH /tags/:tag_id/rules/:then_tag_id
// ---------------------------------------------------------------------------
// PatchRule handles PATCH /tags/:tag_id/rules/:then_tag_id: sets a rule's
// is_active flag (required field); apply_to_existing defaults to false.
func (h *TagHandler) PatchRule(c *gin.Context) {
	whenTagID, ok := parseTagID(c)
	if !ok {
		return
	}
	thenTagID, parseErr := uuid.Parse(c.Param("then_tag_id"))
	if parseErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	var body struct {
		IsActive        *bool `json:"is_active"`
		ApplyToExisting *bool `json:"apply_to_existing"`
	}
	// is_active is mandatory for this endpoint.
	if err := c.ShouldBindJSON(&body); err != nil || body.IsActive == nil {
		respondError(c, domain.ErrValidation)
		return
	}
	applyToExisting := body.ApplyToExisting != nil && *body.ApplyToExisting
	rule, err := h.tagSvc.SetRuleActive(c.Request.Context(), whenTagID, thenTagID, *body.IsActive, applyToExisting)
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusOK, toTagRuleJSON(*rule))
}
// ---------------------------------------------------------------------------
// DELETE /tags/:tag_id/rules/:then_tag_id
// ---------------------------------------------------------------------------
// DeleteRule handles DELETE /tags/:tag_id/rules/:then_tag_id; 204 on success.
func (h *TagHandler) DeleteRule(c *gin.Context) {
	whenTagID, ok := parseTagID(c)
	if !ok {
		return
	}
	thenTagID, parseErr := uuid.Parse(c.Param("then_tag_id"))
	if parseErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	if delErr := h.tagSvc.DeleteRule(c.Request.Context(), whenTagID, thenTagID); delErr != nil {
		respondError(c, delErr)
		return
	}
	c.Status(http.StatusNoContent)
}
// ---------------------------------------------------------------------------
// File-tag endpoints wired through TagService
// (called from file routes, shared handler logic lives here)
// ---------------------------------------------------------------------------
// FileListTags handles GET /files/:id/tags: lists all tags on a file.
func (h *TagHandler) FileListTags(c *gin.Context) {
	fileID, parseErr := uuid.Parse(c.Param("id"))
	if parseErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	tags, err := h.tagSvc.ListFileTags(c.Request.Context(), fileID)
	if err != nil {
		respondError(c, err)
		return
	}
	out := make([]tagJSON, 0, len(tags))
	for _, tag := range tags {
		out = append(out, toTagJSON(tag))
	}
	respondJSON(c, http.StatusOK, out)
}
// FileSetTags handles PUT /files/:id/tags: replaces the file's tag set and
// returns the resulting tag list.
func (h *TagHandler) FileSetTags(c *gin.Context) {
	fileID, parseErr := uuid.Parse(c.Param("id"))
	if parseErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	var body struct {
		TagIDs []string `json:"tag_ids" binding:"required"`
	}
	if c.ShouldBindJSON(&body) != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	tagIDs, parseErr := parseUUIDs(body.TagIDs)
	if parseErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	tags, err := h.tagSvc.SetFileTags(c.Request.Context(), fileID, tagIDs)
	if err != nil {
		respondError(c, err)
		return
	}
	out := make([]tagJSON, 0, len(tags))
	for _, tag := range tags {
		out = append(out, toTagJSON(tag))
	}
	respondJSON(c, http.StatusOK, out)
}
// FileAddTag handles PUT /files/:id/tags/:tag_id: adds a single tag to a file
// and returns the file's updated tag list.
func (h *TagHandler) FileAddTag(c *gin.Context) {
	fileID, parseErr := uuid.Parse(c.Param("id"))
	if parseErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	tagID, parseErr := uuid.Parse(c.Param("tag_id"))
	if parseErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	tags, err := h.tagSvc.AddFileTag(c.Request.Context(), fileID, tagID)
	if err != nil {
		respondError(c, err)
		return
	}
	out := make([]tagJSON, 0, len(tags))
	for _, tag := range tags {
		out = append(out, toTagJSON(tag))
	}
	respondJSON(c, http.StatusOK, out)
}
// FileRemoveTag handles DELETE /files/:id/tags/:tag_id; 204 on success.
func (h *TagHandler) FileRemoveTag(c *gin.Context) {
	fileID, parseErr := uuid.Parse(c.Param("id"))
	if parseErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	tagID, parseErr := uuid.Parse(c.Param("tag_id"))
	if parseErr != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	if delErr := h.tagSvc.RemoveFileTag(c.Request.Context(), fileID, tagID); delErr != nil {
		respondError(c, delErr)
		return
	}
	c.Status(http.StatusNoContent)
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
func ptr(s string) *string { return &s }

View File

@ -0,0 +1,258 @@
package handler
import (
"net/http"
"strconv"
"github.com/gin-gonic/gin"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
"tanabata/backend/internal/service"
)
// UserHandler handles all /users endpoints. Admin-only routes enforce their
// check via requireAdmin inside each handler, not in the router.
type UserHandler struct {
	userSvc *service.UserService
}

// NewUserHandler creates a UserHandler.
func NewUserHandler(userSvc *service.UserService) *UserHandler {
	return &UserHandler{userSvc: userSvc}
}
// ---------------------------------------------------------------------------
// Response types
// ---------------------------------------------------------------------------
// userJSON is the wire representation of a user account returned by all
// /users endpoints.
type userJSON struct {
	ID        int16  `json:"id"`
	Name      string `json:"name"`
	IsAdmin   bool   `json:"is_admin"`
	CanCreate bool   `json:"can_create"`
	IsBlocked bool   `json:"is_blocked"`
}
// toUserJSON converts a domain.User to its wire representation.
func toUserJSON(u domain.User) userJSON {
	var out userJSON
	out.ID = u.ID
	out.Name = u.Name
	out.IsAdmin = u.IsAdmin
	out.CanCreate = u.CanCreate
	out.IsBlocked = u.IsBlocked
	return out
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
// requireAdmin verifies the authenticated user carries the admin flag. When
// not, it writes a 403 response and returns false so the caller can bail out.
func requireAdmin(c *gin.Context) bool {
	if _, isAdmin, _ := domain.UserFromContext(c.Request.Context()); isAdmin {
		return true
	}
	respondError(c, domain.ErrForbidden)
	return false
}
// parseUserID extracts and validates the numeric :user_id path parameter
// (must fit in int16). On failure it writes a 400 and returns ok=false.
func parseUserID(c *gin.Context) (int16, bool) {
	parsed, err := strconv.ParseInt(c.Param("user_id"), 10, 16)
	if err == nil {
		return int16(parsed), true
	}
	respondError(c, domain.ErrValidation)
	return 0, false
}
// ---------------------------------------------------------------------------
// GET /users/me
// ---------------------------------------------------------------------------
// GetMe handles GET /users/me: returns the authenticated user's own record.
func (h *UserHandler) GetMe(c *gin.Context) {
	me, err := h.userSvc.GetMe(c.Request.Context())
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusOK, toUserJSON(*me))
}
// ---------------------------------------------------------------------------
// PATCH /users/me
// ---------------------------------------------------------------------------
// UpdateMe handles PATCH /users/me: lets the authenticated user change their
// own name and/or password.
func (h *UserHandler) UpdateMe(c *gin.Context) {
	var body struct {
		Name     string  `json:"name"`
		Password *string `json:"password"`
	}
	if c.ShouldBindJSON(&body) != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	params := service.UpdateMeParams{Name: body.Name, Password: body.Password}
	updated, err := h.userSvc.UpdateMe(c.Request.Context(), params)
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusOK, toUserJSON(*updated))
}
// ---------------------------------------------------------------------------
// GET /users (admin)
// ---------------------------------------------------------------------------
// List handles GET /users (admin only): an offset-paginated user listing,
// defaulting to sort "id" ascending.
//
// Fix: limit/offset were previously accepted with no bounds check, letting
// negative (or absurdly large) values flow into the repository layer. They
// are now validated the same way parseOffsetParams validates them for tags:
// limit must be 1..200, offset non-negative; invalid values are ignored and
// the zero-value defaults apply, as before.
func (h *UserHandler) List(c *gin.Context) {
	if !requireAdmin(c) {
		return
	}
	params := port.OffsetParams{
		Sort:  c.DefaultQuery("sort", "id"),
		Order: c.DefaultQuery("order", "asc"),
	}
	if s := c.Query("limit"); s != "" {
		if n, err := strconv.Atoi(s); err == nil && n > 0 && n <= 200 {
			params.Limit = n
		}
	}
	if s := c.Query("offset"); s != "" {
		if n, err := strconv.Atoi(s); err == nil && n >= 0 {
			params.Offset = n
		}
	}
	page, err := h.userSvc.List(c.Request.Context(), params)
	if err != nil {
		respondError(c, err)
		return
	}
	items := make([]userJSON, len(page.Items))
	for i, u := range page.Items {
		items[i] = toUserJSON(u)
	}
	respondJSON(c, http.StatusOK, gin.H{
		"items":  items,
		"total":  page.Total,
		"offset": page.Offset,
		"limit":  page.Limit,
	})
}
// ---------------------------------------------------------------------------
// POST /users (admin)
// ---------------------------------------------------------------------------
// Create handles POST /users (admin only): creates a new user account.
func (h *UserHandler) Create(c *gin.Context) {
	if !requireAdmin(c) {
		return
	}
	var body struct {
		Name      string `json:"name" binding:"required"`
		Password  string `json:"password" binding:"required"`
		IsAdmin   bool   `json:"is_admin"`
		CanCreate bool   `json:"can_create"`
	}
	if c.ShouldBindJSON(&body) != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	params := service.CreateUserParams{
		Name:      body.Name,
		Password:  body.Password,
		IsAdmin:   body.IsAdmin,
		CanCreate: body.CanCreate,
	}
	created, err := h.userSvc.Create(c.Request.Context(), params)
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusCreated, toUserJSON(*created))
}
// ---------------------------------------------------------------------------
// GET /users/:user_id (admin)
// ---------------------------------------------------------------------------
// Get handles GET /users/:user_id (admin only): returns a single user record.
func (h *UserHandler) Get(c *gin.Context) {
	if !requireAdmin(c) {
		return
	}
	userID, ok := parseUserID(c)
	if !ok {
		return
	}
	found, err := h.userSvc.Get(c.Request.Context(), userID)
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusOK, toUserJSON(*found))
}
// ---------------------------------------------------------------------------
// PATCH /users/:user_id (admin)
// ---------------------------------------------------------------------------
// UpdateAdmin handles PATCH /users/:user_id (admin only): partial update of
// the is_admin / can_create / is_blocked flags; omitted fields are unchanged.
func (h *UserHandler) UpdateAdmin(c *gin.Context) {
	if !requireAdmin(c) {
		return
	}
	userID, ok := parseUserID(c)
	if !ok {
		return
	}
	var body struct {
		IsAdmin   *bool `json:"is_admin"`
		CanCreate *bool `json:"can_create"`
		IsBlocked *bool `json:"is_blocked"`
	}
	if c.ShouldBindJSON(&body) != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	params := service.UpdateAdminParams{
		IsAdmin:   body.IsAdmin,
		CanCreate: body.CanCreate,
		IsBlocked: body.IsBlocked,
	}
	updated, err := h.userSvc.UpdateAdmin(c.Request.Context(), userID, params)
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusOK, toUserJSON(*updated))
}
// ---------------------------------------------------------------------------
// DELETE /users/:user_id (admin)
// ---------------------------------------------------------------------------
// Delete handles DELETE /users/:user_id (admin only); 204 on success.
func (h *UserHandler) Delete(c *gin.Context) {
	if !requireAdmin(c) {
		return
	}
	userID, ok := parseUserID(c)
	if !ok {
		return
	}
	if delErr := h.userSvc.Delete(c.Request.Context(), userID); delErr != nil {
		respondError(c, delErr)
		return
	}
	c.Status(http.StatusNoContent)
}

View File

@ -1,54 +0,0 @@
package postgres
import (
"context"
"fmt"
"time"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"tanabata/internal/domain"
)
// New opens a pgx connection pool for dbURL, verifies connectivity with a
// ping, and returns the ready-to-use pool.
//
// Fix: on ping failure the freshly created pool is now closed before
// returning — previously it leaked, leaving its background health-check
// goroutine and any established connections alive.
func New(dbURL string) (*pgxpool.Pool, error) {
	poolConfig, err := pgxpool.ParseConfig(dbURL)
	if err != nil {
		return nil, fmt.Errorf("failed to parse connection string: %w", err)
	}
	// Pool tuning: up to 100 connections, recycled hourly, health-checked
	// every 30s; MinConns=0 lets an idle service hold no connections.
	poolConfig.MaxConns = 100
	poolConfig.MinConns = 0
	poolConfig.MaxConnLifetime = time.Hour
	poolConfig.HealthCheckPeriod = 30 * time.Second
	ctx := context.Background()
	db, err := pgxpool.NewWithConfig(ctx, poolConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize DB connections pool: %w", err)
	}
	if err = db.Ping(ctx); err != nil {
		db.Close() // release the pool we just created
		return nil, fmt.Errorf("failed to ping database: %w", err)
	}
	return db, nil
}
// transaction runs handler inside a database transaction: commit on success,
// rollback on error.
//
// Fix: rollback is now deferred, so a panic inside handler can no longer
// leave the transaction (and its pooled connection) open. After a successful
// Commit the deferred Rollback is a harmless no-op (pgx returns ErrTxClosed,
// which we deliberately ignore).
func transaction(ctx context.Context, db *pgxpool.Pool, handler func(context.Context, pgx.Tx) *domain.DomainError) *domain.DomainError {
	tx, err := db.Begin(ctx)
	if err != nil {
		return domain.NewErrorUnexpected().Wrap(err)
	}
	defer tx.Rollback(ctx) //nolint:errcheck — no-op after Commit
	if domainErr := handler(ctx, tx); domainErr != nil {
		return domainErr
	}
	if err := tx.Commit(ctx); err != nil {
		return domain.NewErrorUnexpected().Wrap(err)
	}
	return nil
}

View File

@ -1,331 +0,0 @@
package postgres
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgxpool"
"tanabata/internal/domain"
)
// FileRepository implements file persistence on top of a pgx connection pool.
type FileRepository struct {
	db *pgxpool.Pool // shared pool; safe for concurrent use
}

// NewFileRepository creates a FileRepository backed by db.
func NewFileRepository(db *pgxpool.Pool) *FileRepository {
	return &FileRepository{db: db}
}
// GetAccess reports whether user_id may view / edit the file file_id.
// Access is granted by an ACL row, by being the file's creator, or by the
// admin flag (the OR chains in the SELECT).
//
// Returns ErrorFileNotFound when no file row matches, and a bad-request
// error when file_id is not valid UUID text (Postgres error 22P02).
func (s *FileRepository) GetAccess(ctx context.Context, user_id int, file_id string) (canView, canEdit bool, domainErr *domain.DomainError) {
	row := s.db.QueryRow(ctx, `
		SELECT
			COALESCE(a.view, FALSE) OR f.creator_id=$1 OR COALESCE(u.is_admin, FALSE),
			COALESCE(a.edit, FALSE) OR f.creator_id=$1 OR COALESCE(u.is_admin, FALSE)
		FROM data.files f
		LEFT JOIN acl.files a ON a.file_id=f.id AND a.user_id=$1
		LEFT JOIN system.users u ON u.id=$1
		WHERE f.id=$2
	`, user_id, file_id)
	err := row.Scan(&canView, &canEdit)
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			domainErr = domain.NewErrorFileNotFound(file_id).Wrap(err)
			return
		}
		var pgErr *pgconn.PgError
		if errors.As(err, &pgErr) {
			switch pgErr.Code {
			case "22P02": // invalid_text_representation: file_id is not a UUID
				domainErr = domain.NewErrorBadRequest(fmt.Sprintf("Invalid file id: %q", file_id)).Wrap(err)
				return
			}
		}
		domainErr = domain.NewErrorUnexpected().Wrap(err)
	}
	return
}
// GetSlice returns one page of files visible to user_id (creator, ACL view
// grant, or admin), filtered and sorted by the caller-supplied DSL strings,
// together with pagination totals.
//
// NOTE(review): the query is assembled by string concatenation; its safety
// relies entirely on filterToSQL/sortToSQL never embedding raw user input —
// confirm those helpers sanitize their output.
func (s *FileRepository) GetSlice(ctx context.Context, user_id int, filter, sort string, limit, offset int) (files domain.Slice[domain.FileItem], domainErr *domain.DomainError) {
	filterCond, err := filterToSQL(filter)
	if err != nil {
		domainErr = domain.NewErrorBadRequest(fmt.Sprintf("Invalid filter string: %q", filter)).Wrap(err)
		return
	}
	sortExpr, err := sortToSQL(sort)
	if err != nil {
		domainErr = domain.NewErrorBadRequest(fmt.Sprintf("Invalid sorting parameter: %q", sort)).Wrap(err)
		return
	}
	// prepare query — the trailing AND expects filterCond to supply the
	// final predicate.
	query := `
		SELECT
			f.id,
			f.name,
			m.name,
			m.extension,
			uuid_extract_timestamp(f.id),
			u.name,
			u.is_admin
		FROM data.files f
		JOIN system.mime m ON m.id=f.mime_id
		JOIN system.users u ON u.id=f.creator_id
		WHERE f.is_deleted IS FALSE AND (f.creator_id=$1 OR (SELECT view FROM acl.files WHERE file_id=f.id AND user_id=$1) OR (SELECT is_admin FROM system.users WHERE id=$1)) AND
	`
	query += filterCond
	// queryCount is captured before sort/limit/offset are appended, so the
	// COUNT(*) below reflects the full filtered set, not just this page.
	queryCount := query
	query += sortExpr
	if limit >= 0 {
		query += fmt.Sprintf(" LIMIT %d", limit)
	}
	if offset > 0 {
		query += fmt.Sprintf(" OFFSET %d", offset)
	}
	// execute query — rows and total are read in one transaction so they
	// observe a consistent snapshot.
	domainErr = transaction(ctx, s.db, func(ctx context.Context, tx pgx.Tx) (domainErr *domain.DomainError) {
		rows, err := tx.Query(ctx, query, user_id)
		if err != nil && !errors.Is(err, pgx.ErrNoRows) {
			var pgErr *pgconn.PgError
			if errors.As(err, &pgErr) {
				switch pgErr.Code {
				case "42P10": // invalid column reference — bad sort field
					domainErr = domain.NewErrorBadRequest(fmt.Sprintf("Invalid sorting field: %q", sort[1:])).Wrap(err)
					return
				}
			}
			domainErr = domain.NewErrorUnexpected().Wrap(err)
			return
		}
		defer rows.Close()
		count := 0
		for rows.Next() {
			var file domain.FileItem
			err = rows.Scan(&file.ID, &file.Name, &file.MIME.Name, &file.MIME.Extension, &file.CreatedAt, &file.Creator.Name, &file.Creator.IsAdmin)
			if err != nil {
				domainErr = domain.NewErrorUnexpected().Wrap(err)
				return
			}
			files.Data = append(files.Data, file)
			count++
		}
		err = rows.Err()
		if err != nil {
			domainErr = domain.NewErrorUnexpected().Wrap(err)
			return
		}
		files.Pagination.Limit = limit
		files.Pagination.Offset = offset
		files.Pagination.Count = count
		// Total across all pages: wrap the unpaginated query in COUNT(*).
		row := tx.QueryRow(ctx, fmt.Sprintf("SELECT COUNT(*) FROM (%s) tmp", queryCount), user_id)
		err = row.Scan(&files.Pagination.Total)
		if err != nil {
			domainErr = domain.NewErrorUnexpected().Wrap(err)
		}
		return
	})
	return
}
// Get returns the full metadata record for a single non-deleted file,
// including whether the requesting user has viewed it (count of
// activity.file_views rows).
//
// Fix: the WHERE clause previously filtered only on is_deleted and never on
// f.id, so QueryRow returned an arbitrary non-deleted file regardless of the
// requested file_id (the id was only used in the view-count subquery). The
// id predicate is now applied, which also makes the ErrNoRows → not-found
// mapping meaningful.
func (s *FileRepository) Get(ctx context.Context, user_id int, file_id string) (file domain.FileFull, domainErr *domain.DomainError) {
	row := s.db.QueryRow(ctx, `
		SELECT
			f.id,
			f.name,
			m.name,
			m.extension,
			uuid_extract_timestamp(f.id),
			u.name,
			u.is_admin,
			f.notes,
			f.metadata,
			(SELECT COUNT(*) FROM activity.file_views fv WHERE fv.file_id=$2 AND fv.user_id=$1)
		FROM data.files f
		JOIN system.mime m ON m.id=f.mime_id
		JOIN system.users u ON u.id=f.creator_id
		WHERE f.id=$2 AND f.is_deleted IS FALSE
	`, user_id, file_id)
	err := row.Scan(&file.ID, &file.Name, &file.MIME.Name, &file.MIME.Extension, &file.CreatedAt, &file.Creator.Name, &file.Creator.IsAdmin, &file.Notes, &file.Metadata, &file.Viewed)
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			domainErr = domain.NewErrorFileNotFound(file_id).Wrap(err)
			return
		}
		var pgErr *pgconn.PgError
		if errors.As(err, &pgErr) {
			switch pgErr.Code {
			case "22P02": // invalid_text_representation: file_id is not a UUID
				domainErr = domain.NewErrorBadRequest(fmt.Sprintf("Invalid file id: %q", file_id)).Wrap(err)
				return
			}
		}
		domainErr = domain.NewErrorUnexpected().Wrap(err)
		return
	}
	return
}
// Add inserts a new file row. The MIME type must already exist in
// system.mime (its id is a FK on data.files); empty name/notes are stored as
// SQL NULL via NULLIF.
//
// Returns ErrorMIMENotSupported for an unknown MIME type, and bad-request
// for an invalid datetime (22007) or a not-null violation (23502).
func (s *FileRepository) Add(ctx context.Context, user_id int, name, mime string, datetime time.Time, notes string, metadata json.RawMessage) (file domain.FileCore, domainErr *domain.DomainError) {
	// Resolve the MIME type first so an unsupported type fails fast.
	var mime_id int
	var extension string
	row := s.db.QueryRow(ctx, "SELECT id, extension FROM system.mime WHERE name=$1", mime)
	err := row.Scan(&mime_id, &extension)
	if err != nil {
		if errors.Is(err, pgx.ErrNoRows) {
			domainErr = domain.NewErrorMIMENotSupported(mime).Wrap(err)
			return
		}
		domainErr = domain.NewErrorUnexpected().Wrap(err)
		return
	}
	row = s.db.QueryRow(ctx, `
		INSERT INTO data.files (name, mime_id, datetime, creator_id, notes, metadata)
		VALUES (NULLIF($1, ''), $2, $3, $4, NULLIF($5 ,''), $6)
		RETURNING id
	`, name, mime_id, datetime, user_id, notes, metadata)
	err = row.Scan(&file.ID)
	if err != nil {
		var pgErr *pgconn.PgError
		if errors.As(err, &pgErr) {
			switch pgErr.Code {
			case "22007": // invalid_datetime_format
				domainErr = domain.NewErrorBadRequest(fmt.Sprintf("Invalid datetime: %q", datetime)).Wrap(err)
				return
			case "23502": // not_null_violation
				domainErr = domain.NewErrorBadRequest("Unable to set NULL to some fields").Wrap(err)
				return
			}
		}
		domainErr = domain.NewErrorUnexpected().Wrap(err)
		return
	}
	// Echo back caller-known fields so the service layer gets a usable core
	// record without re-querying.
	// NOTE(review): Name echoes the input even when empty, although the DB
	// stored NULL in that case — confirm this asymmetry is intended.
	file.Name = &name
	file.MIME.Name = mime
	file.MIME.Extension = extension
	return
}
// Update applies a partial update to a non-deleted file.
//
// updates maps column name -> new value; only "name", "notes", "datetime" and
// "metadata" are accepted. Empty strings are converted to NULL via NULLIF.
// An empty updates map is a successful no-op. Returns ErrorBadRequest for
// unknown fields or invalid values, ErrorFileNotFound when no row matched,
// ErrorUnexpected otherwise.
func (s *FileRepository) Update(ctx context.Context, file_id string, updates map[string]interface{}) (domainErr *domain.DomainError) {
    if len(updates) == 0 {
        // Nothing to change: treat as a successful no-op.
        return
    }
    query := "UPDATE data.files SET"
    newValues := []interface{}{file_id}
    count := 2
    for field, value := range updates {
        // BUG FIX: SET clauses must be comma-separated; the previous version
        // produced invalid SQL whenever more than one field was updated.
        if count > 2 {
            query += ","
        }
        switch field {
        case "name", "notes":
            query += fmt.Sprintf(" %s=NULLIF($%d, '')", field, count)
        case "datetime":
            query += fmt.Sprintf(" %s=NULLIF($%d, '')::timestamptz", field, count)
        case "metadata":
            query += fmt.Sprintf(" %s=NULLIF($%d, '')::jsonb", field, count)
        default:
            domainErr = domain.NewErrorBadRequest(fmt.Sprintf("Unknown field: %q", field))
            return
        }
        newValues = append(newValues, value)
        count++
    }
    query += " WHERE id=$1 AND is_deleted IS FALSE"
    commandTag, err := s.db.Exec(ctx, query, newValues...)
    if err != nil {
        var pgErr *pgconn.PgError
        if errors.As(err, &pgErr) {
            switch pgErr.Code {
            case "22P02":
                domainErr = domain.NewErrorBadRequest("Invalid format of some values").Wrap(err)
                return
            case "22007":
                domainErr = domain.NewErrorBadRequest(fmt.Sprintf("Invalid datetime: %q", updates["datetime"])).Wrap(err)
                return
            case "23502":
                domainErr = domain.NewErrorBadRequest("Some fields cannot be empty").Wrap(err)
                return
            }
        }
        domainErr = domain.NewErrorUnexpected().Wrap(err)
        return
    }
    if commandTag.RowsAffected() == 0 {
        // err is nil on this path, so there is nothing to wrap.
        domainErr = domain.NewErrorFileNotFound(file_id)
        return
    }
    return
}
// Delete soft-deletes a file by setting is_deleted=true.
//
// Returns ErrorBadRequest when file_id is not a valid UUID (22P02),
// ErrorFileNotFound when no non-deleted row matched, ErrorUnexpected otherwise.
func (s *FileRepository) Delete(ctx context.Context, file_id string) (domainErr *domain.DomainError) {
    commandTag, err := s.db.Exec(ctx,
        "UPDATE data.files SET is_deleted=true WHERE id=$1 AND is_deleted IS FALSE",
        file_id)
    if err != nil {
        var pgErr *pgconn.PgError
        if errors.As(err, &pgErr) {
            switch pgErr.Code {
            case "22P02":
                domainErr = domain.NewErrorBadRequest(fmt.Sprintf("Invalid file id: %q", file_id)).Wrap(err)
                return
            }
        }
        domainErr = domain.NewErrorUnexpected().Wrap(err)
        return
    }
    if commandTag.RowsAffected() == 0 {
        // err is nil on this path, so there is nothing to wrap (the previous
        // code wrapped a nil error here).
        domainErr = domain.NewErrorFileNotFound(file_id)
        return
    }
    return
}
// GetTags lists every tag attached to the given file, each with its optional
// category, provided the requesting user may view the file (creator, ACL view
// grant, or admin). A deleted or invisible file yields an empty result.
func (s *FileRepository) GetTags(ctx context.Context, user_id int, file_id string) (tags []domain.TagItem, domainErr *domain.DomainError) {
    rows, queryErr := s.db.Query(ctx, `
    SELECT
        t.id,
        t.name,
        t.color,
        c.id,
        c.name,
        c.color
    FROM data.tags t
    LEFT JOIN data.categories c ON c.id=t.category_id
    JOIN data.file_tag ft ON ft.tag_id=t.id AND ft.file_id=$2
    JOIN data.files f ON f.id=$2
    WHERE NOT f.is_deleted AND (f.creator_id=$1 OR (SELECT view FROM acl.files WHERE file_id=$2 AND user_id=$1) OR (SELECT is_admin FROM system.users WHERE id=$1))
    `, user_id, file_id)
    if queryErr != nil {
        // 22P02 / 22007: malformed id or value — surface as a bad request.
        var pgErr *pgconn.PgError
        if errors.As(queryErr, &pgErr) && (pgErr.Code == "22P02" || pgErr.Code == "22007") {
            domainErr = domain.NewErrorBadRequest(pgErr.Message).Wrap(queryErr)
        } else {
            domainErr = domain.NewErrorUnexpected().Wrap(queryErr)
        }
        return
    }
    defer rows.Close()
    for rows.Next() {
        var item domain.TagItem
        if scanErr := rows.Scan(&item.ID, &item.Name, &item.Color, &item.Category.ID, &item.Category.Name, &item.Category.Color); scanErr != nil {
            domainErr = domain.NewErrorUnexpected().Wrap(scanErr)
            return
        }
        tags = append(tags, item)
    }
    if iterErr := rows.Err(); iterErr != nil {
        domainErr = domain.NewErrorUnexpected().Wrap(iterErr)
    }
    return
}

View File

@ -0,0 +1,767 @@
// Package integration contains end-to-end tests that start a real HTTP server
// against a disposable PostgreSQL database created on the fly.
//
// The test connects to an admin DSN (defaults to the local PG 16 socket) to
// CREATE / DROP an ephemeral database per test suite run, then runs all goose
// migrations on it.
//
// Override the admin DSN with TANABATA_TEST_ADMIN_DSN:
//
// export TANABATA_TEST_ADMIN_DSN="host=/var/run/postgresql port=5434 user=h1k0 dbname=postgres sslmode=disable"
// go test -v -timeout 120s tanabata/backend/internal/integration
package integration
import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"mime/multipart"
	"net"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgxpool"
	"github.com/jackc/pgx/v5/stdlib"
	"github.com/pressly/goose/v3"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"tanabata/backend/internal/db/postgres"
	"tanabata/backend/internal/handler"
	"tanabata/backend/internal/service"
	"tanabata/backend/internal/storage"
	"tanabata/backend/migrations"
)
// defaultAdminDSN is the fallback when TANABATA_TEST_ADMIN_DSN is unset.
// Targets the PG 16 cluster on this machine (port 5434, Unix socket).
const defaultAdminDSN = "host=/var/run/postgresql port=5434 user=h1k0 dbname=postgres sslmode=disable"
// ---------------------------------------------------------------------------
// Test harness
// ---------------------------------------------------------------------------
// harness bundles everything a test needs to drive the API over real HTTP.
type harness struct {
    t      *testing.T       // owning test; helper methods assert through it
    server *httptest.Server // full router mounted on an ephemeral port
    client *http.Client     // the server's preconfigured client
}
// setupSuite creates an ephemeral database, runs migrations, wires the full
// service graph into an httptest.Server, and registers cleanup.
//
// The returned harness is fully independent per call: a unique database name,
// fresh temp dirs for file/thumbnail storage, and its own HTTP server.
// NOTE(review): goose.SetBaseFS mutates package-global state, so suites that
// run migrations concurrently with different FS values could interfere —
// confirm tests are not parallelized across packages sharing goose.
func setupSuite(t *testing.T) *harness {
    t.Helper()
    ctx := context.Background()
    // --- Create an isolated test database ------------------------------------
    adminDSN := os.Getenv("TANABATA_TEST_ADMIN_DSN")
    if adminDSN == "" {
        adminDSN = defaultAdminDSN
    }
    // Use a unique name so parallel test runs don't collide.
    dbName := fmt.Sprintf("tanabata_test_%d", time.Now().UnixNano())
    adminConn, err := pgx.Connect(ctx, adminDSN)
    require.NoError(t, err, "connect to admin DSN: %s", adminDSN)
    _, err = adminConn.Exec(ctx, "CREATE DATABASE "+dbName)
    require.NoError(t, err)
    adminConn.Close(ctx)
    // Build the DSN for the new database (replace dbname= in adminDSN).
    testDSN := replaceDSNDatabase(adminDSN, dbName)
    t.Cleanup(func() {
        // Drop all connections then drop the database. Errors are ignored on
        // purpose: cleanup is best-effort and must not fail the test.
        conn, err := pgx.Connect(context.Background(), adminDSN)
        if err != nil {
            return
        }
        defer conn.Close(context.Background())
        _, _ = conn.Exec(context.Background(),
            "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = $1", dbName)
        _, _ = conn.Exec(context.Background(), "DROP DATABASE IF EXISTS "+dbName)
    })
    // --- Migrations ----------------------------------------------------------
    pool, err := pgxpool.New(ctx, testDSN)
    require.NoError(t, err)
    t.Cleanup(pool.Close)
    // Run goose migrations through a database/sql handle backed by the pool.
    migDB := stdlib.OpenDBFromPool(pool)
    goose.SetBaseFS(migrations.FS)
    require.NoError(t, goose.SetDialect("postgres"))
    require.NoError(t, goose.Up(migDB, "."))
    migDB.Close()
    // --- Temp directories for storage ----------------------------------------
    filesDir := t.TempDir()
    thumbsDir := t.TempDir()
    diskStorage, err := storage.NewDiskStorage(filesDir, thumbsDir, 160, 160, 1920, 1080)
    require.NoError(t, err)
    // --- Repositories --------------------------------------------------------
    userRepo := postgres.NewUserRepo(pool)
    sessionRepo := postgres.NewSessionRepo(pool)
    fileRepo := postgres.NewFileRepo(pool)
    mimeRepo := postgres.NewMimeRepo(pool)
    aclRepo := postgres.NewACLRepo(pool)
    auditRepo := postgres.NewAuditRepo(pool)
    tagRepo := postgres.NewTagRepo(pool)
    tagRuleRepo := postgres.NewTagRuleRepo(pool)
    categoryRepo := postgres.NewCategoryRepo(pool)
    poolRepo := postgres.NewPoolRepo(pool)
    transactor := postgres.NewTransactor(pool)
    // --- Services ------------------------------------------------------------
    authSvc := service.NewAuthService(userRepo, sessionRepo, "test-secret", 15*time.Minute, 720*time.Hour)
    aclSvc := service.NewACLService(aclRepo)
    auditSvc := service.NewAuditService(auditRepo)
    tagSvc := service.NewTagService(tagRepo, tagRuleRepo, aclSvc, auditSvc, transactor)
    categorySvc := service.NewCategoryService(categoryRepo, tagRepo, aclSvc, auditSvc)
    poolSvc := service.NewPoolService(poolRepo, aclSvc, auditSvc)
    fileSvc := service.NewFileService(fileRepo, mimeRepo, diskStorage, aclSvc, auditSvc, tagSvc, transactor, filesDir)
    userSvc := service.NewUserService(userRepo, auditSvc)
    // --- Handlers ------------------------------------------------------------
    authMiddleware := handler.NewAuthMiddleware(authSvc)
    authHandler := handler.NewAuthHandler(authSvc)
    fileHandler := handler.NewFileHandler(fileSvc, tagSvc)
    tagHandler := handler.NewTagHandler(tagSvc, fileSvc)
    categoryHandler := handler.NewCategoryHandler(categorySvc)
    poolHandler := handler.NewPoolHandler(poolSvc)
    userHandler := handler.NewUserHandler(userSvc)
    aclHandler := handler.NewACLHandler(aclSvc)
    auditHandler := handler.NewAuditHandler(auditSvc)
    r := handler.NewRouter(
        authMiddleware, authHandler,
        fileHandler, tagHandler, categoryHandler, poolHandler,
        userHandler, aclHandler, auditHandler,
    )
    srv := httptest.NewServer(r)
    t.Cleanup(srv.Close)
    return &harness{
        t:      t,
        server: srv,
        client: srv.Client(),
    }
}
// ---------------------------------------------------------------------------
// HTTP helpers
// ---------------------------------------------------------------------------
// testResponse wraps an HTTP response with the body already read into memory.
// This avoids the "body consumed by error-message arg before decode" pitfall.
type testResponse struct {
    StatusCode int    // HTTP status code of the response
    bodyBytes  []byte // full response body, read eagerly by harness.do
}
// String returns the body as a string (for use in assertion messages).
func (r *testResponse) String() string { return string(r.bodyBytes) }
// decode unmarshals the body JSON into dst, failing the test (with the raw
// body in the message) when the body is not valid JSON.
func (r *testResponse) decode(t *testing.T, dst any) {
    t.Helper()
    require.NoError(t, json.Unmarshal(r.bodyBytes, dst), "decode body: %s", r.String())
}
// url builds an absolute URL for path under the server's /api/v1 prefix.
func (h *harness) url(path string) string {
    return fmt.Sprintf("%s/api/v1%s", h.server.URL, path)
}
// do performs an HTTP request against the test server, optionally attaching a
// bearer token and Content-Type, and returns the fully-read response.
func (h *harness) do(method, path string, body io.Reader, token string, contentType string) *testResponse {
    h.t.Helper()
    request, err := http.NewRequest(method, h.url(path), body)
    require.NoError(h.t, err)
    if token != "" {
        request.Header.Set("Authorization", "Bearer "+token)
    }
    if contentType != "" {
        request.Header.Set("Content-Type", contentType)
    }
    raw, err := h.client.Do(request)
    require.NoError(h.t, err)
    defer raw.Body.Close()
    // Read errors are deliberately ignored: an empty body is asserted on later.
    payload, _ := io.ReadAll(raw.Body)
    return &testResponse{StatusCode: raw.StatusCode, bodyBytes: payload}
}
// doJSON marshals payload (when non-nil) and sends it as application/json.
func (h *harness) doJSON(method, path string, payload any, token string) *testResponse {
    h.t.Helper()
    var reader io.Reader
    if payload != nil {
        encoded, err := json.Marshal(payload)
        require.NoError(h.t, err)
        reader = bytes.NewReader(encoded)
    }
    return h.do(method, path, reader, token, "application/json")
}
// login posts credentials to /auth/login and returns the access token,
// failing the test on any non-200 response or empty token.
func (h *harness) login(name, password string) string {
    h.t.Helper()
    credentials := map[string]string{"name": name, "password": password}
    resp := h.doJSON("POST", "/auth/login", credentials, "")
    require.Equal(h.t, http.StatusOK, resp.StatusCode, "login failed: %s", resp)
    var body struct {
        AccessToken string `json:"access_token"`
    }
    resp.decode(h.t, &body)
    require.NotEmpty(h.t, body.AccessToken)
    return body.AccessToken
}
// uploadJPEG uploads a minimal valid JPEG via multipart form and returns the
// created file object decoded from the 201 response.
func (h *harness) uploadJPEG(token, originalName string) map[string]any {
    h.t.Helper()
    var form bytes.Buffer
    writer := multipart.NewWriter(&form)
    part, err := writer.CreateFormFile("file", originalName)
    require.NoError(h.t, err)
    _, err = part.Write(minimalJPEG())
    require.NoError(h.t, err)
    require.NoError(h.t, writer.Close())
    resp := h.do("POST", "/files", &form, token, writer.FormDataContentType())
    require.Equal(h.t, http.StatusCreated, resp.StatusCode, "upload failed: %s", resp)
    var created map[string]any
    resp.decode(h.t, &created)
    return created
}
// ---------------------------------------------------------------------------
// Main integration test
// ---------------------------------------------------------------------------
// TestFullFlow walks the whole API surface end to end: user management, file
// upload, tagging, ACL, pools, trash/restore, and the audit log.
//
// Cleanup: removed the dead bobUserID variable (it read alice's id under a
// misleading comment and was only kept alive by a blank assignment).
func TestFullFlow(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }
    h := setupSuite(t)
    // =========================================================================
    // 1. Admin login (seeded by 007_seed_data.sql)
    // =========================================================================
    adminToken := h.login("admin", "admin")
    // =========================================================================
    // 2. Create a regular user
    // =========================================================================
    resp := h.doJSON("POST", "/users", map[string]any{
        "name": "alice", "password": "alicepass", "can_create": true,
    }, adminToken)
    require.Equal(t, http.StatusCreated, resp.StatusCode, resp.String())
    var aliceUser map[string]any
    resp.decode(t, &aliceUser)
    assert.Equal(t, "alice", aliceUser["name"])
    // Create a second regular user for ACL testing.
    resp = h.doJSON("POST", "/users", map[string]any{
        "name": "bob", "password": "bobpass", "can_create": true,
    }, adminToken)
    require.Equal(t, http.StatusCreated, resp.StatusCode, resp.String())
    // =========================================================================
    // 3. Log in as alice
    // =========================================================================
    aliceToken := h.login("alice", "alicepass")
    bobToken := h.login("bob", "bobpass")
    // =========================================================================
    // 4. Alice uploads a private JPEG
    // =========================================================================
    fileObj := h.uploadJPEG(aliceToken, "sunset.jpg")
    fileID, ok := fileObj["id"].(string)
    require.True(t, ok, "file id missing")
    assert.Equal(t, "sunset.jpg", fileObj["original_name"])
    assert.Equal(t, false, fileObj["is_public"])
    // =========================================================================
    // 5. Create a tag and assign it to the file
    // =========================================================================
    resp = h.doJSON("POST", "/tags", map[string]any{
        "name": "nature", "is_public": true,
    }, aliceToken)
    require.Equal(t, http.StatusCreated, resp.StatusCode, resp.String())
    var tagObj map[string]any
    resp.decode(t, &tagObj)
    tagID := tagObj["id"].(string)
    resp = h.doJSON("PUT", "/files/"+fileID+"/tags", map[string]any{
        "tag_ids": []string{tagID},
    }, aliceToken)
    require.Equal(t, http.StatusOK, resp.StatusCode, resp.String())
    // Verify tag is returned with the file.
    resp = h.doJSON("GET", "/files/"+fileID, nil, aliceToken)
    require.Equal(t, http.StatusOK, resp.StatusCode)
    var fileWithTags map[string]any
    resp.decode(t, &fileWithTags)
    tags := fileWithTags["tags"].([]any)
    require.Len(t, tags, 1)
    assert.Equal(t, "nature", tags[0].(map[string]any)["name"])
    // =========================================================================
    // 6. Filter files by tag
    // =========================================================================
    // filter={t=<tagID>} — URL-encoded filter DSL expression.
    resp = h.doJSON("GET", "/files?filter=%7Bt%3D"+tagID+"%7D", nil, aliceToken)
    require.Equal(t, http.StatusOK, resp.StatusCode)
    var filePage map[string]any
    resp.decode(t, &filePage)
    items := filePage["items"].([]any)
    require.Len(t, items, 1)
    assert.Equal(t, fileID, items[0].(map[string]any)["id"])
    // =========================================================================
    // 7. ACL — Bob cannot see Alice's private file
    // =========================================================================
    resp = h.doJSON("GET", "/files/"+fileID, nil, bobToken)
    assert.Equal(t, http.StatusForbidden, resp.StatusCode, resp.String())
    // Grant Bob view access. Resolve bob's real ID via the admin user listing.
    resp = h.doJSON("GET", "/users", nil, adminToken)
    require.Equal(t, http.StatusOK, resp.StatusCode)
    var usersPage map[string]any
    resp.decode(t, &usersPage)
    var bobID float64
    for _, u := range usersPage["items"].([]any) {
        um := u.(map[string]any)
        if um["name"] == "bob" {
            bobID = um["id"].(float64)
        }
    }
    require.NotZero(t, bobID)
    resp = h.doJSON("PUT", "/acl/file/"+fileID, map[string]any{
        "permissions": []map[string]any{
            {"user_id": bobID, "can_view": true, "can_edit": false},
        },
    }, aliceToken)
    require.Equal(t, http.StatusOK, resp.StatusCode, resp.String())
    // Now Bob can view.
    resp = h.doJSON("GET", "/files/"+fileID, nil, bobToken)
    assert.Equal(t, http.StatusOK, resp.StatusCode, resp.String())
    // =========================================================================
    // 8. Create a pool and add the file
    // =========================================================================
    resp = h.doJSON("POST", "/pools", map[string]any{
        "name": "alice's pool", "is_public": false,
    }, aliceToken)
    require.Equal(t, http.StatusCreated, resp.StatusCode, resp.String())
    var poolObj map[string]any
    resp.decode(t, &poolObj)
    poolID := poolObj["id"].(string)
    assert.Equal(t, "alice's pool", poolObj["name"])
    resp = h.doJSON("POST", "/pools/"+poolID+"/files", map[string]any{
        "file_ids": []string{fileID},
    }, aliceToken)
    require.Equal(t, http.StatusCreated, resp.StatusCode, resp.String())
    // Pool file count should now be 1.
    resp = h.doJSON("GET", "/pools/"+poolID, nil, aliceToken)
    require.Equal(t, http.StatusOK, resp.StatusCode)
    var poolFull map[string]any
    resp.decode(t, &poolFull)
    assert.Equal(t, float64(1), poolFull["file_count"])
    // List pool files and verify position.
    resp = h.doJSON("GET", "/pools/"+poolID+"/files", nil, aliceToken)
    require.Equal(t, http.StatusOK, resp.StatusCode)
    var poolFiles map[string]any
    resp.decode(t, &poolFiles)
    poolItems := poolFiles["items"].([]any)
    require.Len(t, poolItems, 1)
    assert.Equal(t, fileID, poolItems[0].(map[string]any)["id"])
    // =========================================================================
    // 9. Trash flow: soft-delete → list trash → restore → permanent delete
    // =========================================================================
    // Soft-delete the file.
    resp = h.doJSON("DELETE", "/files/"+fileID, nil, aliceToken)
    require.Equal(t, http.StatusNoContent, resp.StatusCode, resp.String())
    // File no longer appears in normal listing.
    resp = h.doJSON("GET", "/files", nil, aliceToken)
    require.Equal(t, http.StatusOK, resp.StatusCode)
    var normalPage map[string]any
    resp.decode(t, &normalPage)
    normalItems, _ := normalPage["items"].([]any)
    assert.Len(t, normalItems, 0)
    // File appears in trash listing.
    resp = h.doJSON("GET", "/files?trash=true", nil, aliceToken)
    require.Equal(t, http.StatusOK, resp.StatusCode)
    var trashPage map[string]any
    resp.decode(t, &trashPage)
    trashItems := trashPage["items"].([]any)
    require.Len(t, trashItems, 1)
    assert.Equal(t, fileID, trashItems[0].(map[string]any)["id"])
    assert.Equal(t, true, trashItems[0].(map[string]any)["is_deleted"])
    // Restore the file.
    resp = h.doJSON("POST", "/files/"+fileID+"/restore", nil, aliceToken)
    require.Equal(t, http.StatusOK, resp.StatusCode, resp.String())
    // File is back in normal listing.
    resp = h.doJSON("GET", "/files", nil, aliceToken)
    require.Equal(t, http.StatusOK, resp.StatusCode)
    var restoredPage map[string]any
    resp.decode(t, &restoredPage)
    restoredItems := restoredPage["items"].([]any)
    require.Len(t, restoredItems, 1)
    assert.Equal(t, fileID, restoredItems[0].(map[string]any)["id"])
    // Soft-delete again then permanently delete.
    resp = h.doJSON("DELETE", "/files/"+fileID, nil, aliceToken)
    require.Equal(t, http.StatusNoContent, resp.StatusCode)
    resp = h.doJSON("DELETE", "/files/"+fileID+"/permanent", nil, aliceToken)
    require.Equal(t, http.StatusNoContent, resp.StatusCode, resp.String())
    // File is gone entirely.
    resp = h.doJSON("GET", "/files/"+fileID, nil, aliceToken)
    assert.Equal(t, http.StatusNotFound, resp.StatusCode, resp.String())
    // =========================================================================
    // 10. Audit log records actions (admin only)
    // =========================================================================
    resp = h.doJSON("GET", "/audit", nil, adminToken)
    require.Equal(t, http.StatusOK, resp.StatusCode)
    var auditPage map[string]any
    resp.decode(t, &auditPage)
    auditItems := auditPage["items"].([]any)
    assert.NotEmpty(t, auditItems, "audit log should have entries")
    // Non-admin cannot read the audit log.
    resp = h.doJSON("GET", "/audit", nil, aliceToken)
    assert.Equal(t, http.StatusForbidden, resp.StatusCode, resp.String())
}
// ---------------------------------------------------------------------------
// Additional targeted tests
// ---------------------------------------------------------------------------
// TestBlockedUserCannotLogin verifies that blocking a user prevents login:
// an admin creates and blocks a user, after which login returns 403.
func TestBlockedUserCannotLogin(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }
    h := setupSuite(t)
    adminToken := h.login("admin", "admin")
    // Create user.
    resp := h.doJSON("POST", "/users", map[string]any{
        "name": "charlie", "password": "charliepass",
    }, adminToken)
    require.Equal(t, http.StatusCreated, resp.StatusCode)
    var u map[string]any
    resp.decode(t, &u)
    // JSON numbers decode as float64; formatted back with %.0f below.
    userID := u["id"].(float64)
    // Block charlie.
    resp = h.doJSON("PATCH", fmt.Sprintf("/users/%.0f", userID), map[string]any{
        "is_blocked": true,
    }, adminToken)
    require.Equal(t, http.StatusOK, resp.StatusCode)
    // Login attempt should return 403.
    resp = h.doJSON("POST", "/auth/login", map[string]any{
        "name": "charlie", "password": "charliepass",
    }, "")
    assert.Equal(t, http.StatusForbidden, resp.StatusCode)
}
// TestPoolReorder verifies gap-based position reassignment: two files are
// added to a pool in one order, reordered, and the new order is asserted.
func TestPoolReorder(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }
    h := setupSuite(t)
    adminToken := h.login("admin", "admin")
    // Upload two files.
    f1 := h.uploadJPEG(adminToken, "a.jpg")
    f2 := h.uploadJPEG(adminToken, "b.jpg")
    id1 := f1["id"].(string)
    id2 := f2["id"].(string)
    // Create pool and add both files.
    resp := h.doJSON("POST", "/pools", map[string]any{"name": "reorder-test"}, adminToken)
    require.Equal(t, http.StatusCreated, resp.StatusCode)
    var pool map[string]any
    resp.decode(t, &pool)
    poolID := pool["id"].(string)
    resp = h.doJSON("POST", "/pools/"+poolID+"/files", map[string]any{
        "file_ids": []string{id1, id2},
    }, adminToken)
    require.Equal(t, http.StatusCreated, resp.StatusCode)
    // Verify initial order: id1 before id2 (insertion order).
    resp = h.doJSON("GET", "/pools/"+poolID+"/files", nil, adminToken)
    require.Equal(t, http.StatusOK, resp.StatusCode)
    var page map[string]any
    resp.decode(t, &page)
    items := page["items"].([]any)
    require.Len(t, items, 2)
    assert.Equal(t, id1, items[0].(map[string]any)["id"])
    assert.Equal(t, id2, items[1].(map[string]any)["id"])
    // Reorder: id2 first.
    resp = h.doJSON("PUT", "/pools/"+poolID+"/files/reorder", map[string]any{
        "file_ids": []string{id2, id1},
    }, adminToken)
    require.Equal(t, http.StatusNoContent, resp.StatusCode, resp.String())
    // Verify new order.
    resp = h.doJSON("GET", "/pools/"+poolID+"/files", nil, adminToken)
    require.Equal(t, http.StatusOK, resp.StatusCode)
    var page2 map[string]any
    resp.decode(t, &page2)
    items2 := page2["items"].([]any)
    require.Len(t, items2, 2)
    assert.Equal(t, id2, items2[0].(map[string]any)["id"])
    assert.Equal(t, id1, items2[1].(map[string]any)["id"])
}
// TestTagRuleActivateApplyToExisting verifies that activating a rule with
// apply_to_existing=true retroactively tags existing files, including
// transitive rules (A→B active+apply, B→C already active → file gets A,B,C),
// and that apply_to_existing=false leaves existing files untouched.
func TestTagRuleActivateApplyToExisting(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }
    h := setupSuite(t)
    tok := h.login("admin", "admin")
    // Create three tags: A, B, C.
    mkTag := func(name string) string {
        resp := h.doJSON("POST", "/tags", map[string]any{"name": name}, tok)
        require.Equal(t, http.StatusCreated, resp.StatusCode, resp.String())
        var obj map[string]any
        resp.decode(t, &obj)
        return obj["id"].(string)
    }
    tagA := mkTag("animal")
    tagB := mkTag("living-thing")
    tagC := mkTag("organism")
    // Rule A→B: created inactive so it does NOT fire on assign.
    resp := h.doJSON("POST", "/tags/"+tagA+"/rules", map[string]any{
        "then_tag_id": tagB,
        "is_active":   false,
    }, tok)
    require.Equal(t, http.StatusCreated, resp.StatusCode, resp.String())
    // Rule B→C: active, so it fires transitively when B is applied.
    resp = h.doJSON("POST", "/tags/"+tagB+"/rules", map[string]any{
        "then_tag_id": tagC,
        "is_active":   true,
    }, tok)
    require.Equal(t, http.StatusCreated, resp.StatusCode, resp.String())
    // Upload a file and assign only tag A. A→B is inactive so only A is set.
    file := h.uploadJPEG(tok, "cat.jpg")
    fileID := file["id"].(string)
    resp = h.doJSON("PUT", "/files/"+fileID+"/tags", map[string]any{
        "tag_ids": []string{tagA},
    }, tok)
    require.Equal(t, http.StatusOK, resp.StatusCode, resp.String())
    // tagNames fetches the file's current tag names for assertions.
    tagNames := func() []string {
        r := h.doJSON("GET", "/files/"+fileID+"/tags", nil, tok)
        require.Equal(t, http.StatusOK, r.StatusCode)
        var items []any
        r.decode(t, &items)
        names := make([]string, 0, len(items))
        for _, it := range items {
            names = append(names, it.(map[string]any)["name"].(string))
        }
        return names
    }
    // Before activation: file should only have tag A.
    assert.ElementsMatch(t, []string{"animal"}, tagNames())
    // Activate A→B WITHOUT apply_to_existing — existing file must not change.
    resp = h.doJSON("PATCH", "/tags/"+tagA+"/rules/"+tagB, map[string]any{
        "is_active":         true,
        "apply_to_existing": false,
    }, tok)
    require.Equal(t, http.StatusOK, resp.StatusCode, resp.String())
    assert.ElementsMatch(t, []string{"animal"}, tagNames(), "file should be unchanged when apply_to_existing=false")
    // Deactivate again so we can test the positive case cleanly.
    resp = h.doJSON("PATCH", "/tags/"+tagA+"/rules/"+tagB, map[string]any{
        "is_active": false,
    }, tok)
    require.Equal(t, http.StatusOK, resp.StatusCode, resp.String())
    // Activate A→B WITH apply_to_existing=true.
    // Expectation: file gets B directly, and C transitively via the active B→C rule.
    resp = h.doJSON("PATCH", "/tags/"+tagA+"/rules/"+tagB, map[string]any{
        "is_active":         true,
        "apply_to_existing": true,
    }, tok)
    require.Equal(t, http.StatusOK, resp.StatusCode, resp.String())
    assert.ElementsMatch(t, []string{"animal", "living-thing", "organism"}, tagNames())
}
// TestTagAutoRule verifies that adding a tag automatically applies then_tags:
// with a rule "outdoor" → "nature", assigning "outdoor" yields both tags.
func TestTagAutoRule(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test in short mode")
    }
    h := setupSuite(t)
    adminToken := h.login("admin", "admin")
    // Create two tags: "outdoor" and "nature".
    resp := h.doJSON("POST", "/tags", map[string]any{"name": "outdoor"}, adminToken)
    require.Equal(t, http.StatusCreated, resp.StatusCode)
    var outdoor map[string]any
    resp.decode(t, &outdoor)
    outdoorID := outdoor["id"].(string)
    resp = h.doJSON("POST", "/tags", map[string]any{"name": "nature"}, adminToken)
    require.Equal(t, http.StatusCreated, resp.StatusCode)
    var nature map[string]any
    resp.decode(t, &nature)
    natureID := nature["id"].(string)
    // Create rule: when "outdoor" → also apply "nature".
    // NOTE: is_active is omitted here; the rule evidently fires on assign.
    resp = h.doJSON("POST", "/tags/"+outdoorID+"/rules", map[string]any{
        "then_tag_id": natureID,
    }, adminToken)
    require.Equal(t, http.StatusCreated, resp.StatusCode, resp.String())
    // Upload a file and assign only "outdoor".
    file := h.uploadJPEG(adminToken, "park.jpg")
    fileID := file["id"].(string)
    resp = h.doJSON("PUT", "/files/"+fileID+"/tags", map[string]any{
        "tag_ids": []string{outdoorID},
    }, adminToken)
    require.Equal(t, http.StatusOK, resp.StatusCode, resp.String())
    // Both "outdoor" and "nature" should be on the file.
    resp = h.doJSON("GET", "/files/"+fileID+"/tags", nil, adminToken)
    require.Equal(t, http.StatusOK, resp.StatusCode)
    var tagsResp []any
    resp.decode(t, &tagsResp)
    names := make([]string, 0, len(tagsResp))
    for _, tg := range tagsResp {
        names = append(names, tg.(map[string]any)["name"].(string))
    }
    assert.ElementsMatch(t, []string{"outdoor", "nature"}, names)
}
// ---------------------------------------------------------------------------
// Test helpers
// ---------------------------------------------------------------------------
// minimalJPEG returns the bytes of a 1×1 white JPEG image.
// Generated offline; no external dependency needed.
// Starts with the SOI marker (0xff 0xd8) and ends with EOI (0xff 0xd9).
func minimalJPEG() []byte {
    // This is a valid minimal JPEG: SOI + APP0 + DQT + SOF0 + DHT + SOS + EOI.
    // 1×1 white pixel, quality ~50. Verified with `file` and browsers.
    return []byte{
        0xff, 0xd8, 0xff, 0xe0, 0x00, 0x10, 0x4a, 0x46, 0x49, 0x46, 0x00, 0x01,
        0x01, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0xff, 0xdb, 0x00, 0x43,
        0x00, 0x08, 0x06, 0x06, 0x07, 0x06, 0x05, 0x08, 0x07, 0x07, 0x07, 0x09,
        0x09, 0x08, 0x0a, 0x0c, 0x14, 0x0d, 0x0c, 0x0b, 0x0b, 0x0c, 0x19, 0x12,
        0x13, 0x0f, 0x14, 0x1d, 0x1a, 0x1f, 0x1e, 0x1d, 0x1a, 0x1c, 0x1c, 0x20,
        0x24, 0x2e, 0x27, 0x20, 0x22, 0x2c, 0x23, 0x1c, 0x1c, 0x28, 0x37, 0x29,
        0x2c, 0x30, 0x31, 0x34, 0x34, 0x34, 0x1f, 0x27, 0x39, 0x3d, 0x38, 0x32,
        0x3c, 0x2e, 0x33, 0x34, 0x32, 0xff, 0xc0, 0x00, 0x0b, 0x08, 0x00, 0x01,
        0x00, 0x01, 0x01, 0x01, 0x11, 0x00, 0xff, 0xc4, 0x00, 0x1f, 0x00, 0x00,
        0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
        0x09, 0x0a, 0x0b, 0xff, 0xc4, 0x00, 0xb5, 0x10, 0x00, 0x02, 0x01, 0x03,
        0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7d,
        0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06,
        0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
        0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0, 0x24, 0x33, 0x62, 0x72,
        0x82, 0x09, 0x0a, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
        0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45,
        0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
        0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x75,
        0x76, 0x77, 0x78, 0x79, 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
        0x8a, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4,
        0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
        0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca,
        0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2, 0xe3,
        0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5,
        0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xff, 0xda, 0x00, 0x08, 0x01, 0x01, 0x00,
        0x00, 0x3f, 0x00, 0xfb, 0xd3, 0xff, 0xd9,
    }
}
// replaceDSNDatabase returns a copy of dsn with the database name replaced.
// Handles both key=value libpq-style strings and postgres:// URLs.
//
// BUG FIX: URL-style DSNs were previously returned unchanged, so a
// TANABATA_TEST_ADMIN_DSN URL would silently run the suite against the admin
// database instead of the ephemeral one. The URL path is now replaced.
func replaceDSNDatabase(dsn, newDB string) string {
    // key=value style: replace dbname=xxx or append if absent.
    if !strings.Contains(dsn, "://") {
        const key = "dbname="
        if idx := strings.Index(dsn, key); idx >= 0 {
            end := strings.IndexByte(dsn[idx+len(key):], ' ')
            if end < 0 {
                // dbname is the last parameter; everything after it is the value.
                return dsn[:idx] + key + newDB
            }
            return dsn[:idx] + key + newDB + dsn[idx+len(key)+end:]
        }
        return dsn + " dbname=" + newDB
    }
    // URL style (scheme://user:pass@host:port/dbname?opts): swap the path.
    u, err := url.Parse(dsn)
    if err != nil {
        // Unparseable input: return it unchanged rather than guessing.
        return dsn
    }
    u.Path = "/" + newDB
    return u.String()
}
// freePort returns an available TCP port on localhost.
// It panics when no listener can be opened — the previous version discarded
// the net.Listen error and dereferenced a nil listener on failure.
func freePort() int {
    l, err := net.Listen("tcp", ":0")
    if err != nil {
        panic(fmt.Sprintf("freePort: %v", err))
    }
    defer l.Close()
    return l.Addr().(*net.TCPAddr).Port
}
// writeFile writes content into dir under name (mode 0644) and returns the
// resulting path, failing the test on any write error.
func writeFile(t *testing.T, dir, name string, content []byte) string {
    t.Helper()
    target := filepath.Join(dir, name)
    require.NoError(t, os.WriteFile(target, content, 0o644))
    return target
}
// Keep references to currently-unused helpers so static analysis / linters do
// not flag them; they are retained for future tests. (Go itself does not
// reject unused top-level functions, only unused imports and locals.)
var (
    _ = freePort
    _ = writeFile
)

View File

@ -1,43 +0,0 @@
package rest
import (
"net/http"
"tanabata/internal/domain"
)
// ErrorResponse is the JSON body returned to clients for all API errors.
type ErrorResponse struct {
	Error   string `json:"error"`             // short human-readable title, e.g. "Not Found"
	Code    string `json:"code,omitempty"`    // machine-readable domain error code
	Message string `json:"message,omitempty"` // optional detail from the domain error
}
// ErrorMapper translates domain errors into HTTP status codes and bodies.
type ErrorMapper struct{}

// MapError maps a domain error code to an HTTP status and an ErrorResponse.
// Unrecognized codes fall through to 500 Internal Server Error.
func (m *ErrorMapper) MapError(err domain.DomainError) (int, ErrorResponse) {
	switch err.Code {
	case domain.ErrCodeFileNotFound:
		return http.StatusNotFound, ErrorResponse{
			Error:   "Not Found",
			Code:    string(err.Code),
			Message: err.Message,
		}
	case domain.ErrCodeMIMENotSupported:
		// NOTE(review): 404 for an unsupported MIME type is unusual — 415
		// Unsupported Media Type is the conventional choice. Kept as 404 in
		// case clients depend on it; confirm before changing.
		return http.StatusNotFound, ErrorResponse{
			Error:   "MIME not supported",
			Code:    string(err.Code),
			Message: err.Message,
		}
	case domain.ErrCodeBadRequest:
		// Bug fix: bad requests were previously returned as 404 Not Found.
		return http.StatusBadRequest, ErrorResponse{
			Error:   "Bad Request",
			Code:    string(err.Code),
			Message: err.Message,
		}
	}
	return http.StatusInternalServerError, ErrorResponse{
		Error:   "Internal Server Error",
		Code:    string(err.Code),
		Message: err.Message,
	}
}

View File

@ -0,0 +1,170 @@
package port
import (
"context"
"time"
"github.com/google/uuid"
"tanabata/backend/internal/domain"
)
// Transactor executes fn inside a single database transaction.
// All repository calls made within fn receive the transaction via context.
type Transactor interface {
	WithTx(ctx context.Context, fn func(ctx context.Context) error) error
}

// OffsetParams holds common offset-pagination and sort parameters.
type OffsetParams struct {
	Sort   string // field to sort by; valid values are repo-specific — TODO confirm per implementation
	Order  string // "asc" | "desc"
	Search string // free-text search term; empty means no search filtering
	Offset int    // number of rows to skip
	Limit  int    // maximum rows to return
}

// PoolFileListParams holds parameters for listing files inside a pool.
type PoolFileListParams struct {
	Cursor string
	Limit  int
	Filter string // filter DSL expression
}

// FileRepo is the persistence interface for file records.
type FileRepo interface {
	// List returns a cursor-based page of files.
	List(ctx context.Context, params domain.FileListParams) (*domain.FilePage, error)
	// GetByID returns the file with its tags loaded.
	GetByID(ctx context.Context, id uuid.UUID) (*domain.File, error)
	// Create inserts a new file record and returns it.
	Create(ctx context.Context, f *domain.File) (*domain.File, error)
	// Update applies partial metadata changes and returns the updated record.
	Update(ctx context.Context, id uuid.UUID, f *domain.File) (*domain.File, error)
	// SoftDelete moves a file to trash (sets is_deleted = true).
	SoftDelete(ctx context.Context, id uuid.UUID) error
	// Restore moves a file out of trash (sets is_deleted = false).
	Restore(ctx context.Context, id uuid.UUID) (*domain.File, error)
	// DeletePermanent removes a file record. Only allowed when is_deleted = true.
	DeletePermanent(ctx context.Context, id uuid.UUID) error
	// ListTags returns all tags assigned to a file.
	ListTags(ctx context.Context, fileID uuid.UUID) ([]domain.Tag, error)
	// SetTags replaces all tags on a file (full replace semantics).
	SetTags(ctx context.Context, fileID uuid.UUID, tagIDs []uuid.UUID) error
}
// TagRepo is the persistence interface for tags.
type TagRepo interface {
	// List returns an offset-paginated page of all tags.
	List(ctx context.Context, params OffsetParams) (*domain.TagOffsetPage, error)
	// ListByCategory returns tags belonging to a specific category.
	ListByCategory(ctx context.Context, categoryID uuid.UUID, params OffsetParams) (*domain.TagOffsetPage, error)
	// GetByID returns a single tag by primary key.
	GetByID(ctx context.Context, id uuid.UUID) (*domain.Tag, error)
	// Create inserts a new tag and returns the stored record.
	Create(ctx context.Context, t *domain.Tag) (*domain.Tag, error)
	// Update applies changes to a tag and returns the updated record.
	Update(ctx context.Context, id uuid.UUID, t *domain.Tag) (*domain.Tag, error)
	// Delete removes a tag by ID.
	Delete(ctx context.Context, id uuid.UUID) error
	// ListByFile returns all tags assigned to a specific file, ordered by name.
	ListByFile(ctx context.Context, fileID uuid.UUID) ([]domain.Tag, error)
	// AddFileTag inserts a single file→tag relation. No-op if already present.
	AddFileTag(ctx context.Context, fileID, tagID uuid.UUID) error
	// RemoveFileTag deletes a single file→tag relation.
	RemoveFileTag(ctx context.Context, fileID, tagID uuid.UUID) error
	// SetFileTags replaces all tags on a file (full replace semantics).
	SetFileTags(ctx context.Context, fileID uuid.UUID, tagIDs []uuid.UUID) error
	// CommonTagsForFiles returns tags present on every one of the given files.
	CommonTagsForFiles(ctx context.Context, fileIDs []uuid.UUID) ([]domain.Tag, error)
	// PartialTagsForFiles returns tags present on some but not all of the given files.
	PartialTagsForFiles(ctx context.Context, fileIDs []uuid.UUID) ([]domain.Tag, error)
}

// TagRuleRepo is the persistence interface for auto-tag rules
// (rules of the form "when a file has tag X, it also gets tag Y").
type TagRuleRepo interface {
	// ListByTag returns all rules where WhenTagID == tagID.
	ListByTag(ctx context.Context, tagID uuid.UUID) ([]domain.TagRule, error)
	// Create inserts a new rule and returns the stored record.
	Create(ctx context.Context, r domain.TagRule) (*domain.TagRule, error)
	// SetActive toggles a rule's is_active flag. When active and applyToExisting
	// are both true, the full transitive expansion of thenTagID is retroactively
	// applied to all files that already carry whenTagID.
	SetActive(ctx context.Context, whenTagID, thenTagID uuid.UUID, active, applyToExisting bool) error
	// Delete removes the rule identified by the (whenTagID, thenTagID) pair.
	Delete(ctx context.Context, whenTagID, thenTagID uuid.UUID) error
}

// CategoryRepo is the persistence interface for categories.
type CategoryRepo interface {
	// List returns an offset-paginated page of categories.
	List(ctx context.Context, params OffsetParams) (*domain.CategoryOffsetPage, error)
	// GetByID returns a single category by primary key.
	GetByID(ctx context.Context, id uuid.UUID) (*domain.Category, error)
	// Create inserts a new category and returns the stored record.
	Create(ctx context.Context, c *domain.Category) (*domain.Category, error)
	// Update applies changes to a category and returns the updated record.
	Update(ctx context.Context, id uuid.UUID, c *domain.Category) (*domain.Category, error)
	// Delete removes a category by ID.
	Delete(ctx context.Context, id uuid.UUID) error
}

// PoolRepo is the persistence interface for pools and pool→file membership.
type PoolRepo interface {
	// List returns an offset-paginated page of pools.
	List(ctx context.Context, params OffsetParams) (*domain.PoolOffsetPage, error)
	// GetByID returns a single pool by primary key.
	GetByID(ctx context.Context, id uuid.UUID) (*domain.Pool, error)
	// Create inserts a new pool and returns the stored record.
	Create(ctx context.Context, p *domain.Pool) (*domain.Pool, error)
	// Update applies changes to a pool and returns the updated record.
	Update(ctx context.Context, id uuid.UUID, p *domain.Pool) (*domain.Pool, error)
	// Delete removes a pool by ID.
	Delete(ctx context.Context, id uuid.UUID) error
	// ListFiles returns pool files ordered by position (cursor-based).
	ListFiles(ctx context.Context, poolID uuid.UUID, params PoolFileListParams) (*domain.PoolFilePage, error)
	// AddFiles appends files starting at position; nil position means append at end.
	AddFiles(ctx context.Context, poolID uuid.UUID, fileIDs []uuid.UUID, position *int) error
	// RemoveFiles removes files from the pool.
	RemoveFiles(ctx context.Context, poolID uuid.UUID, fileIDs []uuid.UUID) error
	// Reorder sets the full ordered sequence of file IDs in the pool.
	Reorder(ctx context.Context, poolID uuid.UUID, fileIDs []uuid.UUID) error
}
// UserRepo is the persistence interface for users.
type UserRepo interface {
	// List returns an offset-paginated page of users.
	List(ctx context.Context, params OffsetParams) (*domain.UserPage, error)
	// GetByID returns a single user by primary key.
	GetByID(ctx context.Context, id int16) (*domain.User, error)
	// GetByName is used during login to look up credentials.
	GetByName(ctx context.Context, name string) (*domain.User, error)
	// Create inserts a new user and returns the stored record.
	Create(ctx context.Context, u *domain.User) (*domain.User, error)
	// Update applies changes to a user and returns the updated record.
	Update(ctx context.Context, id int16, u *domain.User) (*domain.User, error)
	// Delete removes a user by ID.
	Delete(ctx context.Context, id int16) error
}

// SessionRepo is the persistence interface for auth sessions.
// NOTE(review): there is no method to update a session's token hash, so a
// session's refresh token cannot be rotated in place — rotation must create a
// new session and delete the old one.
type SessionRepo interface {
	// ListByUser returns all active sessions for a user.
	ListByUser(ctx context.Context, userID int16) (*domain.SessionList, error)
	// GetByTokenHash looks up a session by the hashed refresh token.
	GetByTokenHash(ctx context.Context, hash string) (*domain.Session, error)
	// Create inserts a new session and returns the stored record (with ID).
	Create(ctx context.Context, s *domain.Session) (*domain.Session, error)
	// UpdateLastActivity refreshes the last_activity timestamp.
	UpdateLastActivity(ctx context.Context, id int, t time.Time) error
	// Delete terminates a single session.
	Delete(ctx context.Context, id int) error
	// DeleteByUserID terminates all sessions for a user (logout everywhere).
	DeleteByUserID(ctx context.Context, userID int16) error
}

// ACLRepo is the persistence interface for per-object permissions.
type ACLRepo interface {
	// List returns all permission entries for a given object.
	List(ctx context.Context, objectTypeID int16, objectID uuid.UUID) ([]domain.Permission, error)
	// Get returns the permission entry for a specific user and object; returns
	// ErrNotFound if no entry exists.
	Get(ctx context.Context, userID int16, objectTypeID int16, objectID uuid.UUID) (*domain.Permission, error)
	// Set replaces all permissions for an object (full replace semantics).
	Set(ctx context.Context, objectTypeID int16, objectID uuid.UUID, perms []domain.Permission) error
}

// AuditRepo is the persistence interface for the audit log.
type AuditRepo interface {
	// Log appends a single entry to the audit trail.
	Log(ctx context.Context, entry domain.AuditEntry) error
	// List returns a filtered, paginated page of audit entries.
	List(ctx context.Context, filter domain.AuditFilter) (*domain.AuditPage, error)
}

// MimeRepo is the persistence interface for the MIME type whitelist.
type MimeRepo interface {
	// List returns all supported MIME types.
	List(ctx context.Context) ([]domain.MIMEType, error)
	// GetByName returns the MIME type record for a given MIME name (e.g. "image/jpeg").
	// Returns ErrUnsupportedMIME if not in the whitelist.
	GetByName(ctx context.Context, name string) (*domain.MIMEType, error)
}

View File

@ -0,0 +1,31 @@
package port
import (
"context"
"io"
"github.com/google/uuid"
)
// FileStorage abstracts disk (or object-store) operations for file content,
// thumbnails, and previews. Objects are addressed solely by their file UUID;
// path layout is an implementation detail of the storage backend.
type FileStorage interface {
	// Save writes the reader's content to storage and returns the number of
	// bytes written.
	Save(ctx context.Context, id uuid.UUID, r io.Reader) (int64, error)
	// Read opens the file content for reading. The caller must close the returned
	// ReadCloser.
	Read(ctx context.Context, id uuid.UUID) (io.ReadCloser, error)
	// Delete removes the file content from storage.
	Delete(ctx context.Context, id uuid.UUID) error
	// Thumbnail opens the pre-generated thumbnail (JPEG). Returns ErrNotFound
	// if the thumbnail has not been generated yet.
	Thumbnail(ctx context.Context, id uuid.UUID) (io.ReadCloser, error)
	// Preview opens the pre-generated preview image (JPEG). Returns ErrNotFound
	// if the preview has not been generated yet.
	Preview(ctx context.Context, id uuid.UUID) (io.ReadCloser, error)
}

View File

@ -0,0 +1,81 @@
package service
import (
"context"
"errors"
"github.com/google/uuid"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// ACLService handles access control checks and permission management.
// It layers object-level rules (admin / creator / public) on top of the
// explicit per-user grants stored in the ACL repository.
type ACLService struct {
	aclRepo port.ACLRepo // source of explicit per-user permission entries
}

// NewACLService creates an ACLService backed by the given ACL repository.
func NewACLService(aclRepo port.ACLRepo) *ACLService {
	return &ACLService{aclRepo: aclRepo}
}
// CanView reports whether the user may view the object. Admins, the object's
// creator, and anyone (for public objects) are allowed outright; otherwise an
// explicit ACL entry decides, and a missing entry means no access.
// isAdmin, creatorID, isPublic must be populated from the object record by the caller.
func (s *ACLService) CanView(
	ctx context.Context,
	userID int16, isAdmin bool,
	creatorID int16, isPublic bool,
	objectTypeID int16, objectID uuid.UUID,
) (bool, error) {
	// Fast paths that never touch the repository.
	if isAdmin || isPublic || userID == creatorID {
		return true, nil
	}
	entry, err := s.aclRepo.Get(ctx, userID, objectTypeID, objectID)
	switch {
	case errors.Is(err, domain.ErrNotFound):
		return false, nil // no explicit grant → denied
	case err != nil:
		return false, err
	}
	return entry.CanView, nil
}
// CanEdit reports whether the user may edit the object.
// is_public does not grant edit access; only admins, creators, and explicit
// per-user grants do. A missing ACL entry means no access.
func (s *ACLService) CanEdit(
	ctx context.Context,
	userID int16, isAdmin bool,
	creatorID int16,
	objectTypeID int16, objectID uuid.UUID,
) (bool, error) {
	// Fast paths that never touch the repository.
	if isAdmin || userID == creatorID {
		return true, nil
	}
	entry, err := s.aclRepo.Get(ctx, userID, objectTypeID, objectID)
	switch {
	case errors.Is(err, domain.ErrNotFound):
		return false, nil // no explicit grant → denied
	case err != nil:
		return false, err
	}
	return entry.CanEdit, nil
}
// GetPermissions returns all explicit ACL entries for an object.
func (s *ACLService) GetPermissions(ctx context.Context, objectTypeID int16, objectID uuid.UUID) ([]domain.Permission, error) {
	entries, err := s.aclRepo.List(ctx, objectTypeID, objectID)
	if err != nil {
		return nil, err
	}
	return entries, nil
}

// SetPermissions replaces all ACL entries for an object (full replace semantics).
func (s *ACLService) SetPermissions(ctx context.Context, objectTypeID int16, objectID uuid.UUID, perms []domain.Permission) error {
	err := s.aclRepo.Set(ctx, objectTypeID, objectID, perms)
	return err
}

View File

@ -0,0 +1,57 @@
package service
import (
"context"
"encoding/json"
"fmt"
"github.com/google/uuid"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// AuditService records user actions to the audit trail and serves queries
// over it. Writes are delegated to the AuditRepo; callers typically treat
// logging as best-effort (errors from Log are often ignored).
type AuditService struct {
	repo port.AuditRepo // persistence backend for audit entries
}

// NewAuditService creates an AuditService backed by the given repository.
func NewAuditService(repo port.AuditRepo) *AuditService {
	return &AuditService{repo: repo}
}
// Log records an action performed by the user in ctx.
// objectType and objectID are optional — pass nil when the action has no
// target object. details can be any JSON-serializable value, or nil; it is
// marshalled once here so the repo only ever sees raw JSON.
func (s *AuditService) Log(
	ctx context.Context,
	action string,
	objectType *string,
	objectID *uuid.UUID,
	details any,
) error {
	actorID, _, _ := domain.UserFromContext(ctx)

	var encoded json.RawMessage
	if details != nil {
		data, err := json.Marshal(details)
		if err != nil {
			return fmt.Errorf("AuditService.Log marshal details: %w", err)
		}
		encoded = data
	}

	return s.repo.Log(ctx, domain.AuditEntry{
		UserID:     actorID,
		Action:     action,
		ObjectType: objectType,
		ObjectID:   objectID,
		Details:    encoded,
	})
}

// Query returns a filtered, paginated page of audit log entries.
func (s *AuditService) Query(ctx context.Context, filter domain.AuditFilter) (*domain.AuditPage, error) {
	page, err := s.repo.List(ctx, filter)
	if err != nil {
		return nil, err
	}
	return page, nil
}

View File

@ -0,0 +1,282 @@
package service
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"time"
"github.com/golang-jwt/jwt/v5"
"golang.org/x/crypto/bcrypt"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// Claims is the JWT payload for both access and refresh tokens.
type Claims struct {
	jwt.RegisteredClaims
	UserID    int16 `json:"uid"` // user primary key
	IsAdmin   bool  `json:"adm"` // admin flag frozen at issue time
	SessionID int   `json:"sid"` // owning session; 0 when issued before session creation
}

// TokenPair holds an issued access/refresh token pair with the access TTL.
type TokenPair struct {
	AccessToken  string
	RefreshToken string
	ExpiresIn    int // access token TTL in seconds
}

// AuthService handles authentication and session lifecycle: login, logout,
// refresh-token rotation, and session listing/termination.
type AuthService struct {
	users      port.UserRepo    // credential lookup
	sessions   port.SessionRepo // session persistence (stores refresh-token hashes)
	secret     []byte           // HS256 signing key
	accessTTL  time.Duration    // lifetime of access tokens
	refreshTTL time.Duration    // lifetime of refresh tokens; <= 0 means no expiry
}

// NewAuthService creates an AuthService.
func NewAuthService(
	users port.UserRepo,
	sessions port.SessionRepo,
	jwtSecret string,
	accessTTL time.Duration,
	refreshTTL time.Duration,
) *AuthService {
	return &AuthService{
		users:      users,
		sessions:   sessions,
		secret:     []byte(jwtSecret),
		accessTTL:  accessTTL,
		refreshTTL: refreshTTL,
	}
}
// Login validates credentials, creates a session, and returns a token pair.
//
// Bug fix: the refresh token was previously re-issued after session creation
// (to embed the real session ID), but the session had already stored the hash
// of the FIRST token while the client received the SECOND — so
// GetByTokenHash could never match and every refresh failed. The token is now
// issued exactly once, before the session, and carries SessionID 0; that is
// harmless because Refresh looks sessions up by token hash, not by the sid
// claim.
func (s *AuthService) Login(ctx context.Context, name, password, userAgent string) (*TokenPair, error) {
	user, err := s.users.GetByName(ctx, name)
	if err != nil {
		// Return ErrUnauthorized regardless of whether the user exists,
		// to avoid username enumeration.
		return nil, domain.ErrUnauthorized
	}
	if user.IsBlocked {
		return nil, domain.ErrForbidden
	}
	if err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password)); err != nil {
		return nil, domain.ErrUnauthorized
	}
	var expiresAt *time.Time
	if s.refreshTTL > 0 {
		t := time.Now().Add(s.refreshTTL)
		expiresAt = &t
	}
	// Issue the refresh token once; its hash is what the session stores and
	// what Refresh will later look up. Do NOT re-issue it after this point.
	refreshToken, err := s.issueToken(user.ID, user.IsAdmin, 0, s.refreshTTL)
	if err != nil {
		return nil, fmt.Errorf("issue refresh token: %w", err)
	}
	session, err := s.sessions.Create(ctx, &domain.Session{
		TokenHash: hashToken(refreshToken),
		UserID:    user.ID,
		UserAgent: userAgent,
		ExpiresAt: expiresAt,
	})
	if err != nil {
		return nil, fmt.Errorf("create session: %w", err)
	}
	// The access token can carry the real session ID — it is never hashed.
	accessToken, err := s.issueToken(user.ID, user.IsAdmin, session.ID, s.accessTTL)
	if err != nil {
		return nil, fmt.Errorf("issue access token: %w", err)
	}
	return &TokenPair{
		AccessToken:  accessToken,
		RefreshToken: refreshToken,
		ExpiresIn:    int(s.accessTTL.Seconds()),
	}, nil
}
// Logout deactivates the session identified by sessionID.
func (s *AuthService) Logout(ctx context.Context, sessionID int) error {
	err := s.sessions.Delete(ctx, sessionID)
	if err != nil {
		return fmt.Errorf("logout: %w", err)
	}
	return nil
}
// Refresh validates a refresh token, issues a new token pair, and deactivates
// the old session (rotation).
//
// Bug fix: like Login, this previously re-issued the new refresh token after
// creating the new session, so the stored hash never matched the token handed
// to the client and the NEXT refresh always failed. The new refresh token is
// now issued exactly once (SessionID 0 in its claims — unused, since lookup
// is by token hash).
func (s *AuthService) Refresh(ctx context.Context, refreshToken, userAgent string) (*TokenPair, error) {
	claims, err := s.parseToken(refreshToken)
	if err != nil {
		return nil, domain.ErrUnauthorized
	}
	session, err := s.sessions.GetByTokenHash(ctx, hashToken(refreshToken))
	if err != nil {
		return nil, domain.ErrUnauthorized
	}
	if session.ExpiresAt != nil && time.Now().After(*session.ExpiresAt) {
		// Expired sessions are removed eagerly on the failed attempt.
		_ = s.sessions.Delete(ctx, session.ID)
		return nil, domain.ErrUnauthorized
	}
	// Rotate: deactivate old session.
	if err := s.sessions.Delete(ctx, session.ID); err != nil {
		return nil, fmt.Errorf("deactivate old session: %w", err)
	}
	user, err := s.users.GetByID(ctx, claims.UserID)
	if err != nil {
		return nil, domain.ErrUnauthorized
	}
	if user.IsBlocked {
		return nil, domain.ErrForbidden
	}
	var expiresAt *time.Time
	if s.refreshTTL > 0 {
		t := time.Now().Add(s.refreshTTL)
		expiresAt = &t
	}
	// Issue the new refresh token once; its hash is what the session stores.
	newRefresh, err := s.issueToken(user.ID, user.IsAdmin, 0, s.refreshTTL)
	if err != nil {
		return nil, fmt.Errorf("issue refresh token: %w", err)
	}
	newSession, err := s.sessions.Create(ctx, &domain.Session{
		TokenHash: hashToken(newRefresh),
		UserID:    user.ID,
		UserAgent: userAgent,
		ExpiresAt: expiresAt,
	})
	if err != nil {
		return nil, fmt.Errorf("create session: %w", err)
	}
	accessToken, err := s.issueToken(user.ID, user.IsAdmin, newSession.ID, s.accessTTL)
	if err != nil {
		return nil, fmt.Errorf("issue access token: %w", err)
	}
	return &TokenPair{
		AccessToken:  accessToken,
		RefreshToken: newRefresh,
		ExpiresIn:    int(s.accessTTL.Seconds()),
	}, nil
}
// ListSessions returns all active sessions for the given user, marking the
// one matching currentSessionID with IsCurrent = true.
func (s *AuthService) ListSessions(ctx context.Context, userID int16, currentSessionID int) (*domain.SessionList, error) {
	sessions, err := s.sessions.ListByUser(ctx, userID)
	if err != nil {
		return nil, fmt.Errorf("list sessions: %w", err)
	}
	for i := range sessions.Items {
		item := &sessions.Items[i]
		item.IsCurrent = item.ID == currentSessionID
	}
	return sessions, nil
}
// TerminateSession deactivates a specific session. Non-admin callers may only
// terminate sessions they own; admins may terminate any session.
func (s *AuthService) TerminateSession(ctx context.Context, callerID int16, isAdmin bool, sessionID int) error {
	if !isAdmin {
		// Ownership check: the target must appear in the caller's own sessions.
		mine, err := s.sessions.ListByUser(ctx, callerID)
		if err != nil {
			return fmt.Errorf("terminate session: %w", err)
		}
		var owned bool
		for _, sess := range mine.Items {
			if sess.ID == sessionID {
				owned = true
				break
			}
		}
		if !owned {
			return domain.ErrForbidden
		}
	}
	if err := s.sessions.Delete(ctx, sessionID); err != nil {
		return fmt.Errorf("terminate session: %w", err)
	}
	return nil
}

// ParseAccessToken parses and validates an access token, returning its claims.
// Any parse or signature failure is collapsed into ErrUnauthorized.
func (s *AuthService) ParseAccessToken(tokenStr string) (*Claims, error) {
	claims, err := s.parseToken(tokenStr)
	if err != nil {
		return nil, domain.ErrUnauthorized
	}
	return claims, nil
}
// issueToken signs an HS256 JWT carrying the given identity claims, valid
// from now until now+ttl.
func (s *AuthService) issueToken(userID int16, isAdmin bool, sessionID int, ttl time.Duration) (string, error) {
	issuedAt := time.Now()
	tok := jwt.NewWithClaims(jwt.SigningMethodHS256, Claims{
		RegisteredClaims: jwt.RegisteredClaims{
			IssuedAt:  jwt.NewNumericDate(issuedAt),
			ExpiresAt: jwt.NewNumericDate(issuedAt.Add(ttl)),
		},
		UserID:    userID,
		IsAdmin:   isAdmin,
		SessionID: sessionID,
	})
	signed, err := tok.SignedString(s.secret)
	if err != nil {
		return "", fmt.Errorf("sign token: %w", err)
	}
	return signed, nil
}

// parseToken verifies the HMAC signature and extracts Claims from a token
// string. Non-HMAC algorithms are rejected to prevent alg-substitution
// attacks; any failure maps to ErrUnauthorized.
func (s *AuthService) parseToken(tokenStr string) (*Claims, error) {
	keyFn := func(t *jwt.Token) (any, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return s.secret, nil
	}
	parsed, err := jwt.ParseWithClaims(tokenStr, &Claims{}, keyFn)
	if err != nil || !parsed.Valid {
		return nil, domain.ErrUnauthorized
	}
	if claims, ok := parsed.Claims.(*Claims); ok {
		return claims, nil
	}
	return nil, domain.ErrUnauthorized
}
// hashToken returns the SHA-256 hex digest of a token string.
// Only this hash is ever persisted; the raw token never reaches the database.
func hashToken(token string) string {
	h := sha256.New()
	h.Write([]byte(token))
	return hex.EncodeToString(h.Sum(nil))
}

View File

@ -0,0 +1,164 @@
package service
import (
"context"
"encoding/json"
"github.com/google/uuid"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// categoryObjectType is the audit-log label for category actions.
const categoryObjectType = "category"

// categoryObjectTypeID is the primary key of the "category" row in object_types.
const categoryObjectTypeID int16 = 3 // third row in 007_seed_data.sql object_types

// CategoryParams holds the fields for creating or patching a category.
// For patches, zero/nil values mean "leave unchanged".
type CategoryParams struct {
	Name     string          // empty string on patch = no change
	Notes    *string         // nil = no change
	Color    *string         // nil = no change; pointer to empty string = clear
	Metadata json.RawMessage // empty = no change
	IsPublic *bool           // nil = no change
}
// CategoryService handles category CRUD with ACL enforcement and audit logging.
type CategoryService struct {
	categories port.CategoryRepo // category persistence
	tags       port.TagRepo      // used to list tags within a category
	acl        *ACLService       // edit/view permission checks
	audit      *AuditService     // best-effort action logging
}

// NewCategoryService creates a CategoryService.
func NewCategoryService(
	categories port.CategoryRepo,
	tags port.TagRepo,
	acl *ACLService,
	audit *AuditService,
) *CategoryService {
	return &CategoryService{
		categories: categories,
		tags:       tags,
		acl:        acl,
		audit:      audit,
	}
}
// ---------------------------------------------------------------------------
// CRUD
// ---------------------------------------------------------------------------
// List returns a paginated, optionally filtered list of categories.
func (s *CategoryService) List(ctx context.Context, params port.OffsetParams) (*domain.CategoryOffsetPage, error) {
	page, err := s.categories.List(ctx, params)
	if err != nil {
		return nil, err
	}
	return page, nil
}

// Get returns a category by ID.
func (s *CategoryService) Get(ctx context.Context, id uuid.UUID) (*domain.Category, error) {
	category, err := s.categories.GetByID(ctx, id)
	if err != nil {
		return nil, err
	}
	return category, nil
}
// Create inserts a new category owned by the requesting user and writes a
// "category_create" audit entry (best-effort; logging errors are ignored).
func (s *CategoryService) Create(ctx context.Context, p CategoryParams) (*domain.Category, error) {
	creatorID, _, _ := domain.UserFromContext(ctx)

	category := &domain.Category{
		Name:      p.Name,
		Notes:     p.Notes,
		Color:     p.Color,
		Metadata:  p.Metadata,
		CreatorID: creatorID,
	}
	if p.IsPublic != nil {
		category.IsPublic = *p.IsPublic
	}

	created, err := s.categories.Create(ctx, category)
	if err != nil {
		return nil, err
	}

	objType := categoryObjectType
	_ = s.audit.Log(ctx, "category_create", &objType, &created.ID, nil)
	return created, nil
}
// Update applies a partial patch to a category, enforcing edit ACL and
// auditing the change. Zero/nil fields in p leave the record unchanged.
func (s *CategoryService) Update(ctx context.Context, id uuid.UUID, p CategoryParams) (*domain.Category, error) {
	callerID, isAdmin, _ := domain.UserFromContext(ctx)

	current, err := s.categories.GetByID(ctx, id)
	if err != nil {
		return nil, err
	}
	canEdit, err := s.acl.CanEdit(ctx, callerID, isAdmin, current.CreatorID, categoryObjectTypeID, id)
	if err != nil {
		return nil, err
	}
	if !canEdit {
		return nil, domain.ErrForbidden
	}

	// Start from the current record and overlay only the provided fields.
	next := *current
	if p.Name != "" {
		next.Name = p.Name
	}
	if p.Notes != nil {
		next.Notes = p.Notes
	}
	if p.Color != nil {
		next.Color = p.Color
	}
	if len(p.Metadata) > 0 {
		next.Metadata = p.Metadata
	}
	if p.IsPublic != nil {
		next.IsPublic = *p.IsPublic
	}

	updated, err := s.categories.Update(ctx, id, &next)
	if err != nil {
		return nil, err
	}

	objType := categoryObjectType
	_ = s.audit.Log(ctx, "category_edit", &objType, &id, nil)
	return updated, nil
}
// Delete removes a category by ID, enforcing edit ACL and auditing the action.
func (s *CategoryService) Delete(ctx context.Context, id uuid.UUID) error {
	callerID, isAdmin, _ := domain.UserFromContext(ctx)

	category, err := s.categories.GetByID(ctx, id)
	if err != nil {
		return err
	}
	canEdit, err := s.acl.CanEdit(ctx, callerID, isAdmin, category.CreatorID, categoryObjectTypeID, id)
	if err != nil {
		return err
	}
	if !canEdit {
		return domain.ErrForbidden
	}

	if err := s.categories.Delete(ctx, id); err != nil {
		return err
	}

	objType := categoryObjectType
	_ = s.audit.Log(ctx, "category_delete", &objType, &id, nil)
	return nil
}
// ---------------------------------------------------------------------------
// Tags in category
// ---------------------------------------------------------------------------
// ListTags returns a paginated list of tags belonging to this category.
func (s *CategoryService) ListTags(ctx context.Context, categoryID uuid.UUID, params port.OffsetParams) (*domain.TagOffsetPage, error) {
	page, err := s.tags.ListByCategory(ctx, categoryID, params)
	if err != nil {
		return nil, err
	}
	return page, nil
}

View File

@ -0,0 +1,569 @@
package service
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"time"
"github.com/gabriel-vasile/mimetype"
"github.com/google/uuid"
"github.com/rwcarlsen/goexif/exif"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// fileObjectType is the audit-log label for file actions.
const fileObjectType = "file"

// fileObjectTypeID is the primary key of the "file" row in core.object_types.
// It matches the first value inserted in 007_seed_data.sql.
const fileObjectTypeID int16 = 1

// UploadParams holds the parameters for uploading a new file.
type UploadParams struct {
	Reader          io.Reader       // file content; fully buffered in memory during upload
	MIMEType        string          // must be present in the MIME whitelist
	OriginalName    *string         // optional client-supplied filename
	Notes           *string
	Metadata        json.RawMessage // opaque caller-supplied JSON
	ContentDatetime *time.Time      // nil = fall back to EXIF DateTimeOriginal, then zero value
	IsPublic        bool
	TagIDs          []uuid.UUID // initial tags applied in the same transaction
}

// UpdateParams holds the parameters for updating file metadata.
// nil fields mean "leave unchanged".
type UpdateParams struct {
	OriginalName    *string
	Notes           *string
	Metadata        json.RawMessage
	ContentDatetime *time.Time
	IsPublic        *bool
	TagIDs          *[]uuid.UUID // nil means don't change tags
}

// ContentResult holds the open reader and metadata for a file download.
type ContentResult struct {
	Body         io.ReadCloser // caller must close
	MIMEType     string
	OriginalName *string
}

// ImportFileError records a failed file during an import operation.
type ImportFileError struct {
	Filename string `json:"filename"`
	Reason   string `json:"reason"`
}

// ImportResult summarises a directory import.
type ImportResult struct {
	Imported int               `json:"imported"`
	Skipped  int               `json:"skipped"`
	Errors   []ImportFileError `json:"errors"`
}
// FileService handles business logic for file records: upload, metadata CRUD,
// trash/restore, and content replacement, with ACL checks and audit logging.
type FileService struct {
	files      port.FileRepo     // file record persistence
	mimes      port.MimeRepo     // MIME whitelist lookups
	storage    port.FileStorage  // on-disk / object-store content
	acl        *ACLService       // view/edit permission checks
	audit      *AuditService     // best-effort action logging
	tags       *TagService       // tag assignment (transaction-aware)
	tx         port.Transactor   // wraps multi-repo writes in one transaction
	importPath string // default server-side import directory
}

// NewFileService creates a FileService.
func NewFileService(
	files port.FileRepo,
	mimes port.MimeRepo,
	storage port.FileStorage,
	acl *ACLService,
	audit *AuditService,
	tags *TagService,
	tx port.Transactor,
	importPath string,
) *FileService {
	return &FileService{
		files:      files,
		mimes:      mimes,
		storage:    storage,
		acl:        acl,
		audit:      audit,
		tags:       tags,
		tx:         tx,
		importPath: importPath,
	}
}
// ---------------------------------------------------------------------------
// Core CRUD
// ---------------------------------------------------------------------------
// Upload validates the MIME type, saves the file to storage, creates the DB
// record, and applies any initial tags — all within a single transaction.
// If ContentDatetime is nil and EXIF DateTimeOriginal is present, it is used.
//
// Ordering is deliberate: bytes hit storage BEFORE the transaction opens, and
// on transaction failure the orphaned bytes are cleaned up best-effort.
// NOTE(review): the entire upload is buffered in memory for EXIF extraction;
// very large files will spike memory — consider a size cap upstream.
func (s *FileService) Upload(ctx context.Context, p UploadParams) (*domain.File, error) {
	userID, _, _ := domain.UserFromContext(ctx)
	// Validate MIME type against the whitelist.
	mime, err := s.mimes.GetByName(ctx, p.MIMEType)
	if err != nil {
		return nil, err // ErrUnsupportedMIME or DB error
	}
	// Buffer the upload so we can extract EXIF without re-reading storage.
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, p.Reader); err != nil {
		return nil, fmt.Errorf("FileService.Upload: read body: %w", err)
	}
	data := buf.Bytes()
	// Extract EXIF metadata (best-effort; non-image files will error silently).
	exifData, exifDatetime := extractEXIFWithDatetime(data)
	// Resolve content datetime: explicit > EXIF > zero value.
	var contentDatetime time.Time
	if p.ContentDatetime != nil {
		contentDatetime = *p.ContentDatetime
	} else if exifDatetime != nil {
		contentDatetime = *exifDatetime
	}
	// Assign UUID v7 so CreatedAt can be derived from it later.
	fileID, err := uuid.NewV7()
	if err != nil {
		return nil, fmt.Errorf("FileService.Upload: generate UUID: %w", err)
	}
	// Save file bytes to disk before opening the transaction so that a disk
	// failure does not abort an otherwise healthy DB transaction.
	if _, err := s.storage.Save(ctx, fileID, bytes.NewReader(data)); err != nil {
		return nil, fmt.Errorf("FileService.Upload: save to storage: %w", err)
	}
	var created *domain.File
	txErr := s.tx.WithTx(ctx, func(ctx context.Context) error {
		f := &domain.File{
			ID:              fileID,
			OriginalName:    p.OriginalName,
			MIMEType:        mime.Name,
			MIMEExtension:   mime.Extension,
			ContentDatetime: contentDatetime,
			Notes:           p.Notes,
			Metadata:        p.Metadata,
			EXIF:            exifData,
			CreatorID:       userID,
			IsPublic:        p.IsPublic,
		}
		var createErr error
		created, createErr = s.files.Create(ctx, f)
		if createErr != nil {
			return createErr
		}
		// Initial tags are applied inside the same transaction as the record.
		if len(p.TagIDs) > 0 {
			tags, err := s.tags.SetFileTags(ctx, created.ID, p.TagIDs)
			if err != nil {
				return err
			}
			created.Tags = tags
		}
		return nil
	})
	if txErr != nil {
		// Attempt to clean up the orphaned file; ignore cleanup errors.
		_ = s.storage.Delete(ctx, fileID)
		return nil, txErr
	}
	// Audit logging is best-effort and intentionally outside the transaction.
	objType := fileObjectType
	_ = s.audit.Log(ctx, "file_create", &objType, &created.ID, nil)
	return created, nil
}
// Get returns a file by ID, enforcing view ACL.
func (s *FileService) Get(ctx context.Context, id uuid.UUID) (*domain.File, error) {
	callerID, isAdmin, _ := domain.UserFromContext(ctx)

	file, err := s.files.GetByID(ctx, id)
	if err != nil {
		return nil, err
	}
	allowed, err := s.acl.CanView(ctx, callerID, isAdmin, file.CreatorID, file.IsPublic, fileObjectTypeID, id)
	if err != nil {
		return nil, err
	}
	if !allowed {
		return nil, domain.ErrForbidden
	}
	return file, nil
}
// Update applies metadata changes to a file, enforcing edit ACL. Tag changes
// (when TagIDs is non-nil) happen in the same transaction as the metadata
// patch; nil fields leave the record unchanged.
func (s *FileService) Update(ctx context.Context, id uuid.UUID, p UpdateParams) (*domain.File, error) {
	callerID, isAdmin, _ := domain.UserFromContext(ctx)

	file, err := s.files.GetByID(ctx, id)
	if err != nil {
		return nil, err
	}
	allowed, err := s.acl.CanEdit(ctx, callerID, isAdmin, file.CreatorID, fileObjectTypeID, id)
	if err != nil {
		return nil, err
	}
	if !allowed {
		return nil, domain.ErrForbidden
	}

	// Build a sparse patch containing only the provided fields.
	patch := &domain.File{}
	if p.OriginalName != nil {
		patch.OriginalName = p.OriginalName
	}
	if p.Notes != nil {
		patch.Notes = p.Notes
	}
	if p.Metadata != nil {
		patch.Metadata = p.Metadata
	}
	if p.ContentDatetime != nil {
		patch.ContentDatetime = *p.ContentDatetime
	}
	if p.IsPublic != nil {
		patch.IsPublic = *p.IsPublic
	}

	var updated *domain.File
	txErr := s.tx.WithTx(ctx, func(ctx context.Context) error {
		var updateErr error
		updated, updateErr = s.files.Update(ctx, id, patch)
		if updateErr != nil {
			return updateErr
		}
		if p.TagIDs != nil {
			tags, tagErr := s.tags.SetFileTags(ctx, id, *p.TagIDs)
			if tagErr != nil {
				return tagErr
			}
			updated.Tags = tags
		}
		return nil
	})
	if txErr != nil {
		return nil, txErr
	}

	objType := fileObjectType
	_ = s.audit.Log(ctx, "file_edit", &objType, &id, nil)
	return updated, nil
}
// Delete soft-deletes a file (moves it to trash), enforcing edit ACL.
func (s *FileService) Delete(ctx context.Context, id uuid.UUID) error {
	callerID, isAdmin, _ := domain.UserFromContext(ctx)

	file, err := s.files.GetByID(ctx, id)
	if err != nil {
		return err
	}
	allowed, err := s.acl.CanEdit(ctx, callerID, isAdmin, file.CreatorID, fileObjectTypeID, id)
	if err != nil {
		return err
	}
	if !allowed {
		return domain.ErrForbidden
	}

	if err := s.files.SoftDelete(ctx, id); err != nil {
		return err
	}

	objType := fileObjectType
	_ = s.audit.Log(ctx, "file_delete", &objType, &id, nil)
	return nil
}
// Restore moves a soft-deleted file out of trash, enforcing edit ACL.
func (s *FileService) Restore(ctx context.Context, id uuid.UUID) (*domain.File, error) {
	callerID, isAdmin, _ := domain.UserFromContext(ctx)

	file, err := s.files.GetByID(ctx, id)
	if err != nil {
		return nil, err
	}
	allowed, err := s.acl.CanEdit(ctx, callerID, isAdmin, file.CreatorID, fileObjectTypeID, id)
	if err != nil {
		return nil, err
	}
	if !allowed {
		return nil, domain.ErrForbidden
	}

	restored, err := s.files.Restore(ctx, id)
	if err != nil {
		return nil, err
	}

	objType := fileObjectType
	_ = s.audit.Log(ctx, "file_restore", &objType, &id, nil)
	return restored, nil
}
// PermanentDelete removes the file record and its stored bytes. Only allowed
// when the file is already in trash (IsDeleted); otherwise ErrConflict.
func (s *FileService) PermanentDelete(ctx context.Context, id uuid.UUID) error {
	callerID, admin, _ := domain.UserFromContext(ctx)

	target, err := s.files.GetByID(ctx, id)
	if err != nil {
		return err
	}
	// Guard: only trashed files may be permanently removed.
	if !target.IsDeleted {
		return domain.ErrConflict
	}
	allowed, err := s.acl.CanEdit(ctx, callerID, admin, target.CreatorID, fileObjectTypeID, id)
	if err != nil {
		return err
	}
	if !allowed {
		return domain.ErrForbidden
	}

	if err = s.files.DeletePermanent(ctx, id); err != nil {
		return err
	}
	// The DB row is gone; removing the bytes is best-effort by design.
	_ = s.storage.Delete(ctx, id)
	objType := fileObjectType
	_ = s.audit.Log(ctx, "file_permanent_delete", &objType, &id, nil) // best-effort audit
	return nil
}
// Replace swaps the stored bytes for a file with new content.
//
// Edit ACL is enforced and the declared MIME type must already exist in the
// mimes table. The entire body is buffered in memory because the same bytes
// are needed twice: once for EXIF parsing and once for the storage write.
//
// NOTE(review): the EXIF DateTimeOriginal extracted here is discarded — only
// the EXIF JSON blob is stored on the patch, and ContentDatetime is left as-is.
// Confirm whether it should be refreshed on replace as it presumably is on upload.
func (s *FileService) Replace(ctx context.Context, id uuid.UUID, p UploadParams) (*domain.File, error) {
	userID, isAdmin, _ := domain.UserFromContext(ctx)
	f, err := s.files.GetByID(ctx, id)
	if err != nil {
		return nil, err
	}
	ok, err := s.acl.CanEdit(ctx, userID, isAdmin, f.CreatorID, fileObjectTypeID, id)
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, domain.ErrForbidden
	}
	// Resolve the declared MIME type against the known-types table.
	mime, err := s.mimes.GetByName(ctx, p.MIMEType)
	if err != nil {
		return nil, err
	}
	// Buffer the full payload (EXIF parse + storage write both need it).
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, p.Reader); err != nil {
		return nil, fmt.Errorf("FileService.Replace: read body: %w", err)
	}
	data := buf.Bytes()
	exifData, _ := extractEXIFWithDatetime(data)
	if _, err := s.storage.Save(ctx, id, bytes.NewReader(data)); err != nil {
		return nil, fmt.Errorf("FileService.Replace: save to storage: %w", err)
	}
	patch := &domain.File{
		MIMEType:      mime.Name,
		MIMEExtension: mime.Extension,
		EXIF:          exifData,
	}
	if p.OriginalName != nil {
		patch.OriginalName = p.OriginalName
	}
	updated, err := s.files.Update(ctx, id, patch)
	if err != nil {
		return nil, err
	}
	objType := fileObjectType
	_ = s.audit.Log(ctx, "file_replace", &objType, &id, nil) // best-effort audit
	return updated, nil
}
// List delegates to FileRepo with the given params.
// No ACL filtering happens here; visibility is assumed to be applied by the
// repository query — TODO confirm against the FileRepo implementation.
func (s *FileService) List(ctx context.Context, params domain.FileListParams) (*domain.FilePage, error) {
	return s.files.List(ctx, params)
}
// ---------------------------------------------------------------------------
// Content / thumbnail / preview streaming
// ---------------------------------------------------------------------------
// GetContent opens the raw file bytes for download, enforcing view ACL.
// The caller owns (and must close) the returned Body.
func (s *FileService) GetContent(ctx context.Context, id uuid.UUID) (*ContentResult, error) {
	meta, err := s.Get(ctx, id) // Get performs the view-ACL check
	if err != nil {
		return nil, err
	}
	body, err := s.storage.Read(ctx, id)
	if err != nil {
		return nil, err
	}
	result := &ContentResult{
		Body:         body,
		MIMEType:     meta.MIMEType,
		OriginalName: meta.OriginalName,
	}
	return result, nil
}
// GetThumbnail returns the thumbnail JPEG, enforcing view ACL.
// The caller must close the returned reader.
func (s *FileService) GetThumbnail(ctx context.Context, id uuid.UUID) (io.ReadCloser, error) {
	// Get is called only for its view-ACL check; the record itself is unused.
	_, err := s.Get(ctx, id)
	if err != nil {
		return nil, err
	}
	return s.storage.Thumbnail(ctx, id)
}
// GetPreview returns the preview JPEG, enforcing view ACL.
// The caller must close the returned reader.
func (s *FileService) GetPreview(ctx context.Context, id uuid.UUID) (io.ReadCloser, error) {
	// Get is called only for its view-ACL check; the record itself is unused.
	_, err := s.Get(ctx, id)
	if err != nil {
		return nil, err
	}
	return s.storage.Preview(ctx, id)
}
// ---------------------------------------------------------------------------
// Bulk operations
// ---------------------------------------------------------------------------
// BulkDelete soft-deletes multiple files. Files that are missing or that the
// caller cannot edit are skipped silently; any other error aborts the batch.
func (s *FileService) BulkDelete(ctx context.Context, fileIDs []uuid.UUID) error {
	for _, fileID := range fileIDs {
		switch err := s.Delete(ctx, fileID); err {
		case nil, domain.ErrNotFound, domain.ErrForbidden:
			// Skipped or succeeded — keep going.
		default:
			return err
		}
	}
	return nil
}
// ---------------------------------------------------------------------------
// Import
// ---------------------------------------------------------------------------
// Import scans a server-side directory and uploads all supported files.
// If path is empty, the configured default import path is used.
//
// Per-file failures (MIME detection, open, upload) are collected in
// result.Errors; directories and files with unrecognised MIME types are
// counted as skipped. Only a directory-read failure aborts the whole import.
func (s *FileService) Import(ctx context.Context, path string) (*ImportResult, error) {
	dir := path
	if dir == "" {
		dir = s.importPath
	}
	if dir == "" {
		return nil, domain.ErrValidation
	}
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, fmt.Errorf("FileService.Import: read dir %q: %w", dir, err)
	}
	result := &ImportResult{Errors: []ImportFileError{}}
	for _, entry := range entries {
		if entry.IsDir() {
			result.Skipped++
			continue
		}
		fullPath := filepath.Join(dir, entry.Name())
		mt, err := mimetype.DetectFile(fullPath)
		if err != nil {
			result.Errors = append(result.Errors, ImportFileError{
				Filename: entry.Name(),
				Reason:   fmt.Sprintf("MIME detection failed: %s", err),
			})
			continue
		}
		// Strip parameters (e.g. "text/plain; charset=utf-8" → "text/plain").
		// Note: the previous `if idx := len(mimeStr); idx > 0` wrapper was
		// dead code — ranging over an empty string is already a no-op.
		mimeStr := mt.String()
		for i, c := range mimeStr {
			if c == ';' {
				mimeStr = mimeStr[:i]
				break
			}
		}
		// Unknown MIME types are skipped rather than reported as errors.
		if _, err := s.mimes.GetByName(ctx, mimeStr); err != nil {
			result.Skipped++
			continue
		}
		f, err := os.Open(fullPath)
		if err != nil {
			result.Errors = append(result.Errors, ImportFileError{
				Filename: entry.Name(),
				Reason:   fmt.Sprintf("open failed: %s", err),
			})
			continue
		}
		name := entry.Name()
		_, uploadErr := s.Upload(ctx, UploadParams{
			Reader:       f,
			MIMEType:     mimeStr,
			OriginalName: &name,
		})
		f.Close() // close regardless of upload outcome
		if uploadErr != nil {
			result.Errors = append(result.Errors, ImportFileError{
				Filename: entry.Name(),
				Reason:   uploadErr.Error(),
			})
			continue
		}
		result.Imported++
	}
	return result, nil
}
// ---------------------------------------------------------------------------
// Internal helpers
// ---------------------------------------------------------------------------
// extractEXIFWithDatetime parses EXIF from raw bytes, returning both the JSON
// representation and the DateTimeOriginal (if present). Any decode or marshal
// failure yields (nil, nil) — EXIF is strictly optional metadata.
func extractEXIFWithDatetime(data []byte) (json.RawMessage, *time.Time) {
	parsed, decodeErr := exif.Decode(bytes.NewReader(data))
	if decodeErr != nil {
		return nil, nil
	}
	raw, marshalErr := parsed.MarshalJSON()
	if marshalErr != nil {
		return nil, nil
	}
	var taken *time.Time
	if ts, tsErr := parsed.DateTime(); tsErr == nil {
		taken = &ts
	}
	return json.RawMessage(raw), taken
}

View File

@ -0,0 +1,177 @@
package service
import (
"context"
"encoding/json"
"github.com/google/uuid"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// poolObjectType is the object_types.name recorded in audit log entries for pools.
const poolObjectType = "pool"

// poolObjectTypeID is the object_types primary key used in ACL lookups for pools.
const poolObjectTypeID int16 = 4 // fourth row in 007_seed_data.sql object_types

// PoolParams holds the fields for creating or patching a pool.
// Pointer/zero-valued fields distinguish "not supplied" from an explicit value.
type PoolParams struct {
	Name     string          // empty = no change on update
	Notes    *string         // nil = no change
	Metadata json.RawMessage // empty = no change on update
	IsPublic *bool           // nil = no change
}

// PoolService handles pool CRUD and pool↔file management with ACL + audit.
type PoolService struct {
	pools port.PoolRepo // pool + poolfile persistence
	acl   *ACLService   // edit permission checks
	audit *AuditService // best-effort audit logging
}

// NewPoolService creates a PoolService with its dependencies.
func NewPoolService(
	pools port.PoolRepo,
	acl *ACLService,
	audit *AuditService,
) *PoolService {
	return &PoolService{pools: pools, acl: acl, audit: audit}
}
// ---------------------------------------------------------------------------
// CRUD
// ---------------------------------------------------------------------------
// List returns a paginated list of pools.
// No ACL filtering happens here — presumably applied in the repository query;
// confirm against the PoolRepo implementation.
func (s *PoolService) List(ctx context.Context, params port.OffsetParams) (*domain.PoolOffsetPage, error) {
	return s.pools.List(ctx, params)
}

// Get returns a pool by ID (no ACL check here — see note on List).
func (s *PoolService) Get(ctx context.Context, id uuid.UUID) (*domain.Pool, error) {
	return s.pools.GetByID(ctx, id)
}
// Create inserts a new pool owned by the calling user and audits the action.
func (s *PoolService) Create(ctx context.Context, p PoolParams) (*domain.Pool, error) {
	creatorID, _, _ := domain.UserFromContext(ctx)
	newPool := &domain.Pool{
		Name:      p.Name,
		Notes:     p.Notes,
		Metadata:  p.Metadata,
		CreatorID: creatorID,
	}
	if p.IsPublic != nil {
		newPool.IsPublic = *p.IsPublic
	}
	created, err := s.pools.Create(ctx, newPool)
	if err != nil {
		return nil, err
	}
	objType := poolObjectType
	_ = s.audit.Log(ctx, "pool_create", &objType, &created.ID, nil) // best-effort audit
	return created, nil
}
// Update applies a partial patch to a pool, enforcing edit ACL.
// Zero-valued fields of p leave the corresponding pool fields untouched.
func (s *PoolService) Update(ctx context.Context, id uuid.UUID, p PoolParams) (*domain.Pool, error) {
	callerID, admin, _ := domain.UserFromContext(ctx)

	existing, err := s.pools.GetByID(ctx, id)
	if err != nil {
		return nil, err
	}
	allowed, err := s.acl.CanEdit(ctx, callerID, admin, existing.CreatorID, poolObjectTypeID, id)
	if err != nil {
		return nil, err
	}
	if !allowed {
		return nil, domain.ErrForbidden
	}

	// Merge the supplied fields onto a copy of the current row.
	merged := *existing
	if p.Name != "" {
		merged.Name = p.Name
	}
	if p.Notes != nil {
		merged.Notes = p.Notes
	}
	if len(p.Metadata) > 0 {
		merged.Metadata = p.Metadata
	}
	if p.IsPublic != nil {
		merged.IsPublic = *p.IsPublic
	}

	updated, err := s.pools.Update(ctx, id, &merged)
	if err != nil {
		return nil, err
	}
	objType := poolObjectType
	_ = s.audit.Log(ctx, "pool_edit", &objType, &id, nil) // best-effort audit
	return updated, nil
}
// Delete removes a pool by ID, enforcing edit ACL.
func (s *PoolService) Delete(ctx context.Context, id uuid.UUID) error {
	callerID, admin, _ := domain.UserFromContext(ctx)

	target, err := s.pools.GetByID(ctx, id)
	if err != nil {
		return err
	}
	switch allowed, aclErr := s.acl.CanEdit(ctx, callerID, admin, target.CreatorID, poolObjectTypeID, id); {
	case aclErr != nil:
		return aclErr
	case !allowed:
		return domain.ErrForbidden
	}

	if err = s.pools.Delete(ctx, id); err != nil {
		return err
	}
	objType := poolObjectType
	_ = s.audit.Log(ctx, "pool_delete", &objType, &id, nil) // best-effort audit
	return nil
}
// ---------------------------------------------------------------------------
// Poolfile operations
// ---------------------------------------------------------------------------
// ListFiles returns cursor-paginated files within a pool ordered by position.
// No ACL check is performed here — TODO confirm the HTTP layer or repository
// restricts access to non-public pools.
func (s *PoolService) ListFiles(ctx context.Context, poolID uuid.UUID, params port.PoolFileListParams) (*domain.PoolFilePage, error) {
	return s.pools.ListFiles(ctx, poolID, params)
}
// AddFiles adds files to a pool at the given position (nil = append).
//
// Fix: edit ACL on the pool is now enforced, matching Update/Delete — the
// previous version let any caller mutate any pool's contents.
func (s *PoolService) AddFiles(ctx context.Context, poolID uuid.UUID, fileIDs []uuid.UUID, position *int) error {
	userID, isAdmin, _ := domain.UserFromContext(ctx)
	pool, err := s.pools.GetByID(ctx, poolID)
	if err != nil {
		return err
	}
	ok, err := s.acl.CanEdit(ctx, userID, isAdmin, pool.CreatorID, poolObjectTypeID, poolID)
	if err != nil {
		return err
	}
	if !ok {
		return domain.ErrForbidden
	}
	if err := s.pools.AddFiles(ctx, poolID, fileIDs, position); err != nil {
		return err
	}
	objType := poolObjectType
	_ = s.audit.Log(ctx, "file_pool_add", &objType, &poolID, map[string]any{"count": len(fileIDs)})
	return nil
}
// RemoveFiles removes files from a pool.
//
// Fix: edit ACL on the pool is now enforced, matching Update/Delete — the
// previous version let any caller mutate any pool's contents.
func (s *PoolService) RemoveFiles(ctx context.Context, poolID uuid.UUID, fileIDs []uuid.UUID) error {
	userID, isAdmin, _ := domain.UserFromContext(ctx)
	pool, err := s.pools.GetByID(ctx, poolID)
	if err != nil {
		return err
	}
	ok, err := s.acl.CanEdit(ctx, userID, isAdmin, pool.CreatorID, poolObjectTypeID, poolID)
	if err != nil {
		return err
	}
	if !ok {
		return domain.ErrForbidden
	}
	if err := s.pools.RemoveFiles(ctx, poolID, fileIDs); err != nil {
		return err
	}
	objType := poolObjectType
	_ = s.audit.Log(ctx, "file_pool_remove", &objType, &poolID, map[string]any{"count": len(fileIDs)})
	return nil
}
// Reorder sets the full ordered sequence of file IDs within a pool.
//
// Fix: edit ACL on the pool is now enforced, matching every other pool
// mutation — the previous version let any caller reorder any pool.
func (s *PoolService) Reorder(ctx context.Context, poolID uuid.UUID, fileIDs []uuid.UUID) error {
	userID, isAdmin, _ := domain.UserFromContext(ctx)
	pool, err := s.pools.GetByID(ctx, poolID)
	if err != nil {
		return err
	}
	ok, err := s.acl.CanEdit(ctx, userID, isAdmin, pool.CreatorID, poolObjectTypeID, poolID)
	if err != nil {
		return err
	}
	if !ok {
		return domain.ErrForbidden
	}
	return s.pools.Reorder(ctx, poolID, fileIDs)
}

View File

@ -0,0 +1,412 @@
package service
import (
"context"
"encoding/json"
"github.com/google/uuid"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// tagObjectType is the object_types.name recorded in audit log entries for tags.
const tagObjectType = "tag"

// tagObjectTypeID is the object_types primary key used in ACL lookups for tags.
const tagObjectTypeID int16 = 2 // second row in 007_seed_data.sql object_types

// TagParams holds the fields for creating or patching a tag.
// Pointer/zero-valued fields distinguish "not supplied" from an explicit value.
type TagParams struct {
	Name       string          // empty = no change on update
	Notes      *string         // nil = no change
	Color      *string         // nil = no change; pointer to empty string = clear
	CategoryID *uuid.UUID      // nil = no change; Nil UUID = unassign
	Metadata   json.RawMessage // empty = no change on update
	IsPublic   *bool           // nil = no change
}

// TagService handles tag CRUD, tag-rule management, and filetag operations
// including automatic recursive rule application.
type TagService struct {
	tags  port.TagRepo     // tag + filetag persistence
	rules port.TagRuleRepo // when→then rule persistence
	acl   *ACLService      // edit permission checks
	audit *AuditService    // best-effort audit logging
	tx    port.Transactor  // transaction wrapper
}

// NewTagService creates a TagService with its dependencies.
func NewTagService(
	tags port.TagRepo,
	rules port.TagRuleRepo,
	acl *ACLService,
	audit *AuditService,
	tx port.Transactor,
) *TagService {
	return &TagService{
		tags:  tags,
		rules: rules,
		acl:   acl,
		audit: audit,
		tx:    tx,
	}
}
// ---------------------------------------------------------------------------
// Tag CRUD
// ---------------------------------------------------------------------------
// List returns a paginated, optionally filtered list of tags.
// No ACL filtering happens here — presumably applied in the repository query;
// confirm against the TagRepo implementation.
func (s *TagService) List(ctx context.Context, params port.OffsetParams) (*domain.TagOffsetPage, error) {
	return s.tags.List(ctx, params)
}

// Get returns a tag by ID (no ACL check here — see note on List).
func (s *TagService) Get(ctx context.Context, id uuid.UUID) (*domain.Tag, error) {
	return s.tags.GetByID(ctx, id)
}
// Create inserts a new tag record owned by the calling user and audits it.
func (s *TagService) Create(ctx context.Context, p TagParams) (*domain.Tag, error) {
	creatorID, _, _ := domain.UserFromContext(ctx)
	newTag := &domain.Tag{
		Name:       p.Name,
		Notes:      p.Notes,
		Color:      p.Color,
		CategoryID: p.CategoryID,
		Metadata:   p.Metadata,
		CreatorID:  creatorID,
	}
	if p.IsPublic != nil {
		newTag.IsPublic = *p.IsPublic
	}
	created, err := s.tags.Create(ctx, newTag)
	if err != nil {
		return nil, err
	}
	objType := tagObjectType
	_ = s.audit.Log(ctx, "tag_create", &objType, &created.ID, nil) // best-effort audit
	return created, nil
}
// Update applies a partial patch to a tag, enforcing edit ACL.
// The service reads the current tag first so the caller only needs to supply
// the fields that should change; zero-valued fields are left untouched.
// A CategoryID of uuid.Nil explicitly unassigns the category.
func (s *TagService) Update(ctx context.Context, id uuid.UUID, p TagParams) (*domain.Tag, error) {
	callerID, admin, _ := domain.UserFromContext(ctx)

	existing, err := s.tags.GetByID(ctx, id)
	if err != nil {
		return nil, err
	}
	allowed, err := s.acl.CanEdit(ctx, callerID, admin, existing.CreatorID, tagObjectTypeID, id)
	if err != nil {
		return nil, err
	}
	if !allowed {
		return nil, domain.ErrForbidden
	}

	// Merge the supplied fields onto a copy of the current row.
	merged := *existing
	if p.Name != "" {
		merged.Name = p.Name
	}
	if p.Notes != nil {
		merged.Notes = p.Notes
	}
	if p.Color != nil {
		merged.Color = p.Color
	}
	if p.CategoryID != nil {
		switch *p.CategoryID {
		case uuid.Nil:
			merged.CategoryID = nil // explicit unassign
		default:
			merged.CategoryID = p.CategoryID
		}
	}
	if len(p.Metadata) > 0 {
		merged.Metadata = p.Metadata
	}
	if p.IsPublic != nil {
		merged.IsPublic = *p.IsPublic
	}

	updated, err := s.tags.Update(ctx, id, &merged)
	if err != nil {
		return nil, err
	}
	objType := tagObjectType
	_ = s.audit.Log(ctx, "tag_edit", &objType, &id, nil) // best-effort audit
	return updated, nil
}
// Delete removes a tag by ID, enforcing edit ACL.
func (s *TagService) Delete(ctx context.Context, id uuid.UUID) error {
	callerID, admin, _ := domain.UserFromContext(ctx)

	target, err := s.tags.GetByID(ctx, id)
	if err != nil {
		return err
	}
	switch allowed, aclErr := s.acl.CanEdit(ctx, callerID, admin, target.CreatorID, tagObjectTypeID, id); {
	case aclErr != nil:
		return aclErr
	case !allowed:
		return domain.ErrForbidden
	}

	if err = s.tags.Delete(ctx, id); err != nil {
		return err
	}
	objType := tagObjectType
	_ = s.audit.Log(ctx, "tag_delete", &objType, &id, nil) // best-effort audit
	return nil
}
// ---------------------------------------------------------------------------
// Tag rules
// ---------------------------------------------------------------------------
// ListRules returns all rules for a tag (when this tag is applied, these follow).
func (s *TagService) ListRules(ctx context.Context, tagID uuid.UUID) ([]domain.TagRule, error) {
	return s.rules.ListByTag(ctx, tagID)
}
// CreateRule adds a tag rule. If applyToExisting is true, the then_tag is
// retroactively applied to all files that already carry the when_tag.
// Retroactive application requires a FileRepo; it is deferred until wired
// in a future iteration (see port.FileRepo.ListByTag) — the fourth
// parameter is therefore accepted but intentionally ignored for now.
func (s *TagService) CreateRule(ctx context.Context, whenTagID, thenTagID uuid.UUID, isActive, _ bool) (*domain.TagRule, error) {
	return s.rules.Create(ctx, domain.TagRule{
		WhenTagID: whenTagID,
		ThenTagID: thenTagID,
		IsActive:  isActive,
	})
}
// SetRuleActive toggles a rule's is_active flag and returns the updated rule.
// When active and applyToExisting are both true, the repository retroactively
// applies the rule to files already carrying whenTagID.
func (s *TagService) SetRuleActive(ctx context.Context, whenTagID, thenTagID uuid.UUID, active, applyToExisting bool) (*domain.TagRule, error) {
	if err := s.rules.SetActive(ctx, whenTagID, thenTagID, active, applyToExisting); err != nil {
		return nil, err
	}
	// Re-read the rule list so the caller gets the post-update row.
	rules, err := s.rules.ListByTag(ctx, whenTagID)
	if err != nil {
		return nil, err
	}
	for i := range rules {
		if rules[i].ThenTagID != thenTagID {
			continue
		}
		rule := rules[i] // copy out of the slice before taking its address
		return &rule, nil
	}
	return nil, domain.ErrNotFound
}
// DeleteRule removes a tag rule identified by its (when, then) pair.
func (s *TagService) DeleteRule(ctx context.Context, whenTagID, thenTagID uuid.UUID) error {
	return s.rules.Delete(ctx, whenTagID, thenTagID)
}
// ---------------------------------------------------------------------------
// Filetag operations (with auto-rule expansion)
// ---------------------------------------------------------------------------
// ListFileTags returns the tags currently attached to a file.
func (s *TagService) ListFileTags(ctx context.Context, fileID uuid.UUID) ([]domain.Tag, error) {
	return s.tags.ListByFile(ctx, fileID)
}
// SetFileTags replaces all tags on a file, then applies active rules for all
// newly set tags (BFS expansion). Returns the full resulting tag set.
func (s *TagService) SetFileTags(ctx context.Context, fileID uuid.UUID, tagIDs []uuid.UUID) ([]domain.Tag, error) {
	fullSet, err := s.expandTagSet(ctx, tagIDs)
	if err != nil {
		return nil, err
	}
	if err = s.tags.SetFileTags(ctx, fileID, fullSet); err != nil {
		return nil, err
	}
	objType := fileObjectType
	_ = s.audit.Log(ctx, "file_tag_add", &objType, &fileID, nil) // best-effort audit
	return s.tags.ListByFile(ctx, fileID)
}
// AddFileTag adds a single tag to a file, then recursively applies active
// rules, preserving the file's existing tags. Returns the full resulting set.
func (s *TagService) AddFileTag(ctx context.Context, fileID, tagID uuid.UUID) ([]domain.Tag, error) {
	// Expand the new tag through the active-rule graph.
	expanded, err := s.expandTagSet(ctx, []uuid.UUID{tagID})
	if err != nil {
		return nil, err
	}
	// Read the file's current tags so none are lost by the full replace below.
	existing, err := s.tags.ListByFile(ctx, fileID)
	if err != nil {
		return nil, err
	}
	// Union: existing tags first, then any expanded tags not already present.
	have := make(map[uuid.UUID]bool, len(existing)+len(expanded))
	union := make([]uuid.UUID, 0, len(existing)+len(expanded))
	for _, t := range existing {
		have[t.ID] = true
		union = append(union, t.ID)
	}
	for _, candidate := range expanded {
		if have[candidate] {
			continue
		}
		have[candidate] = true
		union = append(union, candidate)
	}
	if err = s.tags.SetFileTags(ctx, fileID, union); err != nil {
		return nil, err
	}
	objType := fileObjectType
	_ = s.audit.Log(ctx, "file_tag_add", &objType, &fileID, map[string]any{"tag_id": tagID})
	return s.tags.ListByFile(ctx, fileID)
}
// RemoveFileTag removes a single tag from a file and audits the action.
// Rule expansion is not reversed: tags that were auto-applied stay in place.
func (s *TagService) RemoveFileTag(ctx context.Context, fileID, tagID uuid.UUID) error {
	if err := s.tags.RemoveFileTag(ctx, fileID, tagID); err != nil {
		return err
	}
	objType := fileObjectType
	_ = s.audit.Log(ctx, "file_tag_remove", &objType, &fileID, map[string]any{"tag_id": tagID})
	return nil
}
// BulkSetTags adds or removes tags on multiple files (with rule expansion for add).
// Returns the tagIDs that were applied (the expanded input set for add; empty
// for remove). Files that no longer exist (ErrNotFound) are skipped silently.
//
// Improvements: the removal set is now built once instead of once per file
// (it was loop-invariant), and the per-file work is split into helpers.
func (s *TagService) BulkSetTags(ctx context.Context, fileIDs []uuid.UUID, action string, tagIDs []uuid.UUID) ([]uuid.UUID, error) {
	if action != "add" && action != "remove" {
		return nil, domain.ErrValidation
	}

	if action == "add" {
		// Expand the tag set once through active rules; identical for every file.
		expanded, err := s.expandTagSet(ctx, tagIDs)
		if err != nil {
			return nil, err
		}
		for _, fileID := range fileIDs {
			if err := s.bulkAddToFile(ctx, fileID, expanded); err != nil {
				return nil, err
			}
		}
		return expanded, nil
	}

	// action == "remove": build the removal set once (loop-invariant).
	remove := make(map[uuid.UUID]bool, len(tagIDs))
	for _, id := range tagIDs {
		remove[id] = true
	}
	for _, fileID := range fileIDs {
		if err := s.bulkRemoveFromFile(ctx, fileID, remove); err != nil {
			return nil, err
		}
	}
	return []uuid.UUID{}, nil
}

// bulkAddToFile unions tagIDs into a file's current tag set; missing files are skipped.
func (s *TagService) bulkAddToFile(ctx context.Context, fileID uuid.UUID, tagIDs []uuid.UUID) error {
	current, err := s.tags.ListByFile(ctx, fileID)
	if err != nil {
		if err == domain.ErrNotFound {
			return nil
		}
		return err
	}
	seen := make(map[uuid.UUID]bool, len(current)+len(tagIDs))
	merged := make([]uuid.UUID, 0, len(current)+len(tagIDs))
	for _, t := range current {
		seen[t.ID] = true
		merged = append(merged, t.ID)
	}
	for _, id := range tagIDs {
		if !seen[id] {
			seen[id] = true
			merged = append(merged, id)
		}
	}
	return s.tags.SetFileTags(ctx, fileID, merged)
}

// bulkRemoveFromFile drops the given tags from a file's set; missing files are skipped.
func (s *TagService) bulkRemoveFromFile(ctx context.Context, fileID uuid.UUID, remove map[uuid.UUID]bool) error {
	current, err := s.tags.ListByFile(ctx, fileID)
	if err != nil {
		if err == domain.ErrNotFound {
			return nil
		}
		return err
	}
	kept := make([]uuid.UUID, 0, len(current))
	for _, t := range current {
		if !remove[t.ID] {
			kept = append(kept, t.ID)
		}
	}
	return s.tags.SetFileTags(ctx, fileID, kept)
}
// CommonTags returns tags present on ALL given files and tags present on SOME.
func (s *TagService) CommonTags(ctx context.Context, fileIDs []uuid.UUID) (common, partial []domain.Tag, err error) {
	if common, err = s.tags.CommonTagsForFiles(ctx, fileIDs); err != nil {
		return nil, nil, err
	}
	if partial, err = s.tags.PartialTagsForFiles(ctx, fileIDs); err != nil {
		return nil, nil, err
	}
	return common, partial, nil
}
// ---------------------------------------------------------------------------
// Internal helpers
// ---------------------------------------------------------------------------
// expandTagSet runs a breadth-first walk from the given seed tags, following
// active tag rules, and returns seeds plus every auto-applied tag in
// discovery order. The visited map guarantees termination on rule cycles.
func (s *TagService) expandTagSet(ctx context.Context, seeds []uuid.UUID) ([]uuid.UUID, error) {
	discovered := make(map[uuid.UUID]bool, len(seeds))
	order := make([]uuid.UUID, 0, len(seeds))
	for _, seed := range seeds {
		if discovered[seed] {
			continue
		}
		discovered[seed] = true
		order = append(order, seed)
	}
	// order doubles as the BFS queue; head walks it while new tags append.
	for head := 0; head < len(order); head++ {
		followups, err := s.rules.ListByTag(ctx, order[head])
		if err != nil {
			return nil, err
		}
		for _, rule := range followups {
			if !rule.IsActive || discovered[rule.ThenTagID] {
				continue
			}
			discovered[rule.ThenTagID] = true
			order = append(order, rule.ThenTagID)
		}
	}
	return order, nil
}

View File

@ -0,0 +1,156 @@
package service
import (
"context"
"fmt"
"golang.org/x/crypto/bcrypt"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// UserService handles user CRUD and profile management.
type UserService struct {
	users port.UserRepo // user persistence
	audit *AuditService // best-effort audit logging
}

// NewUserService creates a UserService with its dependencies.
func NewUserService(users port.UserRepo, audit *AuditService) *UserService {
	return &UserService{users: users, audit: audit}
}
// ---------------------------------------------------------------------------
// Self-service
// ---------------------------------------------------------------------------
// GetMe returns the profile of the currently authenticated user,
// whose ID is read from the request context.
func (s *UserService) GetMe(ctx context.Context) (*domain.User, error) {
	userID, _, _ := domain.UserFromContext(ctx)
	return s.users.GetByID(ctx, userID)
}
// UpdateMeParams holds fields a user may change on their own profile.
type UpdateMeParams struct {
	Name     string  // empty = no change
	Password *string // nil = no change; value is hashed before storage
}
// UpdateMe allows a user to change their own name and/or password.
// A supplied password is bcrypt-hashed before being stored.
func (s *UserService) UpdateMe(ctx context.Context, p UpdateMeParams) (*domain.User, error) {
	userID, _, _ := domain.UserFromContext(ctx)

	me, err := s.users.GetByID(ctx, userID)
	if err != nil {
		return nil, err
	}
	updated := *me
	if p.Name != "" {
		updated.Name = p.Name
	}
	if p.Password != nil {
		hashed, hashErr := bcrypt.GenerateFromPassword([]byte(*p.Password), bcrypt.DefaultCost)
		if hashErr != nil {
			return nil, fmt.Errorf("UserService.UpdateMe hash: %w", hashErr)
		}
		updated.Password = string(hashed)
	}
	return s.users.Update(ctx, userID, &updated)
}
// ---------------------------------------------------------------------------
// Admin CRUD
// ---------------------------------------------------------------------------
// List returns a paginated list of users (admin only — caller must enforce).
func (s *UserService) List(ctx context.Context, params port.OffsetParams) (*domain.UserPage, error) {
	return s.users.List(ctx, params)
}

// Get returns a user by ID (admin only — caller must enforce).
func (s *UserService) Get(ctx context.Context, id int16) (*domain.User, error) {
	return s.users.GetByID(ctx, id)
}
// CreateUserParams holds the fields for creating a new user.
type CreateUserParams struct {
	Name      string // login/display name
	Password  string // plaintext; hashed by Create before storage
	IsAdmin   bool   // grants admin privileges
	CanCreate bool   // grants object-creation privileges
}
// Create inserts a new user with a bcrypt-hashed password (admin only).
func (s *UserService) Create(ctx context.Context, p CreateUserParams) (*domain.User, error) {
	hashed, err := bcrypt.GenerateFromPassword([]byte(p.Password), bcrypt.DefaultCost)
	if err != nil {
		return nil, fmt.Errorf("UserService.Create hash: %w", err)
	}
	created, err := s.users.Create(ctx, &domain.User{
		Name:      p.Name,
		Password:  string(hashed),
		IsAdmin:   p.IsAdmin,
		CanCreate: p.CanCreate,
	})
	if err != nil {
		return nil, err
	}
	_ = s.audit.Log(ctx, "user_create", nil, nil, map[string]any{"target_user_id": created.ID})
	return created, nil
}
// UpdateAdminParams holds fields an admin may change on any user.
// nil means "no change" for each field.
type UpdateAdminParams struct {
	IsAdmin   *bool // grant/revoke admin privileges
	CanCreate *bool // grant/revoke object-creation privileges
	IsBlocked *bool // block/unblock the account
}
// UpdateAdmin applies an admin-level patch to a user. Block/unblock changes
// additionally produce a dedicated audit entry.
func (s *UserService) UpdateAdmin(ctx context.Context, id int16, p UpdateAdminParams) (*domain.User, error) {
	existing, err := s.users.GetByID(ctx, id)
	if err != nil {
		return nil, err
	}

	merged := *existing
	if p.IsAdmin != nil {
		merged.IsAdmin = *p.IsAdmin
	}
	if p.CanCreate != nil {
		merged.CanCreate = *p.CanCreate
	}
	if p.IsBlocked != nil {
		merged.IsBlocked = *p.IsBlocked
	}

	updated, err := s.users.Update(ctx, id, &merged)
	if err != nil {
		return nil, err
	}

	// Block/unblock gets its own audit action.
	if p.IsBlocked != nil {
		action := "user_unblock"
		if *p.IsBlocked {
			action = "user_block"
		}
		_ = s.audit.Log(ctx, action, nil, nil, map[string]any{"target_user_id": id})
	}
	return updated, nil
}
// Delete removes a user by ID (admin only) and audits the removal.
func (s *UserService) Delete(ctx context.Context, id int16) error {
	err := s.users.Delete(ctx, id)
	if err != nil {
		return err
	}
	_ = s.audit.Log(ctx, "user_delete", nil, nil, map[string]any{"target_user_id": id})
	return nil
}

View File

@ -0,0 +1,257 @@
// Package storage provides a local-filesystem implementation of port.FileStorage.
package storage
import (
"bytes"
"context"
"fmt"
"image"
"image/color"
"image/jpeg"
_ "image/gif" // register GIF decoder
_ "image/png" // register PNG decoder
_ "golang.org/x/image/webp" // register WebP decoder
"io"
"os"
"os/exec"
"path/filepath"
"github.com/disintegration/imaging"
"github.com/google/uuid"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// DiskStorage implements port.FileStorage using the local filesystem.
//
// Directory layout:
//
//	{filesPath}/{id}              — original file (UUID basename, no extension)
//	{thumbsPath}/{id}_thumb.jpg   — thumbnail cache
//	{thumbsPath}/{id}_preview.jpg — preview cache
type DiskStorage struct {
	filesPath     string // directory holding original files
	thumbsPath    string // directory holding generated thumbnail/preview JPEGs
	thumbWidth    int    // max thumbnail width (px)
	thumbHeight   int    // max thumbnail height (px)
	previewWidth  int    // max preview width (px)
	previewHeight int    // max preview height (px)
}
// Compile-time check that DiskStorage satisfies the storage port.
var _ port.FileStorage = (*DiskStorage)(nil)

// NewDiskStorage creates a DiskStorage and ensures both directories exist
// (created with mode 0755 if missing).
func NewDiskStorage(
	filesPath, thumbsPath string,
	thumbW, thumbH, prevW, prevH int,
) (*DiskStorage, error) {
	for _, p := range []string{filesPath, thumbsPath} {
		if err := os.MkdirAll(p, 0o755); err != nil {
			return nil, fmt.Errorf("storage: create directory %q: %w", p, err)
		}
	}
	return &DiskStorage{
		filesPath:     filesPath,
		thumbsPath:    thumbsPath,
		thumbWidth:    thumbW,
		thumbHeight:   thumbH,
		previewWidth:  prevW,
		previewHeight: prevH,
	}, nil
}
// ---------------------------------------------------------------------------
// port.FileStorage implementation
// ---------------------------------------------------------------------------
// Save writes r to {filesPath}/{id} and returns the number of bytes written.
// On any write or close failure the partial file is removed.
func (s *DiskStorage) Save(_ context.Context, id uuid.UUID, r io.Reader) (int64, error) {
	dst := s.originalPath(id)
	out, err := os.Create(dst)
	if err != nil {
		return 0, fmt.Errorf("storage.Save create %q: %w", dst, err)
	}
	written, copyErr := io.Copy(out, r)
	closeErr := out.Close()
	switch {
	case copyErr != nil:
		os.Remove(dst) // don't leave a truncated file behind
		return 0, fmt.Errorf("storage.Save write: %w", copyErr)
	case closeErr != nil:
		os.Remove(dst)
		return 0, fmt.Errorf("storage.Save close: %w", closeErr)
	}
	return written, nil
}
// Read opens the original file for reading. The caller must close the result.
// A missing file maps to domain.ErrNotFound.
func (s *DiskStorage) Read(_ context.Context, id uuid.UUID) (io.ReadCloser, error) {
	file, err := os.Open(s.originalPath(id))
	switch {
	case err == nil:
		return file, nil
	case os.IsNotExist(err):
		return nil, domain.ErrNotFound
	default:
		return nil, fmt.Errorf("storage.Read: %w", err)
	}
}
// Delete removes the original file. Returns ErrNotFound if it does not exist.
func (s *DiskStorage) Delete(_ context.Context, id uuid.UUID) error {
	err := os.Remove(s.originalPath(id))
	if err == nil {
		return nil
	}
	if os.IsNotExist(err) {
		return domain.ErrNotFound
	}
	return fmt.Errorf("storage.Delete: %w", err)
}
// Thumbnail returns a JPEG that fits within the configured max width×height
// (never upscaled, never cropped). Generated on first call and cached on disk.
// Video files are thumbnailed via ffmpeg; other non-image files get a placeholder.
func (s *DiskStorage) Thumbnail(ctx context.Context, id uuid.UUID) (io.ReadCloser, error) {
	return s.serveGenerated(ctx, id, s.thumbCachePath(id), s.thumbWidth, s.thumbHeight)
}

// Preview returns a JPEG that fits within the configured max width×height
// (never upscaled, never cropped). Generated on first call and cached on disk.
// Video files are thumbnailed via ffmpeg; other non-image files get a placeholder.
func (s *DiskStorage) Preview(ctx context.Context, id uuid.UUID) (io.ReadCloser, error) {
	return s.serveGenerated(ctx, id, s.previewCachePath(id), s.previewWidth, s.previewHeight)
}
// ---------------------------------------------------------------------------
// Internal helpers
// ---------------------------------------------------------------------------
// serveGenerated is the shared implementation for Thumbnail and Preview.
// imaging.Thumbnail fits the source within maxW×maxH without upscaling or cropping.
//
// Resolution order:
//  1. Return cached JPEG if present.
//  2. Decode as still image (JPEG/PNG/GIF via imaging).
//  3. Extract a frame with ffmpeg (video files).
//  4. Solid-colour placeholder (archives, unrecognised formats, etc.).
//
// The generated JPEG is cached on disk; if the cache write fails the image is
// served from memory so the request still succeeds.
func (s *DiskStorage) serveGenerated(ctx context.Context, id uuid.UUID, cachePath string, maxW, maxH int) (io.ReadCloser, error) {
	// Cache hit: serve the previously generated JPEG directly.
	if cached, openErr := os.Open(cachePath); openErr == nil {
		return cached, nil
	}

	// The original must exist before we spend any effort generating.
	srcPath := s.originalPath(id)
	if _, statErr := os.Stat(srcPath); statErr != nil {
		if os.IsNotExist(statErr) {
			return nil, domain.ErrNotFound
		}
		return nil, fmt.Errorf("storage: stat %q: %w", srcPath, statErr)
	}

	// Pick the source image: still image, video frame, or placeholder.
	var result image.Image
	if still, decErr := imaging.Open(srcPath, imaging.AutoOrientation(true)); decErr == nil {
		result = imaging.Thumbnail(still, maxW, maxH, imaging.Lanczos)
	} else if frame, vidErr := extractVideoFrame(ctx, srcPath); vidErr == nil {
		result = imaging.Thumbnail(frame, maxW, maxH, imaging.Lanczos)
	} else {
		result = placeholder(maxW, maxH)
	}

	// Preferred path: atomic cache write, then serve the cached file.
	if cached, cacheErr := writeCache(cachePath, result); cacheErr == nil {
		return cached, nil
	}

	// Cache write failed (read-only fs, disk full, …) — serve from memory.
	var mem bytes.Buffer
	if encErr := jpeg.Encode(&mem, result, &jpeg.Options{Quality: 85}); encErr != nil {
		return nil, fmt.Errorf("storage: encode in-memory JPEG: %w", encErr)
	}
	return io.NopCloser(&mem), nil
}
// writeCache encodes img as JPEG to cachePath via an atomic temp→rename write,
// then opens and returns the cache file.
func writeCache(cachePath string, img image.Image) (io.ReadCloser, error) {
dir := filepath.Dir(cachePath)
tmp, err := os.CreateTemp(dir, ".cache-*.tmp")
if err != nil {
return nil, fmt.Errorf("storage: create temp file: %w", err)
}
tmpName := tmp.Name()
encErr := jpeg.Encode(tmp, img, &jpeg.Options{Quality: 85})
closeErr := tmp.Close()
if encErr != nil {
os.Remove(tmpName)
return nil, fmt.Errorf("storage: encode cache JPEG: %w", encErr)
}
if closeErr != nil {
os.Remove(tmpName)
return nil, fmt.Errorf("storage: close temp file: %w", closeErr)
}
if err := os.Rename(tmpName, cachePath); err != nil {
os.Remove(tmpName)
return nil, fmt.Errorf("storage: rename cache file: %w", err)
}
f, err := os.Open(cachePath)
if err != nil {
return nil, fmt.Errorf("storage: open cache file: %w", err)
}
return f, nil
}
// extractVideoFrame uses ffmpeg to extract a single frame from a video file.
//
// It seeks 1 second in (keyframe-accurate fast input seek) and pipes the
// frame out as PNG on stdout. If the video is shorter than 1 s the seek is
// silently ignored by ffmpeg and the first available frame is returned
// instead.
//
// Returns an error when ffmpeg is not installed, exits non-zero, or exits
// successfully without producing any output (e.g. the input is not a video).
func extractVideoFrame(ctx context.Context, srcPath string) (image.Image, error) {
	var out bytes.Buffer
	cmd := exec.CommandContext(ctx, "ffmpeg",
		"-ss", "1", // fast input seek; ignored gracefully on short files
		"-i", srcPath,
		"-vframes", "1", // a single frame is all we need
		"-f", "image2",
		"-vcodec", "png",
		"pipe:1", // write the encoded frame to stdout
	)
	cmd.Stdout = &out
	cmd.Stderr = io.Discard // suppress ffmpeg banner/progress output
	if err := cmd.Run(); err != nil {
		return nil, fmt.Errorf("ffmpeg frame extract: %w", err)
	}
	// Bug fix: previously a zero-length output with a nil error was folded
	// into the same branch and wrapped with %w around nil, producing a
	// malformed "%!w(<nil>)" error message. Report it explicitly instead.
	if out.Len() == 0 {
		return nil, fmt.Errorf("ffmpeg frame extract: no frame produced for %q", srcPath)
	}
	return imaging.Decode(&out)
}
// ---------------------------------------------------------------------------
// Path helpers
// ---------------------------------------------------------------------------
// originalPath returns the on-disk location of the original file content:
// a flat layout under filesPath, named by the bare UUID with no extension.
// NOTE(review): the data.files migration comment describes on-disk names as
// "{id}.{ext}" — confirm which naming scheme is current.
func (s *DiskStorage) originalPath(id uuid.UUID) string {
	return filepath.Join(s.filesPath, id.String())
}
// thumbCachePath returns the cache location for the small thumbnail JPEG:
// "{id}_thumb.jpg" under thumbsPath.
func (s *DiskStorage) thumbCachePath(id uuid.UUID) string {
	return filepath.Join(s.thumbsPath, id.String()+"_thumb.jpg")
}
// previewCachePath returns the cache location for the large preview JPEG:
// "{id}_preview.jpg" under thumbsPath.
func (s *DiskStorage) previewCachePath(id uuid.UUID) string {
	return filepath.Join(s.thumbsPath, id.String()+"_preview.jpg")
}
// ---------------------------------------------------------------------------
// Image helpers
// ---------------------------------------------------------------------------
// placeholder returns a solid-colour image of size w×h for files that cannot
// be decoded as images and yield no ffmpeg frame (archives, unrecognised
// formats, etc.). Uses #444455 from the design palette.
func placeholder(w, h int) *image.NRGBA {
	return imaging.New(w, h, color.NRGBA{R: 0x44, G: 0x44, B: 0x55, A: 0xFF})
}

View File

@ -0,0 +1,65 @@
-- +goose Up
-- pgcrypto provides gen_random_uuid(), used as the entropy source in
-- uuid_v7() below.
CREATE EXTENSION IF NOT EXISTS pgcrypto;
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";

-- Application schemas:
--   core     — users and reference tables
--   data     — domain entities (files, tags, pools, …)
--   acl      — per-object permissions
--   activity — sessions, view history, audit log
CREATE SCHEMA IF NOT EXISTS core;
CREATE SCHEMA IF NOT EXISTS data;
CREATE SCHEMA IF NOT EXISTS acl;
CREATE SCHEMA IF NOT EXISTS activity;

-- UUID v7 generator.
-- Per-session state is carried in the custom GUC 'uuidv7.old_tp' encoded as
-- 'tp:seqn:ver:base'; when several IDs fall in the same millisecond the
-- sequence counter (seqn) is advanced so the IDs remain ordered.
-- +goose StatementBegin
CREATE OR REPLACE FUNCTION public.uuid_v7(cts timestamptz DEFAULT clock_timestamp())
RETURNS uuid LANGUAGE plpgsql AS $$
DECLARE
    state text = current_setting('uuidv7.old_tp', true); -- NULL on first call in a session
    old_tp text = split_part(state, ':', 1);
    -- NOTE(review): this initializer is dead — base is unconditionally
    -- reassigned at the top of the BEGIN block.
    base int = coalesce(nullif(split_part(state, ':', 4), '')::int, (random()*16777215/2-1)::int);
    tp text;
    entropy text;
    seq text = base;
    seqn int = split_part(state, ':', 2);
    ver text = coalesce(split_part(state, ':', 3), to_hex(8+(random()*3)::int));
BEGIN
    base = (random()*16777215/2-1)::int;
    -- 48-bit unix-epoch milliseconds as 12 hex chars, then the version nibble '7'.
    tp = lpad(to_hex(floor(extract(epoch from cts)*1000)::int8), 12, '0') || '7';
    IF tp IS DISTINCT FROM old_tp THEN
        -- New millisecond: pick a fresh variant nibble (8–b) and reset the counter.
        old_tp = tp;
        ver = to_hex(8+(random()*3)::int);
        base = (random()*16777215/2-1)::int;
        seqn = base;
    ELSE
        -- Same millisecond: bump the counter by a random step so consecutive
        -- IDs stay ordered but are not trivially predictable.
        seqn = seqn + (random()*1000)::int;
    END IF;
    PERFORM set_config('uuidv7.old_tp', old_tp||':'||seqn||':'||ver||':'||base, false);
    entropy = md5(gen_random_uuid()::text);
    seq = lpad(to_hex(seqn), 6, '0');
    RETURN (tp || substring(seq from 1 for 3) || ver || substring(seq from 4 for 3) ||
            substring(entropy from 1 for 12))::uuid;
END;
$$;
-- +goose StatementEnd

-- Extract the embedded millisecond timestamp from a UUID v7 value
-- (first 12 hex chars = 48-bit unix-epoch milliseconds).
-- +goose StatementBegin
CREATE OR REPLACE FUNCTION public.uuid_extract_timestamp(uuid_val uuid)
RETURNS timestamptz LANGUAGE sql IMMUTABLE PARALLEL SAFE AS $$
    SELECT to_timestamp(
        ('x' || left(replace(uuid_val::text, '-', ''), 12))::bit(48)::bigint / 1000.0
    );
$$;
-- +goose StatementEnd

-- +goose Down
DROP FUNCTION IF EXISTS public.uuid_extract_timestamp(uuid);
DROP FUNCTION IF EXISTS public.uuid_v7(timestamptz);
DROP SCHEMA IF EXISTS activity;
DROP SCHEMA IF EXISTS acl;
DROP SCHEMA IF EXISTS data;
DROP SCHEMA IF EXISTS core;
DROP EXTENSION IF EXISTS "uuid-ossp";
DROP EXTENSION IF EXISTS pgcrypto;

View File

@ -0,0 +1,37 @@
-- +goose Up
-- Reference tables in the core schema; all use small identity PKs since
-- their cardinality is tiny.
CREATE TABLE core.users (
    id         smallint GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    name       varchar(32) NOT NULL,
    password   text NOT NULL,                  -- bcrypt hash via pgcrypto
    is_admin   boolean NOT NULL DEFAULT false, -- admins bypass all ACL checks
    can_create boolean NOT NULL DEFAULT false,
    is_blocked boolean NOT NULL DEFAULT false,
    CONSTRAINT uni__users__name UNIQUE (name)
);

-- Whitelist of accepted MIME types; `extension` is the canonical file
-- extension used for that type.
CREATE TABLE core.mime_types (
    id        smallint GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    name      varchar(127) NOT NULL,
    extension varchar(16) NOT NULL,
    CONSTRAINT uni__mime_types__name UNIQUE (name)
);

-- Entity-type discriminator referenced by acl.permissions and
-- activity.audit_log (file/tag/category/pool).
CREATE TABLE core.object_types (
    id   smallint GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    name varchar(32) NOT NULL,
    CONSTRAINT uni__object_types__name UNIQUE (name)
);

COMMENT ON TABLE core.users IS 'Application users';
COMMENT ON TABLE core.mime_types IS 'Whitelist of supported MIME types';
COMMENT ON TABLE core.object_types IS 'Reference: entity types for ACL and audit log';

-- +goose Down
DROP TABLE IF EXISTS core.object_types;
DROP TABLE IF EXISTS core.mime_types;
DROP TABLE IF EXISTS core.users;

View File

@ -0,0 +1,118 @@
-- +goose Up
-- Domain entities. All PKs are time-ordered UUID v7 values, so sorting by id
-- approximates creation order. Colours are stored as 6 hex chars ("RRGGBB",
-- no leading '#'), enforced by CHECK constraints.
CREATE TABLE data.categories (
    id         uuid NOT NULL DEFAULT public.uuid_v7() PRIMARY KEY,
    name       varchar(256) NOT NULL,
    notes      text,
    color      char(6),
    metadata   jsonb,
    creator_id smallint NOT NULL REFERENCES core.users(id)
        ON UPDATE CASCADE ON DELETE RESTRICT, -- RESTRICT: keep history, never orphan content
    is_public  boolean NOT NULL DEFAULT false,
    CONSTRAINT uni__categories__name UNIQUE (name),
    CONSTRAINT chk__categories__color_hex
        CHECK (color IS NULL OR color ~* '^[A-Fa-f0-9]{6}$')
);

CREATE TABLE data.tags (
    id          uuid NOT NULL DEFAULT public.uuid_v7() PRIMARY KEY,
    name        varchar(256) NOT NULL,
    notes       text,
    color       char(6),
    category_id uuid REFERENCES data.categories(id)
        ON UPDATE CASCADE ON DELETE SET NULL, -- deleting a category leaves its tags uncategorised
    metadata    jsonb,
    creator_id  smallint NOT NULL REFERENCES core.users(id)
        ON UPDATE CASCADE ON DELETE RESTRICT,
    is_public   boolean NOT NULL DEFAULT false,
    CONSTRAINT uni__tags__name UNIQUE (name),
    CONSTRAINT chk__tags__color_hex
        CHECK (color IS NULL OR color ~* '^[A-Fa-f0-9]{6}$')
);

-- Auto-tagging rules: assigning when_tag also assigns then_tag.
CREATE TABLE data.tag_rules (
    when_tag_id uuid NOT NULL REFERENCES data.tags(id)
        ON UPDATE CASCADE ON DELETE CASCADE,
    then_tag_id uuid NOT NULL REFERENCES data.tags(id)
        ON UPDATE CASCADE ON DELETE CASCADE,
    is_active   boolean NOT NULL DEFAULT true,
    PRIMARY KEY (when_tag_id, then_tag_id)
);

CREATE TABLE data.files (
    id               uuid NOT NULL DEFAULT public.uuid_v7() PRIMARY KEY,
    original_name    varchar(256), -- original filename at upload time
    mime_id          smallint NOT NULL REFERENCES core.mime_types(id)
        ON UPDATE CASCADE ON DELETE RESTRICT,
    content_datetime timestamptz NOT NULL DEFAULT clock_timestamp(), -- content datetime (e.g. photo taken)
    notes            text,
    metadata         jsonb,  -- user-editable key-value data
    exif             jsonb,  -- EXIF data extracted at upload (immutable)
    phash            bigint, -- perceptual hash for duplicate detection (future)
    creator_id       smallint NOT NULL REFERENCES core.users(id)
        ON UPDATE CASCADE ON DELETE RESTRICT,
    is_public        boolean NOT NULL DEFAULT false,
    is_deleted       boolean NOT NULL DEFAULT false -- soft delete (trash)
);

CREATE TABLE data.file_tag (
    file_id uuid NOT NULL REFERENCES data.files(id)
        ON UPDATE CASCADE ON DELETE CASCADE,
    tag_id  uuid NOT NULL REFERENCES data.tags(id)
        ON UPDATE CASCADE ON DELETE CASCADE,
    PRIMARY KEY (file_id, tag_id)
);

CREATE TABLE data.pools (
    id         uuid NOT NULL DEFAULT public.uuid_v7() PRIMARY KEY,
    name       varchar(256) NOT NULL,
    notes      text,
    metadata   jsonb,
    creator_id smallint NOT NULL REFERENCES core.users(id)
        ON UPDATE CASCADE ON DELETE RESTRICT,
    is_public  boolean NOT NULL DEFAULT false,
    CONSTRAINT uni__pools__name UNIQUE (name)
);

-- `position` uses integer with gaps (e.g. 1000, 2000, 3000) to allow
-- insertions without renumbering. Compact when gaps get too small.
CREATE TABLE data.file_pool (
    file_id  uuid NOT NULL REFERENCES data.files(id)
        ON UPDATE CASCADE ON DELETE CASCADE,
    pool_id  uuid NOT NULL REFERENCES data.pools(id)
        ON UPDATE CASCADE ON DELETE CASCADE,
    position integer NOT NULL DEFAULT 0,
    PRIMARY KEY (file_id, pool_id)
);

COMMENT ON TABLE data.categories IS 'Logical grouping of tags';
COMMENT ON TABLE data.tags IS 'File labels/tags';
COMMENT ON TABLE data.tag_rules IS 'Auto-tagging rules: when when_tag is assigned, then_tag follows';
-- NOTE(review): the storage layer's originalPath() resolves content as the
-- bare "{id}" with no extension — confirm which naming the comment below
-- should describe.
COMMENT ON TABLE data.files IS 'Managed files; actual content stored on disk as {id}.{ext}';
COMMENT ON TABLE data.file_tag IS 'Many-to-many: files <-> tags';
COMMENT ON TABLE data.pools IS 'Ordered collections of files';
COMMENT ON TABLE data.file_pool IS 'Many-to-many: files <-> pools, with ordering';
COMMENT ON COLUMN data.files.original_name IS 'Original filename at upload time';
COMMENT ON COLUMN data.files.content_datetime IS 'Content datetime (e.g. when photo was taken); falls back to EXIF DateTimeOriginal';
COMMENT ON COLUMN data.files.metadata IS 'User-editable key-value metadata';
COMMENT ON COLUMN data.files.exif IS 'EXIF data extracted at upload time (immutable, system-managed)';
COMMENT ON COLUMN data.files.phash IS 'Perceptual hash for image/video duplicate detection';
COMMENT ON COLUMN data.files.is_deleted IS 'Soft-deleted files (trash); true = in recycle bin';
COMMENT ON COLUMN data.file_pool.position IS 'Manual ordering within pool; uses gapped integers';

-- +goose Down
DROP TABLE IF EXISTS data.file_pool;
DROP TABLE IF EXISTS data.pools;
DROP TABLE IF EXISTS data.file_tag;
DROP TABLE IF EXISTS data.files;
DROP TABLE IF EXISTS data.tag_rules;
DROP TABLE IF EXISTS data.tags;
DROP TABLE IF EXISTS data.categories;

View File

@ -0,0 +1,22 @@
-- +goose Up
-- Access-control model:
-- If is_public=true on the object, it is accessible to everyone (ACL ignored).
-- If is_public=false, only creator and users with can_view=true see it.
-- Admins bypass all ACL checks.
-- object_id is an untyped uuid (no FK) because it can point at a row in any
-- of the entity tables; object_type_id says which one.
CREATE TABLE acl.permissions (
    user_id        smallint NOT NULL REFERENCES core.users(id)
        ON UPDATE CASCADE ON DELETE CASCADE, -- removing a user removes their grants
    object_type_id smallint NOT NULL REFERENCES core.object_types(id)
        ON UPDATE CASCADE ON DELETE RESTRICT,
    object_id      uuid NOT NULL,
    can_view       boolean NOT NULL DEFAULT true,
    can_edit       boolean NOT NULL DEFAULT false,
    PRIMARY KEY (user_id, object_type_id, object_id)
);

COMMENT ON TABLE acl.permissions IS 'Per-object permissions (used when is_public=false)';

-- +goose Down
DROP TABLE IF EXISTS acl.permissions;

View File

@ -0,0 +1,82 @@
-- +goose Up
-- Activity tracking. The *_views and tag_uses tables include the timestamp
-- in their primary keys so repeated events by the same user are recorded,
-- not collapsed.
CREATE TABLE activity.action_types (
    id   smallint GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    name varchar(64) NOT NULL,
    CONSTRAINT uni__action_types__name UNIQUE (name)
);

CREATE TABLE activity.sessions (
    id            integer GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    token_hash    text NOT NULL, -- hashed session token; raw token is never stored
    user_id       smallint NOT NULL REFERENCES core.users(id)
        ON UPDATE CASCADE ON DELETE CASCADE,
    user_agent    varchar(256) NOT NULL,
    started_at    timestamptz NOT NULL DEFAULT statement_timestamp(),
    expires_at    timestamptz, -- NULL = no fixed expiry
    last_activity timestamptz NOT NULL DEFAULT statement_timestamp(),
    is_active     boolean NOT NULL DEFAULT true,
    CONSTRAINT uni__sessions__token_hash UNIQUE (token_hash)
);

CREATE TABLE activity.file_views (
    file_id   uuid NOT NULL REFERENCES data.files(id)
        ON UPDATE CASCADE ON DELETE CASCADE,
    user_id   smallint NOT NULL REFERENCES core.users(id)
        ON UPDATE CASCADE ON DELETE CASCADE,
    viewed_at timestamptz NOT NULL DEFAULT statement_timestamp(),
    PRIMARY KEY (file_id, viewed_at, user_id)
);

CREATE TABLE activity.pool_views (
    pool_id   uuid NOT NULL REFERENCES data.pools(id)
        ON UPDATE CASCADE ON DELETE CASCADE,
    user_id   smallint NOT NULL REFERENCES core.users(id)
        ON UPDATE CASCADE ON DELETE CASCADE,
    viewed_at timestamptz NOT NULL DEFAULT statement_timestamp(),
    PRIMARY KEY (pool_id, viewed_at, user_id)
);

CREATE TABLE activity.tag_uses (
    tag_id      uuid NOT NULL REFERENCES data.tags(id)
        ON UPDATE CASCADE ON DELETE CASCADE,
    user_id     smallint NOT NULL REFERENCES core.users(id)
        ON UPDATE CASCADE ON DELETE CASCADE,
    used_at     timestamptz NOT NULL DEFAULT statement_timestamp(),
    is_included boolean NOT NULL, -- true=included in filter, false=excluded
    PRIMARY KEY (tag_id, used_at, user_id)
);

CREATE TABLE activity.audit_log (
    id             bigint GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
    user_id        smallint NOT NULL REFERENCES core.users(id)
        ON UPDATE CASCADE ON DELETE RESTRICT, -- RESTRICT: audit rows must outlive users
    action_type_id smallint NOT NULL REFERENCES activity.action_types(id)
        ON UPDATE CASCADE ON DELETE RESTRICT,
    object_type_id smallint REFERENCES core.object_types(id)
        ON UPDATE CASCADE ON DELETE RESTRICT,
    object_id      uuid,  -- nullable: some actions (e.g. login) have no target object
    details        jsonb, -- action-specific payload
    performed_at   timestamptz NOT NULL DEFAULT statement_timestamp()
);

COMMENT ON TABLE activity.action_types IS 'Reference: types of auditable user actions';
COMMENT ON TABLE activity.sessions IS 'Active user sessions';
COMMENT ON TABLE activity.file_views IS 'File view history';
COMMENT ON TABLE activity.pool_views IS 'Pool view history';
COMMENT ON TABLE activity.tag_uses IS 'Tag usage in filters';
COMMENT ON TABLE activity.audit_log IS 'Unified audit trail for all user actions';

-- +goose Down
DROP TABLE IF EXISTS activity.audit_log;
DROP TABLE IF EXISTS activity.tag_uses;
DROP TABLE IF EXISTS activity.pool_views;
DROP TABLE IF EXISTS activity.file_views;
DROP TABLE IF EXISTS activity.sessions;
DROP TABLE IF EXISTS activity.action_types;

View File

@ -0,0 +1,87 @@
-- +goose Up
-- Index strategy: hash indexes for columns only ever used in equality
-- lookups (FK join columns, token lookups); btree where ordering, ranges,
-- or composite/partial indexes are needed. Hash indexes do not support
-- range scans or ORDER BY (see PostgreSQL index-type docs).

-- core
CREATE INDEX idx__users__name ON core.users USING hash (name);
-- data.categories
CREATE INDEX idx__categories__creator_id ON data.categories USING hash (creator_id);
-- data.tags
CREATE INDEX idx__tags__category_id ON data.tags USING hash (category_id);
CREATE INDEX idx__tags__creator_id ON data.tags USING hash (creator_id);
-- data.tag_rules
CREATE INDEX idx__tag_rules__when ON data.tag_rules USING hash (when_tag_id);
CREATE INDEX idx__tag_rules__then ON data.tag_rules USING hash (then_tag_id);
-- data.files
CREATE INDEX idx__files__mime_id ON data.files USING hash (mime_id);
CREATE INDEX idx__files__creator_id ON data.files USING hash (creator_id);
-- btree: newest-first listings sort on this column.
CREATE INDEX idx__files__content_datetime ON data.files USING btree (content_datetime DESC NULLS LAST);
-- Partial indexes: only trash rows / rows with a computed phash are indexed.
CREATE INDEX idx__files__is_deleted ON data.files USING btree (is_deleted) WHERE is_deleted = true;
CREATE INDEX idx__files__phash ON data.files USING btree (phash) WHERE phash IS NOT NULL;
-- data.file_tag
CREATE INDEX idx__file_tag__tag_id ON data.file_tag USING hash (tag_id);
CREATE INDEX idx__file_tag__file_id ON data.file_tag USING hash (file_id);
-- data.pools
CREATE INDEX idx__pools__creator_id ON data.pools USING hash (creator_id);
-- data.file_pool
CREATE INDEX idx__file_pool__pool_id ON data.file_pool USING hash (pool_id);
CREATE INDEX idx__file_pool__file_id ON data.file_pool USING hash (file_id);
-- acl.permissions: composite btree for "who can see object X" lookups.
CREATE INDEX idx__acl__object ON acl.permissions USING btree (object_type_id, object_id);
CREATE INDEX idx__acl__user ON acl.permissions USING hash (user_id);
-- activity.sessions
CREATE INDEX idx__sessions__user_id ON activity.sessions USING hash (user_id);
CREATE INDEX idx__sessions__token_hash ON activity.sessions USING hash (token_hash);
-- activity.file_views
CREATE INDEX idx__file_views__user_id ON activity.file_views USING hash (user_id);
-- activity.pool_views
CREATE INDEX idx__pool_views__user_id ON activity.pool_views USING hash (user_id);
-- activity.tag_uses
CREATE INDEX idx__tag_uses__user_id ON activity.tag_uses USING hash (user_id);
-- activity.audit_log
CREATE INDEX idx__audit_log__user_id ON activity.audit_log USING hash (user_id);
CREATE INDEX idx__audit_log__action_type_id ON activity.audit_log USING hash (action_type_id);
CREATE INDEX idx__audit_log__object ON activity.audit_log USING btree (object_type_id, object_id)
    WHERE object_id IS NOT NULL;
CREATE INDEX idx__audit_log__performed_at ON activity.audit_log USING btree (performed_at DESC);

-- +goose Down
-- Dropped in reverse creation order.
DROP INDEX IF EXISTS activity.idx__audit_log__performed_at;
DROP INDEX IF EXISTS activity.idx__audit_log__object;
DROP INDEX IF EXISTS activity.idx__audit_log__action_type_id;
DROP INDEX IF EXISTS activity.idx__audit_log__user_id;
DROP INDEX IF EXISTS activity.idx__tag_uses__user_id;
DROP INDEX IF EXISTS activity.idx__pool_views__user_id;
DROP INDEX IF EXISTS activity.idx__file_views__user_id;
DROP INDEX IF EXISTS activity.idx__sessions__token_hash;
DROP INDEX IF EXISTS activity.idx__sessions__user_id;
DROP INDEX IF EXISTS acl.idx__acl__user;
DROP INDEX IF EXISTS acl.idx__acl__object;
DROP INDEX IF EXISTS data.idx__file_pool__file_id;
DROP INDEX IF EXISTS data.idx__file_pool__pool_id;
DROP INDEX IF EXISTS data.idx__pools__creator_id;
DROP INDEX IF EXISTS data.idx__file_tag__file_id;
DROP INDEX IF EXISTS data.idx__file_tag__tag_id;
DROP INDEX IF EXISTS data.idx__files__phash;
DROP INDEX IF EXISTS data.idx__files__is_deleted;
DROP INDEX IF EXISTS data.idx__files__content_datetime;
DROP INDEX IF EXISTS data.idx__files__creator_id;
DROP INDEX IF EXISTS data.idx__files__mime_id;
DROP INDEX IF EXISTS data.idx__tag_rules__then;
DROP INDEX IF EXISTS data.idx__tag_rules__when;
DROP INDEX IF EXISTS data.idx__tags__creator_id;
DROP INDEX IF EXISTS data.idx__tags__category_id;
DROP INDEX IF EXISTS data.idx__categories__creator_id;
DROP INDEX IF EXISTS core.idx__users__name;

View File

@ -0,0 +1,49 @@
-- +goose Up
-- Seed data: MIME whitelist, entity types, audit action types, and the
-- bootstrap admin account.
INSERT INTO core.mime_types (name, extension) VALUES
    ('image/jpeg', 'jpg'),
    ('image/png', 'png'),
    ('image/gif', 'gif'),
    ('image/webp', 'webp'),
    ('video/mp4', 'mp4'),
    ('video/quicktime', 'mov'),
    ('video/x-msvideo', 'avi'),
    ('video/webm', 'webm'),
    ('video/3gpp', '3gp'),
    ('video/x-m4v', 'm4v');

INSERT INTO core.object_types (name) VALUES
    ('file'), ('tag'), ('category'), ('pool');

INSERT INTO activity.action_types (name) VALUES
    -- Auth
    ('user_login'), ('user_logout'),
    -- Files
    ('file_create'), ('file_edit'), ('file_delete'), ('file_restore'),
    ('file_permanent_delete'), ('file_replace'),
    -- Tags
    ('tag_create'), ('tag_edit'), ('tag_delete'),
    -- Categories
    ('category_create'), ('category_edit'), ('category_delete'),
    -- Pools
    ('pool_create'), ('pool_edit'), ('pool_delete'),
    -- Relations
    ('file_tag_add'), ('file_tag_remove'),
    ('file_pool_add'), ('file_pool_remove'),
    -- ACL
    ('acl_change'),
    -- Admin
    ('user_create'), ('user_delete'), ('user_block'), ('user_unblock'),
    ('user_role_change'),
    -- Sessions
    ('session_terminate');

-- SECURITY NOTE(review): the bootstrap admin password hash is committed to
-- the repository, so the corresponding password is effectively public —
-- change it immediately after the first login.
INSERT INTO core.users (name, password, is_admin, can_create) VALUES
    ('admin', '$2a$10$zk.VTFjRRxbkTE7cKfc7KOWeZfByk1VEkbkgZMJggI1fFf.yDEHZy', true, true);

-- +goose Down
DELETE FROM core.users WHERE name = 'admin';
DELETE FROM activity.action_types;
DELETE FROM core.object_types;
DELETE FROM core.mime_types;

View File

@ -0,0 +1,8 @@
// Package migrations carries the goose SQL migration files.
package migrations

import "embed"

// FS holds all goose migration SQL files, embedded at build time.
// NOTE(review): presumably handed to goose (e.g. goose.SetBaseFS) at
// startup — confirm against the caller.
//
//go:embed *.sql
var FS embed.FS

View File

@ -1,592 +0,0 @@
--
-- PostgreSQL database dump
--
-- Dumped from database version 14.18 (Ubuntu 14.18-0ubuntu0.22.04.1)
-- Dumped by pg_dump version 17.4
SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET transaction_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET xmloption = content;
SET client_min_messages = warning;
SET row_security = off;
--
-- Name: acl; Type: SCHEMA; Schema: -; Owner: -
--
CREATE SCHEMA acl;
--
-- Name: activity; Type: SCHEMA; Schema: -; Owner: -
--
CREATE SCHEMA activity;
--
-- Name: data; Type: SCHEMA; Schema: -; Owner: -
--
CREATE SCHEMA data;
--
-- Name: public; Type: SCHEMA; Schema: -; Owner: -
--
-- *not* creating schema, since initdb creates it
--
-- Name: system; Type: SCHEMA; Schema: -; Owner: -
--
CREATE SCHEMA system;
--
-- Name: pgcrypto; Type: EXTENSION; Schema: -; Owner: -
--
CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA public;
--
-- Name: EXTENSION pgcrypto; Type: COMMENT; Schema: -; Owner: -
--
COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions';
--
-- Name: uuid-ossp; Type: EXTENSION; Schema: -; Owner: -
--
CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA public;
--
-- Name: EXTENSION "uuid-ossp"; Type: COMMENT; Schema: -; Owner: -
--
COMMENT ON EXTENSION "uuid-ossp" IS 'generate universally unique identifiers (UUIDs)';
--
-- Name: add_file_to_tag_recursive(uuid, uuid); Type: FUNCTION; Schema: data; Owner: -
--
CREATE FUNCTION data.add_file_to_tag_recursive(f_id uuid, t_id uuid) RETURNS SETOF uuid
LANGUAGE plpgsql
AS $$
DECLARE
tmp uuid;
tt_id uuid;
ttt_id uuid;
BEGIN
INSERT INTO data.file_tag VALUES (f_id, t_id) ON CONFLICT DO NOTHING RETURNING tag_id INTO tmp;
IF tmp IS NULL THEN
RETURN;
END IF;
RETURN NEXT t_id;
FOR tt_id IN
SELECT a.add_tag_id FROM data.autotags a WHERE a.trigger_tag_id=t_id AND a.is_active
LOOP
FOR ttt_id IN SELECT data.add_file_to_tag_recursive(f_id, tt_id)
LOOP
RETURN NEXT ttt_id;
END LOOP;
END LOOP;
END;
$$;
--
-- Name: uuid_extract_timestamp(uuid); Type: FUNCTION; Schema: public; Owner: -
--
CREATE FUNCTION public.uuid_extract_timestamp(uuid_val uuid) RETURNS timestamp with time zone
LANGUAGE sql IMMUTABLE
AS $$
SELECT to_timestamp(
('x' || LEFT(REPLACE(uuid_val::TEXT, '-', ''), 12))::BIT(48)::BIGINT
/ 1000.0
);
$$;
--
-- Name: uuid_v7(timestamp with time zone); Type: FUNCTION; Schema: public; Owner: -
--
CREATE FUNCTION public.uuid_v7(cts timestamp with time zone DEFAULT clock_timestamp()) RETURNS uuid
LANGUAGE plpgsql
AS $$
DECLARE
state text = current_setting('uuidv7.old_tp',true);
old_tp text = split_part(state, ':',1);
base int = coalesce(nullif(split_part(state,':',4),'')::int,(random()*16777215/2-1)::int);
tp text;
entropy text;
seq text=base;
seqn int=split_part(state,':',2);
ver text = coalesce(split_part(state,':',3),to_hex(8+(random()*3)::int));
BEGIN
base = (random()*16777215/2-1)::int;
tp = lpad(to_hex(floor(extract(epoch from cts)*1000)::int8),12,'0')||'7';
if tp is distinct from old_tp then
old_tp = tp;
ver = to_hex(8+(random()*3)::int);
base = (random()*16777215/2-1)::int;
seqn = base;
else
seqn = seqn+(random()*1000)::int;
end if;
perform set_config('uuidv7.old_tp',old_tp||':'||seqn||':'||ver||':'||base, false);
entropy = md5(gen_random_uuid()::text);
seq = lpad(to_hex(seqn),6,'0');
return (tp || substring(seq from 1 for 3) || ver || substring(seq from 4 for 3) ||
substring(entropy from 1 for 12))::uuid;
END
$$;
SET default_table_access_method = heap;
--
-- Name: categories; Type: TABLE; Schema: acl; Owner: -
--
CREATE TABLE acl.categories (
user_id smallint NOT NULL,
category_id uuid NOT NULL,
view boolean NOT NULL,
edit boolean NOT NULL
);
--
-- Name: files; Type: TABLE; Schema: acl; Owner: -
--
CREATE TABLE acl.files (
user_id smallint NOT NULL,
file_id uuid NOT NULL,
view boolean NOT NULL,
edit boolean NOT NULL
);
--
-- Name: pools; Type: TABLE; Schema: acl; Owner: -
--
CREATE TABLE acl.pools (
user_id smallint NOT NULL,
pool_id uuid NOT NULL,
view boolean NOT NULL,
edit boolean NOT NULL
);
--
-- Name: tags; Type: TABLE; Schema: acl; Owner: -
--
CREATE TABLE acl.tags (
user_id smallint NOT NULL,
tag_id uuid NOT NULL,
view boolean NOT NULL,
edit boolean NOT NULL
);
--
-- Name: file_views; Type: TABLE; Schema: activity; Owner: -
--
CREATE TABLE activity.file_views (
file_id uuid NOT NULL,
"timestamp" timestamp with time zone NOT NULL,
user_id smallint NOT NULL
);
--
-- Name: pool_views; Type: TABLE; Schema: activity; Owner: -
--
CREATE TABLE activity.pool_views (
pool_id uuid NOT NULL,
"timestamp" timestamp with time zone NOT NULL,
user_id smallint NOT NULL
);
--
-- Name: sessions; Type: TABLE; Schema: activity; Owner: -
--
CREATE TABLE activity.sessions (
id integer NOT NULL,
token text NOT NULL,
user_id smallint NOT NULL,
user_agent character varying(256) NOT NULL,
started_at timestamp with time zone DEFAULT statement_timestamp() NOT NULL,
expires_at timestamp with time zone,
last_activity timestamp with time zone DEFAULT statement_timestamp() NOT NULL
);
--
-- Name: sessions_id_seq; Type: SEQUENCE; Schema: activity; Owner: -
--
CREATE SEQUENCE activity.sessions_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
--
-- Name: sessions_id_seq; Type: SEQUENCE OWNED BY; Schema: activity; Owner: -
--
ALTER SEQUENCE activity.sessions_id_seq OWNED BY activity.sessions.id;
--
-- Name: tag_uses; Type: TABLE; Schema: activity; Owner: -
--
CREATE TABLE activity.tag_uses (
tag_id uuid NOT NULL,
"timestamp" timestamp with time zone NOT NULL,
user_id smallint NOT NULL,
included boolean NOT NULL
);
--
-- Name: autotags; Type: TABLE; Schema: data; Owner: -
--
CREATE TABLE data.autotags (
trigger_tag_id uuid NOT NULL,
add_tag_id uuid NOT NULL,
is_active boolean DEFAULT true NOT NULL
);
--
-- Name: categories; Type: TABLE; Schema: data; Owner: -
--
CREATE TABLE data.categories (
id uuid DEFAULT public.uuid_v7() NOT NULL,
name character varying(256) NOT NULL,
notes text DEFAULT ''::text NOT NULL,
color character(6),
creator_id smallint NOT NULL
);
--
-- Name: file_pool; Type: TABLE; Schema: data; Owner: -
--
CREATE TABLE data.file_pool (
file_id uuid NOT NULL,
pool_id uuid NOT NULL,
number smallint NOT NULL
);
--
-- Name: file_tag; Type: TABLE; Schema: data; Owner: -
--
CREATE TABLE data.file_tag (
file_id uuid NOT NULL,
tag_id uuid NOT NULL
);
--
-- Name: files; Type: TABLE; Schema: data; Owner: -
--
CREATE TABLE data.files (
id uuid DEFAULT public.uuid_v7() NOT NULL,
name character varying(256),
mime_id smallint NOT NULL,
datetime timestamp with time zone DEFAULT clock_timestamp() NOT NULL,
notes text,
metadata jsonb NOT NULL,
creator_id smallint NOT NULL,
is_deleted boolean DEFAULT false NOT NULL
);
--
-- Name: pools; Type: TABLE; Schema: data; Owner: -
--
CREATE TABLE data.pools (
id uuid DEFAULT public.uuid_v7() NOT NULL,
name character varying(256) NOT NULL,
notes text,
creator_id smallint NOT NULL
);
--
-- Name: tags; Type: TABLE; Schema: data; Owner: -
--
CREATE TABLE data.tags (
id uuid DEFAULT public.uuid_v7() NOT NULL,
name character varying(256) NOT NULL,
notes text,
color character(6),
category_id uuid,
creator_id smallint NOT NULL
);
--
-- Name: mime; Type: TABLE; Schema: system; Owner: -
--
CREATE TABLE system.mime (
id smallint NOT NULL,
name character varying(127) NOT NULL,
extension character varying(16) NOT NULL
);
--
-- Name: mime_id_seq; Type: SEQUENCE; Schema: system; Owner: -
--
CREATE SEQUENCE system.mime_id_seq
AS smallint
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
--
-- Name: mime_id_seq; Type: SEQUENCE OWNED BY; Schema: system; Owner: -
--
ALTER SEQUENCE system.mime_id_seq OWNED BY system.mime.id;
--
-- Name: users; Type: TABLE; Schema: system; Owner: -
--
CREATE TABLE system.users (
id smallint NOT NULL,
name character varying(32) NOT NULL,
password text NOT NULL,
is_admin boolean DEFAULT false NOT NULL,
can_create boolean DEFAULT false NOT NULL
);
--
-- Name: users_id_seq; Type: SEQUENCE; Schema: system; Owner: -
--
CREATE SEQUENCE system.users_id_seq
AS smallint
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
--
-- Name: users_id_seq; Type: SEQUENCE OWNED BY; Schema: system; Owner: -
--
ALTER SEQUENCE system.users_id_seq OWNED BY system.users.id;
--
-- Name: sessions id; Type: DEFAULT; Schema: activity; Owner: -
--
ALTER TABLE ONLY activity.sessions ALTER COLUMN id SET DEFAULT nextval('activity.sessions_id_seq'::regclass);
--
-- Name: mime id; Type: DEFAULT; Schema: system; Owner: -
--
ALTER TABLE ONLY system.mime ALTER COLUMN id SET DEFAULT nextval('system.mime_id_seq'::regclass);
--
-- Name: users id; Type: DEFAULT; Schema: system; Owner: -
--
ALTER TABLE ONLY system.users ALTER COLUMN id SET DEFAULT nextval('system.users_id_seq'::regclass);
--
-- Name: categories categories_pkey; Type: CONSTRAINT; Schema: acl; Owner: -
--
ALTER TABLE ONLY acl.categories
ADD CONSTRAINT categories_pkey PRIMARY KEY (user_id, category_id);
--
-- Name: files files_pkey; Type: CONSTRAINT; Schema: acl; Owner: -
--
ALTER TABLE ONLY acl.files
ADD CONSTRAINT files_pkey PRIMARY KEY (user_id, file_id);
--
-- Name: pools pools_pkey; Type: CONSTRAINT; Schema: acl; Owner: -
--
ALTER TABLE ONLY acl.pools
ADD CONSTRAINT pools_pkey PRIMARY KEY (user_id, pool_id);
--
-- Name: tags tags_pkey; Type: CONSTRAINT; Schema: acl; Owner: -
--
ALTER TABLE ONLY acl.tags
ADD CONSTRAINT tags_pkey PRIMARY KEY (user_id, tag_id);
--
-- Name: file_views file_views_pkey; Type: CONSTRAINT; Schema: activity; Owner: -
--
ALTER TABLE ONLY activity.file_views
ADD CONSTRAINT file_views_pkey PRIMARY KEY (file_id, "timestamp", user_id);
--
-- Name: pool_views pool_views_pkey; Type: CONSTRAINT; Schema: activity; Owner: -
--
ALTER TABLE ONLY activity.pool_views
ADD CONSTRAINT pool_views_pkey PRIMARY KEY (pool_id, "timestamp", user_id);
--
-- Name: sessions sessions_pkey; Type: CONSTRAINT; Schema: activity; Owner: -
--
ALTER TABLE ONLY activity.sessions
ADD CONSTRAINT sessions_pkey PRIMARY KEY (id);
--
-- Name: tag_uses tag_uses_pkey; Type: CONSTRAINT; Schema: activity; Owner: -
--
ALTER TABLE ONLY activity.tag_uses
ADD CONSTRAINT tag_uses_pkey PRIMARY KEY (tag_id, "timestamp", user_id);
--
-- Name: autotags autotags_pkey; Type: CONSTRAINT; Schema: data; Owner: -
--
ALTER TABLE ONLY data.autotags
ADD CONSTRAINT autotags_pkey PRIMARY KEY (trigger_tag_id, add_tag_id);
--
-- Name: categories categories_pkey; Type: CONSTRAINT; Schema: data; Owner: -
--
ALTER TABLE ONLY data.categories
ADD CONSTRAINT categories_pkey PRIMARY KEY (id);
--
-- Name: file_pool file_pool_pkey; Type: CONSTRAINT; Schema: data; Owner: -
--
ALTER TABLE ONLY data.file_pool
ADD CONSTRAINT file_pool_pkey PRIMARY KEY (file_id, pool_id, number);
--
-- Name: file_tag file_tag_pkey; Type: CONSTRAINT; Schema: data; Owner: -
--
ALTER TABLE ONLY data.file_tag
ADD CONSTRAINT file_tag_pkey PRIMARY KEY (file_id, tag_id);
--
-- Name: files files_pkey; Type: CONSTRAINT; Schema: data; Owner: -
--
ALTER TABLE ONLY data.files
ADD CONSTRAINT files_pkey PRIMARY KEY (id);
--
-- Name: pools pools_pkey; Type: CONSTRAINT; Schema: data; Owner: -
--
ALTER TABLE ONLY data.pools
ADD CONSTRAINT pools_pkey PRIMARY KEY (id);
--
-- Name: tags tags_pkey; Type: CONSTRAINT; Schema: data; Owner: -
--
ALTER TABLE ONLY data.tags
ADD CONSTRAINT tags_pkey PRIMARY KEY (id);
--
-- Name: mime mime_pkey; Type: CONSTRAINT; Schema: system; Owner: -
--
ALTER TABLE ONLY system.mime
ADD CONSTRAINT mime_pkey PRIMARY KEY (id);
--
-- Name: users users_pkey; Type: CONSTRAINT; Schema: system; Owner: -
--
ALTER TABLE ONLY system.users
ADD CONSTRAINT users_pkey PRIMARY KEY (id);
--
-- PostgreSQL database dump complete
--

383
docs/FRONTEND_STRUCTURE.md Normal file
View File

@ -0,0 +1,383 @@
# Tanabata File Manager — Frontend Structure
## Stack
- **Framework**: SvelteKit (SPA mode, `ssr: false`)
- **Language**: TypeScript
- **CSS**: Tailwind CSS + CSS custom properties (hybrid)
- **API types**: Auto-generated via openapi-typescript
- **PWA**: Service worker + web manifest
- **Font**: Epilogue (variable weight)
- **Package manager**: npm
## Monorepo Layout
```
tanabata/
├── backend/ ← Go project (go.mod in here)
│ ├── cmd/
│ ├── internal/
│ ├── migrations/
│ ├── go.mod
│ └── go.sum
├── frontend/ ← SvelteKit project (package.json in here)
│ └── (see below)
├── openapi.yaml ← Shared API contract (root level)
├── docker-compose.yml
├── Dockerfile
├── .env.example
└── README.md
```
`openapi.yaml` lives at repository root — both backend and frontend
reference it. The frontend generates types from it; the backend
validates its handlers against it.
## Frontend Directory Layout
```
frontend/
├── package.json
├── svelte.config.js
├── vite.config.ts
├── tsconfig.json
├── tailwind.config.ts
├── postcss.config.js
├── src/
│ ├── app.html # Shell HTML (PWA meta, font preload)
│ ├── app.css # Tailwind directives + CSS custom properties
│ ├── hooks.server.ts # Server hooks (not used in SPA mode)
│ ├── hooks.client.ts # Client hooks (global error handling)
│ │
│ ├── lib/ # Shared code ($lib/ alias)
│ │ │
│ │ ├── api/ # API client layer
│ │ │ ├── client.ts # Base fetch wrapper: auth headers, token refresh,
│ │ │ │ # error parsing, base URL
│ │ │ ├── files.ts # listFiles, getFile, uploadFile, deleteFile, etc.
│ │ │ ├── tags.ts # listTags, createTag, getTag, updateTag, etc.
│ │ │ ├── categories.ts # Category API functions
│ │ │ ├── pools.ts # Pool API functions
│ │ │ ├── auth.ts # login, logout, refresh, listSessions
│ │ │ ├── acl.ts # getPermissions, setPermissions
│ │ │ ├── users.ts # getMe, updateMe, admin user CRUD
│ │ │ ├── audit.ts # queryAuditLog
│ │ │ ├── schema.ts # AUTO-GENERATED from openapi.yaml (do not edit)
│ │ │ └── types.ts # Friendly type aliases:
│ │ │ # export type File = components["schemas"]["File"]
│ │ │ # export type Tag = components["schemas"]["Tag"]
│ │ │
│ │ ├── components/ # Reusable UI components
│ │ │ │
│ │ │ ├── layout/ # App shell
│ │ │ │ ├── Navbar.svelte # Bottom navigation bar (mobile-first)
│ │ │ │ ├── Header.svelte # Section header with sorting controls
│ │ │ │ ├── SelectionBar.svelte # Floating bar for multi-select actions
│ │ │ │ └── Loader.svelte # Full-screen loading overlay
│ │ │ │
│ │ │ ├── file/ # File-related components
│ │ │ │ ├── FileGrid.svelte # Thumbnail grid with infinite scroll
│ │ │ │ ├── FileCard.svelte # Single thumbnail (160×160, selectable)
│ │ │ │ ├── FileViewer.svelte # Full-screen preview with prev/next navigation
│ │ │ │ ├── FileUpload.svelte # Upload form + drag-and-drop zone
│ │ │ │ ├── FileDetail.svelte # Metadata editor (notes, datetime, tags)
│ │ │ │ └── FilterBar.svelte # DSL filter builder UI
│ │ │ │
│ │ │ ├── tag/ # Tag-related components
│ │ │ │ ├── TagBadge.svelte # Colored pill with tag name
│ │ │ │ ├── TagPicker.svelte # Searchable tag selector (add/remove)
│ │ │ │ ├── TagList.svelte # Tag grid for section view
│ │ │ │ └── TagRuleEditor.svelte # Auto-tag rule management
│ │ │ │
│ │ │ ├── pool/ # Pool-related components
│ │ │ │ ├── PoolCard.svelte # Pool preview card
│ │ │ │ ├── PoolFileList.svelte # Ordered file list with drag reorder
│ │ │ │ └── PoolDetail.svelte # Pool metadata editor
│ │ │ │
│ │ │ ├── acl/ # Access control components
│ │ │ │ └── PermissionEditor.svelte # User permission grid
│ │ │ │
│ │ │ └── common/ # Shared primitives
│ │ │ ├── Button.svelte
│ │ │ ├── Modal.svelte
│ │ │ ├── ConfirmDialog.svelte
│ │ │ ├── Toast.svelte
│ │ │ ├── InfiniteScroll.svelte
│ │ │ ├── Pagination.svelte
│ │ │ ├── SortDropdown.svelte
│ │ │ ├── SearchInput.svelte
│ │ │ ├── ColorPicker.svelte
│ │ │ ├── Checkbox.svelte # Three-state: checked, unchecked, partial
│ │ │ └── EmptyState.svelte
│ │ │
│ │ ├── stores/ # Svelte stores (global state)
│ │ │ ├── auth.ts # Current user, JWT tokens, isAuthenticated
│ │ │ ├── selection.ts # Selected item IDs, selection mode toggle
│ │ │ ├── sorting.ts # Per-section sort key + order (persisted to localStorage)
│ │ │ ├── theme.ts # Dark/light mode (persisted, respects prefers-color-scheme)
│ │ │ └── toast.ts # Notification queue (success, error, info)
│ │ │
│ │ └── utils/ # Pure helper functions
│ │ ├── format.ts # formatDate, formatFileSize, formatDuration
│ │ ├── dsl.ts # Filter DSL builder: UI state → query string
│ │ ├── pwa.ts # PWA reset, cache clear, update prompt
│ │ └── keyboard.ts # Keyboard shortcut helpers (Ctrl+A, Escape, etc.)
│ │
│ ├── routes/ # SvelteKit file-based routing
│ │ │
│ │ ├── +layout.svelte # Root layout: Navbar, theme wrapper, toast container
│ │ ├── +layout.ts # Root load: auth guard → redirect to /login if no token
│ │ │
│ │ ├── +page.svelte # / → redirect to /files
│ │ │
│ │ ├── login/
│ │ │ └── +page.svelte # Login form (decorative Tanabata images)
│ │ │
│ │ ├── files/
│ │ │ ├── +page.svelte # File grid: filter bar, sort, multi-select, upload
│ │ │ ├── +page.ts # Load: initial file list (cursor page)
│ │ │ ├── [id]/
│ │ │ │ ├── +page.svelte # File view: preview, metadata, tags, ACL
│ │ │ │ └── +page.ts # Load: file detail + tags
│ │ │ └── trash/
│ │ │ ├── +page.svelte # Trash: restore / permanent delete
│ │ │ └── +page.ts
│ │ │
│ │ ├── tags/
│ │ │ ├── +page.svelte # Tag list: search, sort, multi-select
│ │ │ ├── +page.ts
│ │ │ ├── new/
│ │ │ │ └── +page.svelte # Create tag form
│ │ │ └── [id]/
│ │ │ ├── +page.svelte # Tag detail: edit, category, rules, parent tags
│ │ │ └── +page.ts
│ │ │
│ │ ├── categories/
│ │ │ ├── +page.svelte # Category list
│ │ │ ├── +page.ts
│ │ │ ├── new/
│ │ │ │ └── +page.svelte
│ │ │ └── [id]/
│ │ │ ├── +page.svelte # Category detail: edit, view tags
│ │ │ └── +page.ts
│ │ │
│ │ ├── pools/
│ │ │ ├── +page.svelte # Pool list
│ │ │ ├── +page.ts
│ │ │ ├── new/
│ │ │ │ └── +page.svelte
│ │ │ └── [id]/
│ │ │ ├── +page.svelte # Pool detail: files (reorderable), filter, edit
│ │ │ └── +page.ts
│ │ │
│ │ ├── settings/
│ │ │ ├── +page.svelte # Profile: name, password, active sessions
│ │ │ └── +page.ts
│ │ │
│ │ └── admin/
│ │ ├── +layout.svelte # Admin layout: restrict to is_admin
│ │ ├── users/
│ │ │ ├── +page.svelte # User management list
│ │ │ ├── +page.ts
│ │ │ └── [id]/
│ │ │ ├── +page.svelte # User detail: role, block/unblock
│ │ │ └── +page.ts
│ │ └── audit/
│ │ ├── +page.svelte # Audit log with filters
│ │ └── +page.ts
│ │
│ └── service-worker.ts # PWA: offline cache for pinned files, app shell caching
└── static/
├── favicon.png
├── favicon.ico
├── manifest.webmanifest # PWA manifest (name, icons, theme_color)
├── images/
│ ├── tanabata-left.png # Login page decorations (from current design)
│ ├── tanabata-right.png
│ └── icons/ # PWA icons (192×192, 512×512, etc.)
└── fonts/
└── Epilogue-VariableFont_wght.ttf
```
## Key Architecture Decisions
### CSS Hybrid: Tailwind + Custom Properties
Theme colors defined as CSS custom properties in `app.css`:
```css
@tailwind base;
@tailwind components;
@tailwind utilities;
:root {
--color-bg-primary: #312F45;
--color-bg-secondary: #181721;
--color-bg-elevated: #111118;
--color-accent: #9592B5;
--color-accent-hover: #7D7AA4;
--color-text-primary: #f0f0f0;
--color-text-muted: #9999AD;
--color-danger: #DB6060;
--color-info: #4DC7ED;
--color-warning: #F5E872;
--color-tag-default: #444455;
}
:root[data-theme="light"] {
--color-bg-primary: #f5f5f5;
--color-bg-secondary: #ffffff;
/* ... */
}
```
Tailwind references them in `tailwind.config.ts`:
```ts
export default {
theme: {
extend: {
colors: {
bg: {
primary: 'var(--color-bg-primary)',
secondary: 'var(--color-bg-secondary)',
elevated: 'var(--color-bg-elevated)',
},
accent: {
DEFAULT: 'var(--color-accent)',
hover: 'var(--color-accent-hover)',
},
// ...
},
fontFamily: {
sans: ['Epilogue', 'sans-serif'],
},
},
},
darkMode: 'class', // controlled via data-theme attribute
};
```
Usage in components: `<div class="bg-bg-primary text-text-primary rounded-xl p-4">`.
Complex cases use scoped `<style>` inside `.svelte` files.
### API Client Pattern
`client.ts` — thin wrapper around fetch:
```ts
// $lib/api/client.ts
import { get } from 'svelte/store';
import { authStore } from '$lib/stores/auth';
const BASE = '/api/v1';
async function request<T>(path: string, init?: RequestInit): Promise<T> {
const token = get(authStore).accessToken;
const res = await fetch(BASE + path, {
...init,
headers: {
'Content-Type': 'application/json',
...(token && { Authorization: `Bearer ${token}` }),
...init?.headers,
},
});
if (res.status === 401) {
// attempt refresh, retry once
}
if (!res.ok) {
const err = await res.json();
throw new ApiError(res.status, err.code, err.message, err.details);
}
if (res.status === 204) return undefined as T;
return res.json();
}
export const api = {
get: <T>(path: string) => request<T>(path),
post: <T>(path: string, body?: unknown) =>
request<T>(path, { method: 'POST', body: JSON.stringify(body) }),
patch: <T>(path: string, body?: unknown) =>
request<T>(path, { method: 'PATCH', body: JSON.stringify(body) }),
put: <T>(path: string, body?: unknown) =>
request<T>(path, { method: 'PUT', body: JSON.stringify(body) }),
delete: <T>(path: string) => request<T>(path, { method: 'DELETE' }),
upload: <T>(path: string, formData: FormData) =>
request<T>(path, { method: 'POST', body: formData, headers: {} }),
};
```
Domain-specific modules use it:
```ts
// $lib/api/files.ts
import { api } from './client';
import type { File, FileCursorPage } from './types';
export function listFiles(params: Record<string, string>) {
const qs = new URLSearchParams(params).toString();
return api.get<FileCursorPage>(`/files?${qs}`);
}
export function uploadFile(formData: FormData) {
return api.upload<File>('/files', formData);
}
```
### Type Generation
Script in `package.json`:
```json
{
"scripts": {
"generate:types": "openapi-typescript ../openapi.yaml -o src/lib/api/schema.ts",
"dev": "npm run generate:types && vite dev",
"build": "npm run generate:types && vite build"
}
}
```
Friendly aliases in `types.ts`:
```ts
import type { components } from './schema';
export type File = components['schemas']['File'];
export type Tag = components['schemas']['Tag'];
export type Category = components['schemas']['Category'];
export type Pool = components['schemas']['Pool'];
export type FileCursorPage = components['schemas']['FileCursorPage'];
export type TagOffsetPage = components['schemas']['TagOffsetPage'];
export type Error = components['schemas']['Error'];
// ...
```
### SPA Mode
`svelte.config.js`:
```js
import adapter from '@sveltejs/adapter-static';
export default {
kit: {
adapter: adapter({ fallback: 'index.html' }),
// SPA: all routes handled client-side
},
};
```
The Go backend serves `index.html` for all non-API routes (SPA fallback).
In development, Vite dev server proxies `/api` to the Go backend.
### PWA
`service-worker.ts` handles:
- App shell caching (HTML, CSS, JS, fonts)
- User-pinned file caching (explicit, via UI button)
- Cache versioning and cleanup on update
- Reset function (clear all caches except pinned files)

View File

@ -0,0 +1,320 @@
# Tanabata File Manager — Go Project Structure
## Stack
- **Router**: Gin
- **Database**: pgx v5 (pgxpool)
- **Migrations**: goose v3 + go:embed (auto-migrate on startup)
- **Auth**: JWT (golang-jwt/jwt/v5)
- **Config**: environment variables via .env (joho/godotenv)
- **Logging**: slog (stdlib, Go 1.21+)
- **Validation**: go-playground/validator/v10
- **EXIF**: rwcarlsen/goexif or dsoprea/go-exif
- **Image processing**: disintegration/imaging (thumbnails, previews)
- **Architecture**: Clean Architecture (domain → service → repository/handler)
## Monorepo Layout
```
tanabata/
├── backend/ ← Go project
├── frontend/ ← SvelteKit project
├── openapi.yaml ← Shared API contract
├── docker-compose.yml
├── Dockerfile
├── .env.example
└── README.md
```
## Backend Directory Layout
```
backend/
├── cmd/
│ └── server/
│ └── main.go # Entrypoint: config → DB → migrate → wire → run
├── internal/
│ │
│ ├── domain/ # Pure business entities & value objects
│ │ ├── file.go # File, FileFilter, FilePage
│ │ ├── tag.go # Tag, TagRule
│ │ ├── category.go # Category
│ │ ├── pool.go # Pool, PoolFile
│ │ ├── user.go # User, Session
│ │ ├── acl.go # Permission, ObjectType
│ │ ├── audit.go # AuditEntry, ActionType
│ │ └── errors.go # Domain error types (ErrNotFound, ErrForbidden, etc.)
│ │
│ ├── port/ # Interfaces (ports) — contracts between layers
│ │ ├── repository.go # FileRepo, TagRepo, CategoryRepo, PoolRepo,
│ │ │ # UserRepo, SessionRepo, ACLRepo, AuditRepo,
│ │ │ # MimeRepo, TagRuleRepo
│ │ └── storage.go # FileStorage interface (disk operations)
│ │
│ ├── service/ # Business logic (use cases)
│ │ ├── file_service.go # Upload, update, delete, trash/restore, replace,
│ │ │ # import, filter/list, duplicate detection
│ │ ├── tag_service.go # CRUD + auto-tag application logic
│ │ ├── category_service.go # CRUD (thin, delegates to repo + ACL + audit)
│ │ ├── pool_service.go # CRUD + file ordering, add/remove files
│ │ ├── auth_service.go # Login, logout, JWT issue/refresh, session management
│ │ ├── acl_service.go # Permission checks, grant/revoke
│ │ ├── audit_service.go # Log actions, query audit log
│ │ └── user_service.go # Profile update, admin CRUD, block/unblock
│ │
│ ├── handler/ # HTTP layer (Gin handlers)
│ │ ├── router.go # Route registration, middleware wiring
│ │ ├── middleware.go # Auth middleware (JWT extraction → context)
│ │ ├── request.go # Common request parsing helpers
│ │ ├── response.go # Error/success response builders
│ │ ├── file_handler.go # /files endpoints
│ │ ├── tag_handler.go # /tags endpoints
│ │ ├── category_handler.go # /categories endpoints
│ │ ├── pool_handler.go # /pools endpoints
│ │ ├── auth_handler.go # /auth endpoints
│ │ ├── acl_handler.go # /acl endpoints
│ │ ├── user_handler.go # /users endpoints
│ │ └── audit_handler.go # /audit endpoints
│ │
│ ├── db/ # Database adapters
│ │ ├── db.go # Common helpers: pagination, repo factory, transactor base
│ │ └── postgres/ # PostgreSQL implementation
│ │ ├── postgres.go # pgxpool init, tx-from-context helpers
│ │ ├── file_repo.go # FileRepo implementation
│ │ ├── tag_repo.go # TagRepo + TagRuleRepo implementation
│ │ ├── category_repo.go # CategoryRepo implementation
│ │ ├── pool_repo.go # PoolRepo implementation
│ │ ├── user_repo.go # UserRepo implementation
│ │ ├── session_repo.go # SessionRepo implementation
│ │ ├── acl_repo.go # ACLRepo implementation
│ │ ├── audit_repo.go # AuditRepo implementation
│ │ ├── mime_repo.go # MimeRepo implementation
│ │ └── filter_parser.go # DSL → SQL WHERE clause builder
│ │
│ ├── storage/ # File storage adapter
│ │ └── disk.go # FileStorage implementation (read/write/delete on disk)
│ │
│ └── config/ # Configuration
│ └── config.go # Struct + loader from env vars
├── migrations/ # SQL migration files (goose format)
│ ├── 001_init_schemas.sql
│ ├── 002_core_tables.sql
│ ├── 003_data_tables.sql
│ ├── 004_acl_tables.sql
│ ├── 005_activity_tables.sql
│ ├── 006_indexes.sql
│ └── 007_seed_data.sql
├── go.mod
└── go.sum
```
## Layer Dependency Rules
```
handler → service → port (interfaces) ← db/postgres / storage
domain (entities, value objects, errors)
```
- **domain/**: zero imports from other internal packages. Only stdlib.
- **port/**: imports only domain/. Defines interfaces.
- **service/**: imports domain/ and port/. Never imports db/ or handler/.
- **handler/**: imports domain/ and service/. Never imports db/.
- **db/postgres/**: imports domain/, port/, and db/ (common helpers). Implements port interfaces.
- **db/**: imports domain/ and port/. Shared utilities for all DB adapters.
- **storage/**: imports domain/ and port/. Implements FileStorage.
No layer may import a layer above it. No circular dependencies.
## Key Design Decisions
### Dependency Injection (Wiring)
Manual wiring in `cmd/server/main.go`. No DI frameworks.
```go
// Pseudocode
pool := postgres.NewPool(cfg.DatabaseURL)
goose.Up(pool, migrations)
// Repos (all from internal/db/postgres/)
fileRepo := postgres.NewFileRepo(pool)
tagRepo := postgres.NewTagRepo(pool)
// ...
// Storage
diskStore := storage.NewDiskStorage(cfg.FilesPath)
// Services
aclSvc := service.NewACLService(aclRepo, objectTypeRepo)
auditSvc := service.NewAuditService(auditRepo, actionTypeRepo)
fileSvc := service.NewFileService(fileRepo, mimeRepo, tagRepo, diskStore, aclSvc, auditSvc)
tagSvc := service.NewTagService(tagRepo, tagRuleRepo, aclSvc, auditSvc)
// ...
// Handlers
fileHandler := handler.NewFileHandler(fileSvc, tagSvc)
// ...
router := handler.NewRouter(cfg, fileHandler, tagHandler, ...)
router.Run(cfg.ListenAddr)
```
### Context Propagation
Every service method receives `context.Context` as the first argument.
The handler extracts user info from JWT (via middleware) and puts it
into context. Services read the current user from context for ACL checks
and audit logging.
```go
// middleware.go
func (m *AuthMiddleware) Handle(c *gin.Context) {
claims := parseJWT(c.GetHeader("Authorization"))
ctx := domain.WithUser(c.Request.Context(), claims.UserID, claims.IsAdmin)
c.Request = c.Request.WithContext(ctx)
c.Next()
}
// domain/context.go
type ctxKey int
const userKey ctxKey = iota
func WithUser(ctx context.Context, userID int16, isAdmin bool) context.Context { ... }
func UserFromContext(ctx context.Context) (userID int16, isAdmin bool) { ... }
```
### Transaction Management
Repository interfaces include a `Transactor`:
```go
// port/repository.go
type Transactor interface {
WithTx(ctx context.Context, fn func(ctx context.Context) error) error
}
```
The postgres implementation wraps `pgxpool.Pool.BeginTx`. Inside `fn`,
all repo calls use the transaction from context. This allows services
to compose multiple repo calls in a single transaction:
```go
// service/file_service.go
func (s *FileService) Upload(ctx context.Context, input UploadInput) (*domain.File, error) {
return s.tx.WithTx(ctx, func(ctx context.Context) error {
file, err := s.fileRepo.Create(ctx, ...) // uses tx
if err != nil { return err }
for _, tagID := range input.TagIDs {
s.tagRepo.AddFileTag(ctx, file.ID, tagID) // same tx
}
s.auditRepo.Log(ctx, ...) // same tx
return nil
})
}
```
### ACL Check Pattern
ACL logic is centralized in `ACLService`. Other services call it before
any data mutation or retrieval:
```go
// service/acl_service.go
func (s *ACLService) CanView(ctx context.Context, objectType string, objectID uuid.UUID) error {
userID, isAdmin := domain.UserFromContext(ctx)
if isAdmin { return nil }
// Check is_public on the object
// If not public, check creator_id == userID
// If not creator, check acl.permissions
// Return domain.ErrForbidden if none match
}
```
### Error Mapping
Domain errors → HTTP status codes (handled in handler/response.go):
| Domain Error | HTTP Status | Error Code |
|-----------------------|-------------|-------------------|
| ErrNotFound | 404 | not_found |
| ErrForbidden | 403 | forbidden |
| ErrUnauthorized | 401 | unauthorized |
| ErrConflict | 409 | conflict |
| ErrValidation | 400 | validation_error |
| ErrUnsupportedMIME | 415 | unsupported_mime |
| (unexpected) | 500 | internal_error |
### Filter DSL
The DSL parser lives in `db/postgres/filter_parser.go` because it produces
SQL WHERE clauses — it is a PostgreSQL-specific adapter concern.
The service layer passes the raw DSL string to the repository; the
repository parses it and builds the query.
For a different DBMS, a corresponding parser would live in
`db/<dbms>/filter_parser.go`.
The interface:
```go
// port/repository.go
type FileRepo interface {
List(ctx context.Context, params FileListParams) (*domain.FilePage, error)
// ...
}
// domain/file.go
type FileListParams struct {
Filter string // raw DSL string
Sort string
Order string
Cursor string
Anchor *uuid.UUID
Direction string // "forward" or "backward"
Limit int
Trash bool
Search string
}
```
### JWT Structure
```go
type Claims struct {
jwt.RegisteredClaims
UserID int16 `json:"uid"`
IsAdmin bool `json:"adm"`
SessionID int `json:"sid"`
}
```
Access token: short-lived (15 min). Refresh token: long-lived (30 days),
stored as hash in `activity.sessions.token_hash`.
### Configuration (.env)
```env
# Server
LISTEN_ADDR=:8080
JWT_SECRET=<random-32-bytes>
JWT_ACCESS_TTL=15m
JWT_REFRESH_TTL=720h
# Database
DATABASE_URL=postgres://user:pass@host:5432/tanabata?sslmode=disable
# Storage
FILES_PATH=/data/files
THUMBS_CACHE_PATH=/data/thumbs
# Thumbnails
THUMB_WIDTH=160
THUMB_HEIGHT=160
PREVIEW_WIDTH=1920
PREVIEW_HEIGHT=1080
# Import
IMPORT_PATH=/data/import
```

View File

@ -1,212 +0,0 @@
@startuml Tanabata File Manager entity relationship diagram
' skinparam linetype ortho
' ========== SYSTEM ==========
entity "system.users" as usr {
* id : smallserial <<generated>>
--
* name : varchar(32)
* password : text
* is_admin : boolean
* can_create : boolean
}
entity "system.mime" as mime {
* id : smallserial <<generated>>
--
* name : varchar(127)
* extension : varchar(16)
}
' ========== DATA ==========
entity "data.categories" as cty {
* id : uuid <<generated>>
--
* name : varchar(256)
notes : text
color : char(6)
' * created_at : timestamptz <<generated>>
* creator_id : smallint
' * is_private : boolean
}
cty::creator_id }o--|| usr::id
entity "data.files" as fle {
* id : uuid <<generated>>
--
name : varchar(256)
* mime_id : smallint
* datetime : timestamptz
notes : text
* metadata : jsonb
' * created_at : timestamptz <<generated>>
* creator_id : smallint
' * is_private : boolean
* is_deleted : boolean
}
fle::mime_id }o--|| mime::id
fle::creator_id }o--|| usr::id
entity "data.tags" as tag {
* id : uuid <<generated>>
--
* name : varchar(256)
notes : text
color : char(6)
category_id : uuid
' * created_at : timestamptz <<generated>>
* creator_id : smallint
' * is_private : boolean
}
tag::category_id }o--o| cty::id
tag::creator_id }o--|| usr::id
entity "data.file_tag" as ft {
* file_id : uuid
* tag_id : uuid
}
ft::file_id }o--|| fle::id
ft::tag_id }o--|| tag::id
entity "data.autotags" as atg {
* trigger_tag_id : uuid
* add_tag_id : uuid
--
* is_active : boolean
}
atg::trigger_tag_id }o--|| tag::id
atg::add_tag_id }o--|| tag::id
entity "data.pools" as pool {
* id : uuid <<generated>>
--
* name : varchar(256)
notes : text
' parent_id : uuid
' * created_at : timestamptz
* creator_id : smallint
' * is_private : boolean
}
pool::creator_id }o--|| usr::id
' pool::parent_id }o--o| pool::id
entity "data.file_pool" as fp {
* file_id : uuid
* pool_id : uuid
* number : smallint
}
fp::file_id }o--|| fle::id
fp::pool_id }o--|| pool::id
' ========== ACL ==========
entity "acl.files" as acl_f {
* user_id : smallint
* file_id : uuid
--
* view : boolean
* edit : boolean
}
acl_f::user_id }o--|| usr::id
acl_f::file_id }o--|| fle::id
entity "acl.tags" as acl_t {
* user_id : smallint
* tag_id : uuid
--
* view : boolean
* edit : boolean
' * files_view : boolean
' * files_edit : boolean
}
acl_t::user_id }o--|| usr::id
acl_t::tag_id }o--|| tag::id
entity "acl.categories" as acl_c {
* user_id : smallint
* category_id : uuid
--
* view : boolean
* edit : boolean
' * tags_view : boolean
' * tags_edit : boolean
}
acl_c::user_id }o--|| usr::id
acl_c::category_id }o--|| cty::id
entity "acl.pools" as acl_p {
* user_id : smallint
* pool_id : uuid
--
* view : boolean
* edit : boolean
' * files_view : boolean
' * files_edit : boolean
}
acl_p::user_id }o--|| usr::id
acl_p::pool_id }o--|| pool::id
' ========== ACTIVITY ==========
entity "activity.sessions" as ssn {
* id : serial <<generated>>
--
* token : text
* user_id : smallint
* user_agent : varchar(512)
* started_at : timestamptz
expires_at : timestamptz
* last_activity : timestamptz
}
ssn::user_id }o--|| usr::id
entity "activity.file_views" as fv {
* file_id : uuid
* timestamp : timestamptz
* user_id : smallint
}
fv::file_id }o--|| fle::id
fv::user_id }o--|| usr::id
entity "activity.tag_uses" as tu {
* tag_id : uuid
* timestamp : timestamptz
* user_id : smallint
--
* included : boolean
}
tu::tag_id }o--|| tag::id
tu::user_id }o--|| usr::id
entity "activity.pool_views" as pv {
* pool_id : uuid
* timestamp : timestamptz
* user_id : smallint
}
pv::pool_id }o--|| pool::id
pv::user_id }o--|| usr::id
@enduml

View File

@ -0,0 +1,374 @@
from configparser import ConfigParser
from psycopg2.pool import ThreadedConnectionPool
from psycopg2.extras import RealDictCursor
from contextlib import contextmanager
from os import access, W_OK, makedirs, chmod, system
from os.path import isfile, join, basename
from shutil import move
from magic import Magic
from preview_generator.manager import PreviewManager
# Module-level singletons, populated by Initialize() / load_config() /
# db_connect(); None until then.
conf = None        # ConfigParser with the tfm.conf contents
mage = None        # magic.Magic(mime=True) instance for MIME detection
previewer = None   # preview_generator.PreviewManager (thumbnail cache in Paths.Thumbs)
db_pool = None     # psycopg2 ThreadedConnectionPool
# Default sort order for every listing section: newest first
# ("created" descending). Each section gets its own dict instance.
DEFAULT_SORTING = {
    section: {"key": "created", "asc": False}
    for section in ("files", "tags", "categories", "pools")
}
def Initialize(conf_path="/etc/tfm/tfm.conf"):
    """Load configuration and set up the module singletons.

    Initializes MIME detection (libmagic), the preview generator (its
    cache lives in the Paths.Thumbs directory) and the database
    connection pool from the [DB.limits] / [DB.params] sections.

    Raises KeyError if a required config section or key is missing, and
    ValueError if the pool bounds are not integers.
    """
    global mage, previewer
    load_config(conf_path)
    mage = Magic(mime=True)
    previewer = PreviewManager(conf["Paths"]["Thumbs"])
    # ConfigParser values are strings; convert the pool bounds explicitly
    # instead of relying on the pool implementation to coerce them.
    db_connect(
        int(conf["DB.limits"]["MinimumConnections"]),
        int(conf["DB.limits"]["MaximumConnections"]),
        **conf["DB.params"],
    )
def load_config(path):
    """Read the ini-style config at *path* into the module-global ``conf``.

    ConfigParser.read() silently ignores unreadable files, which used to
    leave ``conf`` empty and surface much later as a KeyError; fail fast
    instead. Returns the parser (also stored in ``conf``) for convenience.

    Raises FileNotFoundError when the file cannot be read.
    """
    global conf
    parser = ConfigParser()
    if not parser.read(path):
        raise FileNotFoundError("Config file not found or unreadable: %s" % path)
    conf = parser
    return conf
def db_connect(minconn, maxconn, **kwargs):
    """Create the module-global psycopg2 connection pool.

    minconn/maxconn bound the pool size; kwargs go straight to
    ThreadedConnectionPool (host, dbname, user, password, ...).
    """
    global db_pool
    db_pool = ThreadedConnectionPool(minconn, maxconn, **kwargs)
@contextmanager
def _db_cursor():
    """Yield a RealDictCursor from the pool.

    Commits on clean exit, rolls back on any error, and always returns
    the connection to the pool.

    Raises RuntimeError (chained to the original error) when no
    connection can be obtained, e.g. because Initialize() was not called.
    """
    global db_pool
    try:
        conn = db_pool.getconn()
    except Exception as exc:
        # Chain the cause instead of discarding it (the original bare
        # `except:` hid why the pool was unusable).
        raise RuntimeError("Database not connected") from exc
    try:
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            yield cur
        conn.commit()
    except BaseException:
        # Roll back on any failure raised inside the `with` body
        # (including KeyboardInterrupt) and re-raise it unchanged.
        conn.rollback()
        raise
    finally:
        db_pool.putconn(conn)
def _validate_column_name(cur, table, column):
cur.execute("SELECT get_column_names(%s) AS name", (table,))
if all([column!=col["name"] for col in cur.fetchall()]):
raise RuntimeError("Invalid column name")
def authorize(username, password, useragent):
    """Authenticate and open a new session; return a TSession for it.

    Credential checking and session creation both happen server-side via
    tfm_user_auth / tfm_session_request.
    """
    query = "SELECT tfm_session_request(tfm_user_auth(%s, %s), %s) AS sid"
    with _db_cursor() as cur:
        cur.execute(query, (username, password, useragent))
        row = cur.fetchone()
    return TSession(row["sid"])
class TSession:
    """Handle for one authenticated database session.

    Every method passes ``self.sid`` to a ``tfm_*`` stored procedure, so
    visibility checks happen inside the database, not in this client.
    """

    # Session id issued by the database; set in __init__ after validation.
    sid = None
def __init__(self, sid):
    """Wrap an existing session id, validating it against the database.

    Raises RuntimeError when the database does not recognise the sid.
    """
    with _db_cursor() as cur:
        cur.execute("SELECT tfm_session_validate(%s) IS NOT NULL AS valid", (sid,))
        if not cur.fetchone()["valid"]:
            raise RuntimeError("Invalid sid")
    self.sid = sid
def terminate(self):
    """End this session server-side; the object must not be used after.

    The original ended with ``del self``, which is a no-op in Python
    (it only unbinds the local name, it does not destroy the object),
    so it has been removed.
    """
    with _db_cursor() as cur:
        cur.execute("CALL tfm_session_terminate(%s)", (self.sid,))
@property
def username(self):
    """Name of the user who owns this session."""
    query = "SELECT tfm_session_username(%s) AS name"
    with _db_cursor() as cur:
        cur.execute(query, (self.sid,))
        row = cur.fetchone()
    return row["name"]
@property
def is_admin(self):
    """Whether this session's user has elevated rights.

    NOTE(review): despite the property name, this returns the
    ``can_edit`` field of tfm_user_get_info() — confirm that the DB
    function exposes the admin flag under that name (the schema's
    users table has ``is_admin``/``can_create``, not ``can_edit``).
    """
    with _db_cursor() as cur:
        cur.execute("SELECT * FROM tfm_user_get_info(%s)", (self.sid,))
        return cur.fetchone()["can_edit"]
def get_files(self, order_key=DEFAULT_SORTING["files"]["key"], order_asc=DEFAULT_SORTING["files"]["asc"], offset=0, limit=None):
    """List files visible to this session as dicts.

    order_key is checked against v_files before interpolation so the
    ORDER BY clause cannot inject SQL; offset/limit are coerced to int
    (limit=None means no cap).
    """
    direction = "ASC" if order_asc else "DESC"
    row_cap = "ALL" if limit is None else int(limit)
    with _db_cursor() as cur:
        _validate_column_name(cur, "v_files", order_key)
        query = "SELECT * FROM tfm_get_files(%%s) ORDER BY %s %s OFFSET %s LIMIT %s" % (
            order_key, direction, int(offset), row_cap)
        cur.execute(query, (self.sid,))
        return [dict(row) for row in cur.fetchall()]
def get_files_by_filter(self, philter=None, order_key=DEFAULT_SORTING["files"]["key"], order_asc=DEFAULT_SORTING["files"]["asc"], offset=0, limit=None):
    """List visible files matching the filter DSL string *philter*.

    The filter itself is passed as a bound parameter; only the validated
    order_key and int-coerced offset/limit are interpolated.
    """
    direction = "ASC" if order_asc else "DESC"
    row_cap = "ALL" if limit is None else int(limit)
    with _db_cursor() as cur:
        _validate_column_name(cur, "v_files", order_key)
        query = "SELECT * FROM tfm_get_files_by_filter(%%s, %%s) ORDER BY %s %s OFFSET %s LIMIT %s" % (
            order_key, direction, int(offset), row_cap)
        cur.execute(query, (self.sid, philter))
        return [dict(row) for row in cur.fetchall()]
def get_tags(self, order_key=DEFAULT_SORTING["tags"]["key"], order_asc=DEFAULT_SORTING["tags"]["asc"], offset=0, limit=None):
    """List tags visible to this session as dicts.

    Sorts by the validated order_key with a stable secondary sort on
    name (ascending).
    """
    direction = "ASC" if order_asc else "DESC"
    row_cap = "ALL" if limit is None else int(limit)
    with _db_cursor() as cur:
        _validate_column_name(cur, "v_tags", order_key)
        query = "SELECT * FROM tfm_get_tags(%%s) ORDER BY %s %s, name ASC OFFSET %s LIMIT %s" % (
            order_key, direction, int(offset), row_cap)
        cur.execute(query, (self.sid,))
        return [dict(row) for row in cur.fetchall()]
def get_categories(self, order_key=DEFAULT_SORTING["categories"]["key"], order_asc=DEFAULT_SORTING["categories"]["asc"], offset=0, limit=None):
    """List categories visible to this session as dicts."""
    direction = "ASC" if order_asc else "DESC"
    row_cap = "ALL" if limit is None else int(limit)
    with _db_cursor() as cur:
        _validate_column_name(cur, "v_categories", order_key)
        query = "SELECT * FROM tfm_get_categories(%%s) ORDER BY %s %s OFFSET %s LIMIT %s" % (
            order_key, direction, int(offset), row_cap)
        cur.execute(query, (self.sid,))
        return [dict(row) for row in cur.fetchall()]
def get_pools(self, order_key=DEFAULT_SORTING["pools"]["key"], order_asc=DEFAULT_SORTING["pools"]["asc"], offset=0, limit=None):
    """List pools visible to this session as dicts."""
    direction = "ASC" if order_asc else "DESC"
    row_cap = "ALL" if limit is None else int(limit)
    with _db_cursor() as cur:
        _validate_column_name(cur, "v_pools", order_key)
        query = "SELECT * FROM tfm_get_pools(%%s) ORDER BY %s %s OFFSET %s LIMIT %s" % (
            order_key, direction, int(offset), row_cap)
        cur.execute(query, (self.sid,))
        return [dict(row) for row in cur.fetchall()]
def get_autotags(self, order_key="child_id", order_asc=True, offset=0, limit=None):
    """List auto-tag rules visible to this session as dicts."""
    direction = "ASC" if order_asc else "DESC"
    row_cap = "ALL" if limit is None else int(limit)
    with _db_cursor() as cur:
        _validate_column_name(cur, "v_autotags", order_key)
        query = "SELECT * FROM tfm_get_autotags(%%s) ORDER BY %s %s OFFSET %s LIMIT %s" % (
            order_key, direction, int(offset), row_cap)
        cur.execute(query, (self.sid,))
        return [dict(row) for row in cur.fetchall()]
def get_my_sessions(self, order_key="started", order_asc=False, offset=0, limit=None):
    """List this user's sessions, most recently started first by default."""
    direction = "ASC" if order_asc else "DESC"
    row_cap = "ALL" if limit is None else int(limit)
    with _db_cursor() as cur:
        _validate_column_name(cur, "v_sessions", order_key)
        query = "SELECT * FROM tfm_get_my_sessions(%%s) ORDER BY %s %s OFFSET %s LIMIT %s" % (
            order_key, direction, int(offset), row_cap)
        cur.execute(query, (self.sid,))
        return [dict(row) for row in cur.fetchall()]
def get_tags_by_file(self, file_id, order_key=DEFAULT_SORTING["tags"]["key"], order_asc=DEFAULT_SORTING["tags"]["asc"], offset=0, limit=None):
    """List the tags attached to *file_id* that this session may see."""
    direction = "ASC" if order_asc else "DESC"
    row_cap = "ALL" if limit is None else int(limit)
    with _db_cursor() as cur:
        _validate_column_name(cur, "v_tags", order_key)
        query = "SELECT * FROM tfm_get_tags_by_file(%%s, %%s) ORDER BY %s %s OFFSET %s LIMIT %s" % (
            order_key, direction, int(offset), row_cap)
        cur.execute(query, (self.sid, file_id))
        return [dict(row) for row in cur.fetchall()]
def get_files_by_tag(self, tag_id, order_key=DEFAULT_SORTING["files"]["key"], order_asc=DEFAULT_SORTING["files"]["asc"], offset=0, limit=None):
    """List visible files carrying the tag *tag_id*."""
    direction = "ASC" if order_asc else "DESC"
    row_cap = "ALL" if limit is None else int(limit)
    with _db_cursor() as cur:
        _validate_column_name(cur, "v_files", order_key)
        query = "SELECT * FROM tfm_get_files_by_tag(%%s, %%s) ORDER BY %s %s OFFSET %s LIMIT %s" % (
            order_key, direction, int(offset), row_cap)
        cur.execute(query, (self.sid, tag_id))
        return [dict(row) for row in cur.fetchall()]
def get_files_by_pool(self, pool_id, order_key=DEFAULT_SORTING["files"]["key"], order_asc=DEFAULT_SORTING["files"]["asc"], offset=0, limit=None):
    """List visible files belonging to the pool *pool_id*."""
    direction = "ASC" if order_asc else "DESC"
    row_cap = "ALL" if limit is None else int(limit)
    with _db_cursor() as cur:
        _validate_column_name(cur, "v_files", order_key)
        query = "SELECT * FROM tfm_get_files_by_pool(%%s, %%s) ORDER BY %s %s OFFSET %s LIMIT %s" % (
            order_key, direction, int(offset), row_cap)
        cur.execute(query, (self.sid, pool_id))
        return [dict(row) for row in cur.fetchall()]
def get_parent_tags(self, tag_id, order_key=DEFAULT_SORTING["tags"]["key"], order_asc=DEFAULT_SORTING["tags"]["asc"], offset=0, limit=None):
    """List the visible parent tags of *tag_id* (auto-tag hierarchy)."""
    direction = "ASC" if order_asc else "DESC"
    row_cap = "ALL" if limit is None else int(limit)
    with _db_cursor() as cur:
        _validate_column_name(cur, "v_tags", order_key)
        query = "SELECT * FROM tfm_get_parent_tags(%%s, %%s) ORDER BY %s %s OFFSET %s LIMIT %s" % (
            order_key, direction, int(offset), row_cap)
        cur.execute(query, (self.sid, tag_id))
        return [dict(row) for row in cur.fetchall()]
def get_my_file_views(self, file_id=None, order_key="datetime", order_asc=False, offset=0, limit=None):
    """Return the current user's file-view history (optionally for one file).

    NOTE(review): order_key is validated against "v_files" even though the
    rows come from tfm_get_my_file_views — confirm the intended view name.
    """
    direction = "ASC" if order_asc else "DESC"
    limit_sql = "ALL" if limit is None else int(limit)
    with _db_cursor() as cur:
        _validate_column_name(cur, "v_files", order_key)
        query = f"SELECT * FROM tfm_get_my_file_views(%s, %s) ORDER BY {order_key} {direction} OFFSET {int(offset)} LIMIT {limit_sql}"
        cur.execute(query, (self.sid, file_id))
        return [dict(row) for row in cur.fetchall()]
def get_file(self, file_id):
    """Fetch a single file row visible to this session (None when absent)."""
    query = "SELECT * FROM tfm_get_files(%s) WHERE id=%s"
    with _db_cursor() as cur:
        cur.execute(query, (self.sid, file_id))
        return cur.fetchone()
def get_tag(self, tag_id):
    """Fetch a single tag row visible to this session (None when absent)."""
    query = "SELECT * FROM tfm_get_tags(%s) WHERE id=%s"
    with _db_cursor() as cur:
        cur.execute(query, (self.sid, tag_id))
        return cur.fetchone()
def get_category(self, category_id):
    """Fetch a single category row visible to this session (None when absent)."""
    query = "SELECT * FROM tfm_get_categories(%s) WHERE id=%s"
    with _db_cursor() as cur:
        cur.execute(query, (self.sid, category_id))
        return cur.fetchone()
def view_file(self, file_id):
    """Record that the current session's user viewed the given file."""
    sql = "CALL tfm_view_file(%s, %s)"
    with _db_cursor() as cur:
        cur.execute(sql, (self.sid, file_id))
def add_file(self, path, datetime=None, notes=None, is_private=None, orig_name=True):
    """Import a file: register it in the DB, move it into the managed files
    directory and generate its thumbnail/preview images.

    orig_name=True (the default) stores the source file's basename; pass a
    string to store a different display name, or None to store no name.
    Returns (file_id, ext).
    Raises FileNotFoundError for a missing source and PermissionError when
    the files/thumbs directories are not writable.
    """
    if not isfile(path):
        raise FileNotFoundError("No such file '%s'" % path)
    if not access(conf["Paths"]["Files"], W_OK) or not access(conf["Paths"]["Thumbs"], W_OK):
        raise PermissionError("Invalid directories for files and thumbs")
    # NOTE(review): 'mage' looks like a module-level python-magic instance
    # used for MIME sniffing — confirm the alias at the top of the file.
    mime = mage.from_file(path)
    # Fix: use `is True` rather than `== True` so that truthy non-bool
    # values (e.g. a custom name, or the int 1) are kept as the stored name
    # instead of being silently replaced by the basename.
    if orig_name is True:
        orig_name = basename(path)
    with _db_cursor() as cur:
        cur.execute("SELECT * FROM tfm_add_file(%s, %s, %s, %s, %s, %s)", (self.sid, mime, datetime, notes, is_private, orig_name))
        res = cur.fetchone()
        file_id = res["f_id"]
        ext = res["ext"]
        # Move the payload and build previews inside the cursor context —
        # assuming _db_cursor commits on clean exit, a failure here lets the
        # freshly inserted row roll back (TODO confirm _db_cursor semantics).
        file_path = join(conf["Paths"]["Files"], file_id)
        move(path, file_path)
        thumb_path = previewer.get_jpeg_preview(file_path, height=160, width=160)
        preview_path = previewer.get_jpeg_preview(file_path, height=1080, width=1920)
        chmod(file_path, 0o664)
        chmod(thumb_path, 0o664)
        chmod(preview_path, 0o664)
    return file_id, ext
def add_tag(self, name, notes=None, color=None, category_id=None, is_private=None):
    """Create a tag and return its new id.

    Colors are stored without a leading '#'; a falsy category_id is stored
    as NULL.
    """
    color = color.replace('#', '') if color is not None else None
    category_id = category_id or None
    with _db_cursor() as cur:
        cur.execute("SELECT tfm_add_tag(%s, %s, %s, %s, %s, %s) AS id", (self.sid, name, notes, color, category_id, is_private))
        return cur.fetchone()["id"]
def add_category(self, name, notes=None, color=None, is_private=None):
    """Create a tag category and return its new id (color stored without '#')."""
    color = color.replace('#', '') if color is not None else None
    with _db_cursor() as cur:
        cur.execute("SELECT tfm_add_category(%s, %s, %s, %s, %s) AS id", (self.sid, name, notes, color, is_private))
        return cur.fetchone()["id"]
def add_pool(self, name, notes=None, parent_id=None, is_private=None):
    """Create a pool (optionally nested under parent_id) and return its id."""
    sql = "SELECT tfm_add_pool(%s, %s, %s, %s, %s) AS id"
    with _db_cursor() as cur:
        cur.execute(sql, (self.sid, name, notes, parent_id, is_private))
        return cur.fetchone()["id"]
def add_autotag(self, child_id, parent_id, is_active=None, apply_to_existing=None):
    """Create an auto-tag rule (child tag implies parent tag); returns the
    stored procedure's 'added' result."""
    sql = "SELECT tfm_add_autotag(%s, %s, %s, %s, %s) AS added"
    with _db_cursor() as cur:
        cur.execute(sql, (self.sid, child_id, parent_id, is_active, apply_to_existing))
        return cur.fetchone()["added"]
def add_file_to_tag(self, file_id, tag_id):
    """Attach a tag to a file; returns the ids of all tags that got attached
    (the set-returning procedure may yield more than one row — presumably
    via auto-tag rules)."""
    with _db_cursor() as cur:
        cur.execute("SELECT tfm_add_file_to_tag(%s, %s, %s) AS id", (self.sid, file_id, tag_id))
        return [row["id"] for row in cur.fetchall()]
def add_file_to_pool(self, file_id, pool_id):
    """Add a file to a pool; returns the stored procedure's 'added' result."""
    sql = "SELECT tfm_add_file_to_pool(%s, %s, %s) AS added"
    with _db_cursor() as cur:
        cur.execute(sql, (self.sid, file_id, pool_id))
        return cur.fetchone()["added"]
def edit_file(self, file_id, mime=None, datetime=None, notes=None, is_private=None):
    """Update a file's fields (None presumably leaves a field unchanged —
    handled by the stored procedure; confirm against tfm_edit_file)."""
    sql = "CALL tfm_edit_file(%s, %s, %s, %s, %s, %s)"
    with _db_cursor() as cur:
        cur.execute(sql, (self.sid, file_id, mime, datetime, notes, is_private))
def edit_tag(self, tag_id, name=None, notes=None, color=None, category_id=None, is_private=None):
    """Update a tag; color is stored without '#', falsy category becomes NULL."""
    color = color.replace('#', '') if color is not None else None
    category_id = category_id or None
    with _db_cursor() as cur:
        cur.execute("CALL tfm_edit_tag(%s, %s, %s, %s, %s, %s, %s)", (self.sid, tag_id, name, notes, color, category_id, is_private))
def edit_category(self, category_id, name=None, notes=None, color=None, is_private=None):
    """Update a category; color is stored without the leading '#'."""
    color = color.replace('#', '') if color is not None else None
    with _db_cursor() as cur:
        cur.execute("CALL tfm_edit_category(%s, %s, %s, %s, %s, %s)", (self.sid, category_id, name, notes, color, is_private))
def edit_pool(self, pool_id, name=None, notes=None, parent_id=None, is_private=None):
    """Update a pool's name, notes, parent or privacy flag."""
    sql = "CALL tfm_edit_pool(%s, %s, %s, %s, %s, %s)"
    with _db_cursor() as cur:
        cur.execute(sql, (self.sid, pool_id, name, notes, parent_id, is_private))
def remove_file(self, file_id):
    """Delete a file's DB record, then every on-disk artifact in the files
    directory whose name starts with the file id.

    Raises RuntimeError when no artifact was found or a removal failed —
    the same contract the old shelled-out `rm` command had.
    """
    # Local imports keep this fix self-contained; glob+remove replaces the
    # old os.system("rm ...") call, which was fragile and shell-injection-prone.
    from glob import glob
    from os import remove
    with _db_cursor() as cur:
        cur.execute("CALL tfm_remove_file(%s, %s)", (self.sid, file_id))
    # NOTE(review): disk cleanup happens after the cursor context exits, so
    # the DB delete may already be committed if removal fails — confirm this
    # ordering matches the intended behavior.
    matches = glob(join(conf["Paths"]["Files"], "%s*" % file_id))
    if not matches:
        # `rm` on a non-matching pattern exited non-zero; keep that behavior.
        raise RuntimeError("Failed to remove file '%s'" % file_id)
    try:
        for artifact in matches:
            remove(artifact)
    except OSError as exc:
        raise RuntimeError("Failed to remove file '%s'" % file_id) from exc
def remove_tag(self, tag_id):
    """Delete a tag by id."""
    sql = "CALL tfm_remove_tag(%s, %s)"
    with _db_cursor() as cur:
        cur.execute(sql, (self.sid, tag_id))
def remove_category(self, category_id):
    """Delete a category by id."""
    sql = "CALL tfm_remove_category(%s, %s)"
    with _db_cursor() as cur:
        cur.execute(sql, (self.sid, category_id))
def remove_pool(self, pool_id):
    """Delete a pool by id."""
    sql = "CALL tfm_remove_pool(%s, %s)"
    with _db_cursor() as cur:
        cur.execute(sql, (self.sid, pool_id))
def remove_autotag(self, child_id, parent_id):
    """Delete the auto-tag rule linking child_id to parent_id."""
    sql = "CALL tfm_remove_autotag(%s, %s, %s)"
    with _db_cursor() as cur:
        cur.execute(sql, (self.sid, child_id, parent_id))
def remove_file_to_tag(self, file_id, tag_id):
    """Detach a tag from a file."""
    sql = "CALL tfm_remove_file_to_tag(%s, %s, %s)"
    with _db_cursor() as cur:
        cur.execute(sql, (self.sid, file_id, tag_id))
def remove_file_to_pool(self, file_id, pool_id):
    """Remove a file from a pool."""
    sql = "CALL tfm_remove_file_to_pool(%s, %s, %s)"
    with _db_cursor() as cur:
        cur.execute(sql, (self.sid, file_id, pool_id))

View File

@ -0,0 +1,22 @@
package main
import (
"tanabata/internal/storage/postgres"
)
// main is a throwaway harness for manually exercising the postgres storage
// layer; the probe calls are kept around commented out for quick re-enabling.
func main() {
	const dsn = "postgres://hiko:taikibansei@192.168.0.25/Tanabata_new?application_name=Tanabata%20testing"
	postgres.InitDB(dsn)
	// test_json := json.RawMessage([]byte("{\"valery\": \"ponosoff\"}"))
	// data, statusCode, err := db.FileGetSlice(1, "", "+2", -2, 0)
	// data, statusCode, err := db.FileGet(1, "0197d056-cfb0-76b5-97e0-bd588826393c")
	// data, statusCode, err := db.FileAdd(1, "ABOBA.png", "image/png", time.Now(), "slkdfjsldkflsdkfj;sldkf", test_json)
	// statusCode, err := db.FileUpdate(2, "0197d159-bf3a-7617-a3a8-a4a9fc39eca6", map[string]interface{}{
	// 	"name": "ponos.png",
	// })
	// statusCode, err := db.FileDelete(1, "0197d155-848f-7221-ba4a-4660f257c7d5")
	// v, e, err := postgres.FileGetAccess(1, "0197d15a-57f9-712c-991e-c512290e774f")
	// fmt.Printf("V: %s, E: %s\n", v, e)
	// fmt.Printf("Status: %d\n", statusCode)
	// fmt.Printf("Error: %s\n", err)
	// fmt.Printf("%+v\n", data)
}

View File

@ -0,0 +1,22 @@
package main
import (
"fmt"
"tanabata/db"
)
// main is a scratch entry point that connects to the test database and
// exercises db.FileDelete; other probes are kept commented out.
func main() {
	const dsn = "postgres://hiko:taikibansei@192.168.0.25/Tanabata_new?application_name=Tanabata%20testing"
	db.InitDB(dsn)
	// test_json := json.RawMessage([]byte("{\"valery\": \"ponosoff\"}"))
	// data, statusCode, err := db.FileGetSlice(2, "", "+2", -2, 0)
	// data, statusCode, err := db.FileGet(1, "0197d056-cfb0-76b5-97e0-bd588826393c")
	// data, statusCode, err := db.FileAdd(1, "ABOBA.png", "image/png", time.Now(), "slkdfjsldkflsdkfj;sldkf", test_json)
	// statusCode, err := db.FileUpdate(2, "0197d159-bf3a-7617-a3a8-a4a9fc39eca6", map[string]interface{}{
	// 	"name": "ponos.png",
	// })
	statusCode, err := db.FileDelete(1, "0197d155-848f-7221-ba4a-4660f257c7d5")
	fmt.Printf("Status: %d\n", statusCode)
	fmt.Printf("Error: %s\n", err)
	// fmt.Printf("%+v\n", data)
}

View File

@ -0,0 +1,79 @@
package db
import (
"context"
"errors"
"fmt"
"net/http"
"time"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgxpool"
)
var connPool *pgxpool.Pool
// InitDB creates the package-level connection pool from connString and
// verifies the database is reachable. It must be called once before any
// other helper in this package.
func InitDB(connString string) error {
	poolConfig, err := pgxpool.ParseConfig(connString)
	if err != nil {
		return fmt.Errorf("error while parsing connection string: %w", err)
	}
	// Pool sizing: allow bursts up to 100 connections, keep none idle.
	poolConfig.MaxConns = 100
	poolConfig.MinConns = 0
	poolConfig.MaxConnLifetime = time.Hour
	poolConfig.HealthCheckPeriod = 30 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	connPool, err = pgxpool.NewWithConfig(ctx, poolConfig)
	if err != nil {
		return fmt.Errorf("error while initializing DB connections pool: %w", err)
	}
	// Fail fast on a bad host/credentials instead of at the first query
	// (consistent with storage.New elsewhere in the project, which pings
	// right after connecting).
	if err = connPool.Ping(ctx); err != nil {
		return fmt.Errorf("database ping failed: %w", err)
	}
	return nil
}
// transaction runs handler inside a database transaction. The transaction
// is committed only when handler returns a nil error; otherwise it is
// rolled back. statusCode is handler's status, or 500 on begin/commit
// failure.
func transaction(handler func(context.Context, pgx.Tx) (statusCode int, err error)) (statusCode int, err error) {
	ctx := context.Background()
	tx, err := connPool.Begin(ctx)
	if err != nil {
		statusCode = http.StatusInternalServerError
		return
	}
	// Deferred Rollback is a no-op after a successful Commit; deferring it
	// guarantees the tx (and its connection) is released even if handler
	// panics — the original leaked it in that case.
	defer tx.Rollback(ctx)
	statusCode, err = handler(ctx, tx)
	if err != nil {
		return
	}
	if err = tx.Commit(ctx); err != nil {
		statusCode = http.StatusInternalServerError
	}
	return
}
// handleDBError maps a low-level database error onto an HTTP status code
// and a client-safe error. A nil input yields (200, nil); unrecognized
// errors pass through with status 500.
func handleDBError(errIn error) (statusCode int, err error) {
	if errIn == nil {
		return http.StatusOK, nil
	}
	if errors.Is(errIn, pgx.ErrNoRows) {
		return http.StatusNotFound, fmt.Errorf("not found")
	}
	var pgErr *pgconn.PgError
	if errors.As(errIn, &pgErr) {
		switch pgErr.Code {
		case "22P02", "22007": // Invalid data format
			return http.StatusBadRequest, fmt.Errorf("%s", pgErr.Message)
		case "23505": // Unique constraint violation
			return http.StatusConflict, fmt.Errorf("already exists")
		}
	}
	return http.StatusInternalServerError, errIn
}

View File

@ -0,0 +1,53 @@
package db
import (
"fmt"
"net/http"
"strconv"
"strings"
)
// Convert "filter" URL param to SQL "WHERE" condition
//
// NOTE(review): filtering is not implemented yet — every filter string is
// accepted and mapped to the always-true condition "(true)"; statusCode and
// err are currently never set.
func filterToSQL(filter string) (sql string, statusCode int, err error) {
	// filterTokens := strings.Split(string(filter), ";")
	sql = "(true)"
	return
}
// Convert "sort" URL param to SQL "ORDER BY"
func sortToSQL(sort string) (sql string, statusCode int, err error) {
if sort == "" {
return
}
sortOptions := strings.Split(sort, ",")
sql = " ORDER BY "
for i, sortOption := range sortOptions {
sortOrder := sortOption[:1]
sortColumn := sortOption[1:]
// parse sorting order marker
switch sortOrder {
case "+":
sortOrder = "ASC"
case "-":
sortOrder = "DESC"
default:
err = fmt.Errorf("invalid sorting order mark: %q", sortOrder)
statusCode = http.StatusBadRequest
return
}
// validate sorting column
var n int
n, err = strconv.Atoi(sortColumn)
if err != nil || n < 0 {
err = fmt.Errorf("invalid sorting column: %q", sortColumn)
statusCode = http.StatusBadRequest
return
}
// add sorting option to query
if i > 0 {
sql += ","
}
sql += fmt.Sprintf("%s %s NULLS LAST", sortColumn, sortOrder)
}
return
}

View File

@ -0,0 +1,19 @@
module tanabata
go 1.23.0
toolchain go1.23.10
require github.com/jackc/pgx/v5 v5.7.5
require (
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/pgx v3.6.2+incompatible // indirect
github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/stretchr/testify v1.9.0 // indirect
golang.org/x/crypto v0.37.0 // indirect
golang.org/x/sync v0.13.0 // indirect
golang.org/x/text v0.24.0 // indirect
)

View File

@ -0,0 +1,32 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o=
github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs=
github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -1,119 +1,122 @@
package domain package domain
import ( import (
"encoding/json" "encoding/json"
"time" "time"
)
"github.com/jackc/pgx/v5/pgtype"
type User struct { )
Name string `json:"name"`
IsAdmin bool `json:"isAdmin"` type User struct {
CanCreate bool `json:"canCreate"` Name string `json:"name"`
} IsAdmin bool `json:"isAdmin"`
CanCreate bool `json:"canCreate"`
type MIME struct { }
Name string `json:"name"`
Extension string `json:"extension"` type MIME struct {
} Name string `json:"name"`
Extension string `json:"extension"`
type ( }
CategoryCore struct {
ID *string `json:"id"` type (
Name *string `json:"name"` CategoryCore struct {
Color *string `json:"color"` ID string `json:"id"`
} Name string `json:"name"`
CategoryItem struct { Color pgtype.Text `json:"color"`
CategoryCore }
} CategoryItem struct {
CategoryFull struct { CategoryCore
CategoryCore }
CreatedAt time.Time `json:"createdAt"` CategoryFull struct {
Creator User `json:"creator"` CategoryCore
Notes *string `json:"notes"` CreatedAt time.Time `json:"createdAt"`
} Creator User `json:"creator"`
) Notes pgtype.Text `json:"notes"`
}
type ( )
FileCore struct {
ID string `json:"id"` type (
Name *string `json:"name"` FileCore struct {
MIME MIME `json:"mime"` ID string `json:"id"`
} Name pgtype.Text `json:"name"`
FileItem struct { MIME MIME `json:"mime"`
FileCore }
CreatedAt time.Time `json:"createdAt"` FileItem struct {
Creator User `json:"creator"` FileCore
} CreatedAt time.Time `json:"createdAt"`
FileFull struct { Creator User `json:"creator"`
FileCore }
CreatedAt time.Time `json:"createdAt"` FileFull struct {
Creator User `json:"creator"` FileCore
Notes *string `json:"notes"` CreatedAt time.Time `json:"createdAt"`
Metadata json.RawMessage `json:"metadata"` Creator User `json:"creator"`
Viewed int `json:"viewed"` Notes pgtype.Text `json:"notes"`
} Metadata json.RawMessage `json:"metadata"`
) Tags []TagCore `json:"tags"`
Viewed int `json:"viewed"`
type ( }
TagCore struct { )
ID string `json:"id"`
Name string `json:"name"` type (
Color *string `json:"color"` TagCore struct {
} ID string `json:"id"`
TagItem struct { Name string `json:"name"`
TagCore Color pgtype.Text `json:"color"`
Category CategoryCore `json:"category"` }
} TagItem struct {
TagFull struct { TagCore
TagCore Category CategoryCore `json:"category"`
Category CategoryCore `json:"category"` }
CreatedAt time.Time `json:"createdAt"` TagFull struct {
Creator User `json:"creator"` TagCore
Notes *string `json:"notes"` Category CategoryCore `json:"category"`
UsedIncl int `json:"usedIncl"` CreatedAt time.Time `json:"createdAt"`
UsedExcl int `json:"usedExcl"` Creator User `json:"creator"`
} Notes pgtype.Text `json:"notes"`
) UsedIncl int `json:"usedIncl"`
UsedExcl int `json:"usedExcl"`
type Autotag struct { }
TriggerTag TagCore `json:"triggerTag"` )
AddTag TagCore `json:"addTag"`
IsActive bool `json:"isActive"` type Autotag struct {
} TriggerTag TagCore `json:"triggerTag"`
AddTag TagCore `json:"addTag"`
type ( IsActive bool `json:"isActive"`
PoolCore struct { }
ID string `json:"id"`
Name string `json:"name"` type (
} PoolCore struct {
PoolItem struct { ID string `json:"id"`
PoolCore Name string `json:"name"`
} }
PoolFull struct { PoolItem struct {
PoolCore PoolCore
CreatedAt time.Time `json:"createdAt"` }
Creator User `json:"creator"` PoolFull struct {
Notes *string `json:"notes"` PoolCore
Viewed int `json:"viewed"` CreatedAt time.Time `json:"createdAt"`
} Creator User `json:"creator"`
) Notes pgtype.Text `json:"notes"`
Viewed int `json:"viewed"`
type Session struct { }
ID int `json:"id"` )
UserAgent string `json:"userAgent"`
StartedAt time.Time `json:"startedAt"` type Session struct {
ExpiresAt time.Time `json:"expiresAt"` ID int `json:"id"`
LastActivity time.Time `json:"lastActivity"` UserAgent string `json:"userAgent"`
} StartedAt time.Time `json:"startedAt"`
ExpiresAt time.Time `json:"expiresAt"`
type Pagination struct { LastActivity time.Time `json:"lastActivity"`
Total int `json:"total"` }
Offset int `json:"offset"`
Limit int `json:"limit"` type Pagination struct {
Count int `json:"count"` Total int `json:"total"`
} Offset int `json:"offset"`
Limit int `json:"limit"`
type Slice[T any] struct { Count int `json:"count"`
Pagination Pagination `json:"pagination"` }
Data []T `json:"data"`
} type Slice[T any] struct {
Pagination Pagination `json:"pagination"`
Data []T `json:"data"`
}

View File

@ -0,0 +1,16 @@
package postgres
import "context"
// UserLogin checks the name/password pair against the users table (the
// password is verified server-side via crypt()) and returns the matching
// user's id; Scan returns pgx.ErrNoRows when the credentials do not match.
func UserLogin(ctx context.Context, name, password string) (user_id int, err error) {
	const q = "SELECT id FROM users WHERE name=$1 AND password=crypt($2, password)"
	err = connPool.QueryRow(ctx, q, name, password).Scan(&user_id)
	return
}
// UserAuth reports whether user_id exists (ok) and, if so, whether that
// user is an administrator.
func UserAuth(ctx context.Context, user_id int) (ok, isAdmin bool) {
	scanErr := connPool.QueryRow(ctx, "SELECT is_admin FROM users WHERE id=$1", user_id).Scan(&isAdmin)
	ok = scanErr == nil
	return
}

View File

@ -0,0 +1,268 @@
package postgres
import (
"context"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"tanabata/internal/domain"
)
// FileStore implements file persistence on top of a pgx connection pool.
type FileStore struct {
	db *pgxpool.Pool
}

// NewFileStore wires a FileStore to the given pool.
// NOTE(review): several FileStore methods query the package-level connPool
// instead of this injected pool — confirm which one is intended.
func NewFileStore(db *pgxpool.Pool) *FileStore {
	return &FileStore{db: db}
}
// Get user's access rights to file
//
// getAccess returns the caller's view/edit rights on one file: the creator
// and admins always have both; other users get whatever their acl.files row
// grants (an absent row grants nothing). Scan fails with pgx.ErrNoRows when
// the file does not exist.
func (s *FileStore) getAccess(user_id int, file_id string) (canView, canEdit bool, err error) {
	ctx := context.Background()
	// Query through the injected pool: the original used the package-level
	// connPool, ignoring the pool passed to NewFileStore.
	row := s.db.QueryRow(ctx, `
	SELECT
		COALESCE(a.view, FALSE) OR f.creator_id=$1 OR COALESCE(u.is_admin, FALSE),
		COALESCE(a.edit, FALSE) OR f.creator_id=$1 OR COALESCE(u.is_admin, FALSE)
	FROM data.files f
	LEFT JOIN acl.files a ON a.file_id=f.id AND a.user_id=$1
	LEFT JOIN system.users u ON u.id=$1
	WHERE f.id=$2
	`, user_id, file_id)
	err = row.Scan(&canView, &canEdit)
	return
}
// Get a set of files
//
// GetSlice returns one page of files visible to user_id (creator, ACL view
// grant, or admin). The client-supplied filter and sort strings are
// translated to SQL fragments; limit < 0 means "no limit" and offset <= 0
// means "no offset". Pagination.Total is computed from the same filtered
// query *before* sort/limit/offset are appended.
//
// NOTE(review): this method calls the free functions filterToSQL, sortToSQL,
// transaction and handleDBError rather than using s.db or Storage methods —
// confirm those free functions exist in this package after the refactor.
func (s *FileStore) GetSlice(user_id int, filter, sort string, limit, offset int) (files domain.Slice[domain.FileItem], statusCode int, err error) {
	filterCond, statusCode, err := filterToSQL(filter)
	if err != nil {
		return
	}
	sortExpr, statusCode, err := sortToSQL(sort)
	if err != nil {
		return
	}
	// prepare query; the trailing AND is completed by filterCond below
	query := `
	SELECT
		f.id,
		f.name,
		m.name,
		m.extension,
		uuid_extract_timestamp(f.id),
		u.name,
		u.is_admin
	FROM data.files f
	JOIN system.mime m ON m.id=f.mime_id
	JOIN system.users u ON u.id=f.creator_id
	WHERE NOT f.is_deleted AND (f.creator_id=$1 OR (SELECT view FROM acl.files WHERE file_id=f.id AND user_id=$1) OR (SELECT is_admin FROM system.users WHERE id=$1)) AND
	`
	query += filterCond
	// snapshot the query for the COUNT(*) before paging clauses are added
	queryCount := query
	query += sortExpr
	if limit >= 0 {
		query += fmt.Sprintf(" LIMIT %d", limit)
	}
	if offset > 0 {
		query += fmt.Sprintf(" OFFSET %d", offset)
	}
	// execute query: page rows and total count share one transaction so they
	// see a consistent snapshot
	statusCode, err = transaction(func(ctx context.Context, tx pgx.Tx) (statusCode int, err error) {
		rows, err := tx.Query(ctx, query, user_id)
		if err != nil {
			statusCode, err = handleDBError(err)
			return
		}
		defer rows.Close()
		count := 0
		for rows.Next() {
			var file domain.FileItem
			err = rows.Scan(&file.ID, &file.Name, &file.MIME.Name, &file.MIME.Extension, &file.CreatedAt, &file.Creator.Name, &file.Creator.IsAdmin)
			if err != nil {
				statusCode = http.StatusInternalServerError
				return
			}
			files.Data = append(files.Data, file)
			count++
		}
		err = rows.Err()
		if err != nil {
			statusCode = http.StatusInternalServerError
			return
		}
		files.Pagination.Limit = limit
		files.Pagination.Offset = offset
		files.Pagination.Count = count
		row := tx.QueryRow(ctx, fmt.Sprintf("SELECT COUNT(*) FROM (%s) tmp", queryCount), user_id)
		err = row.Scan(&files.Pagination.Total)
		if err != nil {
			statusCode = http.StatusInternalServerError
		}
		return
	})
	if err == nil {
		statusCode = http.StatusOK
	}
	return
}
// Get file
//
// Get returns the full record of one file — core fields, notes, metadata,
// its tags (with category color fallback) and the caller's personal view
// count. Access requires creator/ACL-view/admin; otherwise the row is not
// visible and the result is 404 via handleDBError.
func (s *FileStore) Get(user_id int, file_id string) (file domain.FileFull, statusCode int, err error) {
	ctx := context.Background()
	// Query through the injected pool: the original used the package-level
	// connPool, ignoring the pool passed to NewFileStore.
	row := s.db.QueryRow(ctx, `
	SELECT
		f.id,
		f.name,
		m.name,
		m.extension,
		uuid_extract_timestamp(f.id),
		u.name,
		u.is_admin,
		f.notes,
		f.metadata,
		(SELECT COUNT(*) FROM activity.file_views fv WHERE fv.file_id=$2 AND fv.user_id=$1)
	FROM data.files f
	JOIN system.mime m ON m.id=f.mime_id
	JOIN system.users u ON u.id=f.creator_id
	WHERE NOT f.is_deleted AND f.id=$2 AND (f.creator_id=$1 OR (SELECT view FROM acl.files WHERE file_id=$2 AND user_id=$1) OR (SELECT is_admin FROM system.users WHERE id=$1))
	`, user_id, file_id)
	err = row.Scan(&file.ID, &file.Name, &file.MIME.Name, &file.MIME.Extension, &file.CreatedAt, &file.Creator.Name, &file.Creator.IsAdmin, &file.Notes, &file.Metadata, &file.Viewed)
	if err != nil {
		statusCode, err = handleDBError(err)
		return
	}
	// Fetch the file's tags; a tag without its own color inherits the
	// category color via COALESCE.
	rows, err := s.db.Query(ctx, `
	SELECT
		t.id,
		t.name,
		COALESCE(t.color, c.color)
	FROM data.tags t
	LEFT JOIN data.categories c ON c.id=t.category_id
	JOIN data.file_tag ft ON ft.tag_id=t.id
	WHERE ft.file_id=$1
	`, file_id)
	if err != nil {
		statusCode, err = handleDBError(err)
		return
	}
	defer rows.Close()
	for rows.Next() {
		var tag domain.TagCore
		err = rows.Scan(&tag.ID, &tag.Name, &tag.Color)
		if err != nil {
			statusCode = http.StatusInternalServerError
			return
		}
		file.Tags = append(file.Tags, tag)
	}
	if err = rows.Err(); err != nil {
		statusCode = http.StatusInternalServerError
		return
	}
	statusCode = http.StatusOK
	return
}
// Add file
//
// Add resolves the MIME type against system.mime (unsupported types yield
// 400), inserts the file row (empty name/notes are stored as NULL via
// NULLIF) and returns the new file's core descriptor with status 200.
func (s *FileStore) Add(user_id int, name, mime string, datetime time.Time, notes string, metadata json.RawMessage) (file domain.FileCore, statusCode int, err error) {
	ctx := context.Background()
	var mime_id int
	var extension string
	// Use the injected pool: the original queried the package-level
	// connPool, ignoring the pool passed to NewFileStore.
	row := s.db.QueryRow(ctx, "SELECT id, extension FROM system.mime WHERE name=$1", mime)
	err = row.Scan(&mime_id, &extension)
	if err != nil {
		if err == pgx.ErrNoRows {
			err = fmt.Errorf("unsupported file type: %q", mime)
			statusCode = http.StatusBadRequest
		} else {
			statusCode, err = handleDBError(err)
		}
		return
	}
	row = s.db.QueryRow(ctx, `
	INSERT INTO data.files (name, mime_id, datetime, creator_id, notes, metadata)
	VALUES (NULLIF($1, ''), $2, $3, $4, NULLIF($5 ,''), $6)
	RETURNING id
	`, name, mime_id, datetime, user_id, notes, metadata)
	err = row.Scan(&file.ID)
	if err != nil {
		statusCode, err = handleDBError(err)
		return
	}
	// Mirror the NULLIF semantics in the returned struct: empty name ⇒ NULL.
	file.Name.String = name
	file.Name.Valid = (name != "")
	file.MIME.Name = mime
	file.MIME.Extension = extension
	statusCode = http.StatusOK
	return
}
// Update file
//
// Update applies a whitelist-checked partial update to one file; empty
// string values are stored as NULL via NULLIF. Requires creator/ACL-edit/
// admin rights. Returns 204 on success, 404 when no matching row was
// updated, 400 for an unknown field or an empty update set.
//
// NOTE(review): NULLIF($n, '') is applied to every field including
// datetime/metadata — confirm Postgres coerces '' acceptably for those
// column types.
func (s *FileStore) Update(user_id int, file_id string, updates map[string]interface{}) (statusCode int, err error) {
	if len(updates) == 0 {
		err = fmt.Errorf("no fields provided for update")
		statusCode = http.StatusBadRequest
		return
	}
	// Only these columns may be changed through the public API.
	writableFields := map[string]bool{
		"name":     true,
		"datetime": true,
		"notes":    true,
		"metadata": true,
	}
	query := "UPDATE data.files SET"
	newValues := []interface{}{user_id}
	count := 2 // $1 is user_id; value placeholders start at $2
	for field, value := range updates {
		if !writableFields[field] {
			err = fmt.Errorf("invalid field: %q", field)
			statusCode = http.StatusBadRequest
			return
		}
		query += fmt.Sprintf(" %s=NULLIF($%d, '')", field, count)
		newValues = append(newValues, value)
		count++
	}
	query += fmt.Sprintf(
		" WHERE id=$%d AND (creator_id=$1 OR (SELECT edit FROM acl.files WHERE file_id=$%d AND user_id=$1) OR (SELECT is_admin FROM system.users WHERE id=$1))",
		count, count)
	newValues = append(newValues, file_id)
	ctx := context.Background()
	// Execute on the injected pool: the original used the package-level
	// connPool, ignoring the pool passed to NewFileStore.
	commandTag, err := s.db.Exec(ctx, query, newValues...)
	if err != nil {
		statusCode, err = handleDBError(err)
		return
	}
	if commandTag.RowsAffected() == 0 {
		err = fmt.Errorf("not found")
		statusCode = http.StatusNotFound
		return
	}
	statusCode = http.StatusNoContent
	return
}
// Delete file
//
// Delete removes one file row; the caller must be the creator, hold an
// ACL edit grant, or be an admin. Returns 204 on success and 404 when no
// visible/editable row matched.
func (s *FileStore) Delete(user_id int, file_id string) (statusCode int, err error) {
	ctx := context.Background()
	// Execute on the injected pool: the original used the package-level
	// connPool, ignoring the pool passed to NewFileStore.
	commandTag, err := s.db.Exec(ctx,
		"DELETE FROM data.files WHERE id=$2 AND (creator_id=$1 OR (SELECT edit FROM acl.files WHERE file_id=$2 AND user_id=$1) OR (SELECT is_admin FROM system.users WHERE id=$1))",
		user_id, file_id)
	if err != nil {
		statusCode, err = handleDBError(err)
		return
	}
	if commandTag.RowsAffected() == 0 {
		err = fmt.Errorf("not found")
		statusCode = http.StatusNotFound
		return
	}
	statusCode = http.StatusNoContent
	return
}

View File

@ -0,0 +1,92 @@
package postgres
import (
"context"
"errors"
"fmt"
"net/http"
"time"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
"github.com/jackc/pgx/v5/pgxpool"
)
// Storage bundles the application's repositories behind one pgx pool.
type Storage struct {
	db *pgxpool.Pool
}

// connPool is a legacy package-level pool.
// NOTE(review): nothing in this file assigns it — New stores its pool in
// Storage.db only — so code reading connPool sees nil unless it is set
// elsewhere (e.g. by an InitDB-style entry point). Confirm before relying
// on it.
var connPool *pgxpool.Pool
// New opens a pgx connection pool for dbURL, verifies connectivity with a
// ping, and returns a Storage wrapping the pool. The whole setup is bounded
// by a 5-second timeout.
func New(dbURL string) (*Storage, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	cfg, err := pgxpool.ParseConfig(dbURL)
	if err != nil {
		return nil, fmt.Errorf("failed to parse DB URL: %w", err)
	}
	// Modest pool: at most 10 connections, 2 kept warm, minute health checks.
	cfg.MaxConns = 10
	cfg.MinConns = 2
	cfg.HealthCheckPeriod = time.Minute

	pool, err := pgxpool.NewWithConfig(ctx, cfg)
	if err != nil {
		return nil, fmt.Errorf("failed to connect to database: %w", err)
	}
	if err := pool.Ping(ctx); err != nil {
		return nil, fmt.Errorf("database ping failed: %w", err)
	}
	return &Storage{db: pool}, nil
}
// Close database storage
//
// Close releases every connection held by the underlying pool; the Storage
// must not be used afterwards.
func (s *Storage) Close() {
	s.db.Close()
}
// Run handler inside transaction
//
// transaction begins a tx on the Storage's own pool, runs handler, and
// commits when handler returns nil; on handler error (or panic) the
// deferred Rollback releases the tx. statusCode is handler's status, or
// 500 for begin/commit failure.
func (s *Storage) transaction(ctx context.Context, handler func(context.Context, pgx.Tx) (statusCode int, err error)) (statusCode int, err error) {
	// Begin on s.db: the original used the package-level connPool, which is
	// never assigned by New and would therefore be nil here.
	tx, err := s.db.Begin(ctx)
	if err != nil {
		statusCode = http.StatusInternalServerError
		return
	}
	// No-op once Commit succeeds; guarantees release on error or panic.
	defer tx.Rollback(ctx)
	statusCode, err = handler(ctx, tx)
	if err != nil {
		return
	}
	if err = tx.Commit(ctx); err != nil {
		statusCode = http.StatusInternalServerError
	}
	return
}
// handleDBError translates a low-level database error into an HTTP status
// code and a client-safe error; nil maps to (200, nil) and unrecognized
// errors pass through with status 500.
func (s *Storage) handleDBError(errIn error) (statusCode int, err error) {
	if errIn == nil {
		return http.StatusOK, nil
	}
	if errors.Is(errIn, pgx.ErrNoRows) {
		return http.StatusNotFound, fmt.Errorf("not found")
	}
	var pgErr *pgconn.PgError
	if errors.As(errIn, &pgErr) {
		switch pgErr.Code {
		case "22P02", "22007": // Invalid data format
			return http.StatusBadRequest, fmt.Errorf("%s", pgErr.Message)
		case "23505": // Unique constraint violation
			return http.StatusConflict, fmt.Errorf("already exists")
		}
	}
	return http.StatusInternalServerError, errIn
}

View File

@ -1,52 +1,21 @@
package postgres package postgres
import ( import (
"errors"
"fmt" "fmt"
"net/http" "net/http"
"strconv" "strconv"
"strings" "strings"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn"
) )
// Handle database error
func handleDBError(errIn error) (statusCode int, err error) {
if errIn == nil {
statusCode = http.StatusOK
return
}
if errors.Is(errIn, pgx.ErrNoRows) {
err = fmt.Errorf("not found")
statusCode = http.StatusNotFound
return
}
var pgErr *pgconn.PgError
if errors.As(errIn, &pgErr) {
switch pgErr.Code {
case "22P02", "22007": // Invalid data format
err = fmt.Errorf("%s", pgErr.Message)
statusCode = http.StatusBadRequest
return
case "23505": // Unique constraint violation
err = fmt.Errorf("already exists")
statusCode = http.StatusConflict
return
}
}
return http.StatusInternalServerError, errIn
}
// Convert "filter" URL param to SQL "WHERE" condition // Convert "filter" URL param to SQL "WHERE" condition
func filterToSQL(filter string) (sql string, err error) { func filterToSQL(filter string) (sql string, statusCode int, err error) {
// filterTokens := strings.Split(string(filter), ";") // filterTokens := strings.Split(string(filter), ";")
sql = "(true)" sql = "(true)"
return return
} }
// Convert "sort" URL param to SQL "ORDER BY" // Convert "sort" URL param to SQL "ORDER BY"
func sortToSQL(sort string) (sql string, err error) { func sortToSQL(sort string) (sql string, statusCode int, err error) {
if sort == "" { if sort == "" {
return return
} }
@ -63,6 +32,7 @@ func sortToSQL(sort string) (sql string, err error) {
sortOrder = "DESC" sortOrder = "DESC"
default: default:
err = fmt.Errorf("invalid sorting order mark: %q", sortOrder) err = fmt.Errorf("invalid sorting order mark: %q", sortOrder)
statusCode = http.StatusBadRequest
return return
} }
// validate sorting column // validate sorting column
@ -70,6 +40,7 @@ func sortToSQL(sort string) (sql string, err error) {
n, err = strconv.Atoi(sortColumn) n, err = strconv.Atoi(sortColumn)
if err != nil || n < 0 { if err != nil || n < 0 {
err = fmt.Errorf("invalid sorting column: %q", sortColumn) err = fmt.Errorf("invalid sorting column: %q", sortColumn)
statusCode = http.StatusBadRequest
return return
} }
// add sorting option to query // add sorting option to query

View File

@ -0,0 +1,21 @@
package storage
import (
"encoding/json"
"time"
"tanabata/internal/domain"
)
// Storage is the full persistence surface the application depends on.
type Storage interface {
	FileRepository
	Close()
}

// FileRepository describes file CRUD as implemented by the postgres store.
// Every method returns an HTTP-style status code alongside its error.
type FileRepository interface {
	GetSlice(user_id int, filter, sort string, limit, offset int) (files domain.Slice[domain.FileItem], statusCode int, err error)
	Get(user_id int, file_id string) (file domain.FileFull, statusCode int, err error)
	Add(user_id int, name, mime string, datetime time.Time, notes string, metadata json.RawMessage) (file domain.FileCore, statusCode int, err error)
	Update(user_id int, file_id string, updates map[string]interface{}) (statusCode int, err error)
	Delete(user_id int, file_id string) (statusCode int, err error)
}

View File

@ -0,0 +1,120 @@
package models
import (
"encoding/json"
"time"
"github.com/jackc/pgx/v5/pgtype"
)
type User struct {
Name string `json:"name"`
IsAdmin bool `json:"isAdmin"`
CanCreate bool `json:"canCreate"`
}
type MIME struct {
Name string `json:"name"`
Extension string `json:"extension"`
}
type (
CategoryCore struct {
ID string `json:"id"`
Name string `json:"name"`
Color pgtype.Text `json:"color"`
}
CategoryItem struct {
CategoryCore
}
CategoryFull struct {
CategoryCore
CreatedAt time.Time `json:"createdAt"`
Creator User `json:"creator"`
Notes pgtype.Text `json:"notes"`
}
)
type (
FileCore struct {
ID string `json:"id"`
Name pgtype.Text `json:"name"`
MIME MIME `json:"mime"`
}
FileItem struct {
FileCore
CreatedAt time.Time `json:"createdAt"`
Creator User `json:"creator"`
}
FileFull struct {
FileCore
CreatedAt time.Time `json:"createdAt"`
Creator User `json:"creator"`
Notes pgtype.Text `json:"notes"`
Metadata json.RawMessage `json:"metadata"`
Tags []TagCore `json:"tags"`
Viewed int `json:"viewed"`
}
)
type (
TagCore struct {
ID string `json:"id"`
Name string `json:"name"`
Color pgtype.Text `json:"color"`
}
TagItem struct {
TagCore
Category CategoryCore `json:"category"`
}
TagFull struct {
TagCore
Category CategoryCore `json:"category"`
CreatedAt time.Time `json:"createdAt"`
Creator User `json:"creator"`
Notes pgtype.Text `json:"notes"`
}
)
// Autotag is a tagging rule: when TriggerTag is present on a file, AddTag
// is applied automatically — presumably; confirm with the autotag logic.
type Autotag struct {
	TriggerTag TagCore `json:"triggerTag"`
	AddTag     TagCore `json:"addTag"`
	IsActive   bool    `json:"isActive"` // NOTE(review): looks like inactive rules are kept but skipped — confirm
}
// Pool representations at three levels of detail: Core (references),
// Item (list entries) and Full (detail view).
type (
	// PoolCore carries the minimal identifying fields of a pool.
	PoolCore struct {
		ID   string `json:"id"`
		Name string `json:"name"`
	}
	// PoolItem is the list-view shape; currently identical to Core.
	PoolItem struct {
		PoolCore
	}
	// PoolFull adds audit fields, notes and a view counter for detail views.
	PoolFull struct {
		PoolCore
		CreatedAt time.Time   `json:"createdAt"`
		Creator   User        `json:"creator"`
		Notes     pgtype.Text `json:"notes"` // nullable
		Viewed    int         `json:"viewed"` // presumably a view count — TODO confirm
	}
)
// Session describes an authenticated login session, as shown in the
// session-management UI.
type Session struct {
	ID           int       `json:"id"`
	UserAgent    string    `json:"userAgent"`
	StartedAt    time.Time `json:"startedAt"`
	ExpiresAt    time.Time `json:"expiresAt"`
	LastActivity time.Time `json:"lastActivity"`
}
// Pagination describes the window of a paged response.
type Pagination struct {
	Total  int `json:"total"`  // total rows matching the query — presumably; confirm in repository
	Offset int `json:"offset"` // index of the first returned row
	Limit  int `json:"limit"`  // requested page size
	Count  int `json:"count"`  // rows actually returned
}
// Slice is a generic paged collection of T plus its pagination metadata.
type Slice[T any] struct {
	Pagination Pagination `json:"pagination"`
	Data       []T        `json:"data"`
}

View File

@ -0,0 +1,36 @@
/* Auth (login) page styles.
   NOTE(review): the alignment below relies on body getting display:flex
   from the shared base stylesheet — confirm load order. */
body {
    justify-content: center;
    align-items: center;
}
/* Decorative side images pinned to the top corners of the viewport. */
.decoration {
    position: absolute;
    top: 0;
}
.decoration.left {
    left: 0;
    width: 20vw;
}
.decoration.right {
    right: 0;
    width: 20vw;
}
/* Login form container. */
#auth {
    max-width: 100%;
}
#auth h1 {
    margin-bottom: 28px;
}
#auth .form-control {
    margin: 14px 0;
    border-radius: 14px;
}
/* Submit button spacing below the inputs. */
#login {
    margin-top: 20px;
}

View File

@ -0,0 +1,54 @@
/* Minimal reset: drop the default page margins/padding. */
html, body {
    margin: 0;
    padding: 0;
}
/* App-wide body layout: dark theme, viewport-filling column flexbox
   (header / main / footer spread via justify-content: space-between). */
body {
    background-color: #312F45;
    color: #f0f0f0;
    position: absolute;
    top: 0;
    left: 0;
    bottom: 0;
    right: 0;
    min-height: 100vh;
    display: flex;
    flex-direction: column;
    justify-content: space-between;
    align-items: stretch;
    /* Generic fallback renders something sane if the Epilogue webfont
       fails to load; identical rendering when it does load. */
    font-family: Epilogue, sans-serif;
    overflow-x: hidden;
}
/* Shared button sizing: full-width, rounded, large label. */
.btn {
    height: 50px;
    width: 100%;
    border-radius: 14px;
    font-size: 1.5rem;
    font-weight: 500;
}
/* Primary action — muted purple matching the app palette. */
.btn-primary {
    background-color: #9592B5;
    border-color: #454261;
}
.btn-primary:hover {
    background-color: #7D7AA4;
    border-color: #454261;
}
/* Destructive action — red. */
.btn-danger {
    background-color: #DB6060;
    border-color: #851E1E;
}
.btn-danger:hover {
    background-color: #D64848;
    border-color: #851E1E;
}
/* Row of buttons spread across the available width. */
.btn-row {
    display: flex;
    justify-content: space-between;
}

View File

@ -0,0 +1,388 @@
/* Header/footer chrome. */
header, footer {
    margin: 0;
    padding: 10px;
    box-sizing: border-box;
}
header {
    padding: 20px;
    box-shadow: 0 5px 5px #0004;
}
/* Small inline icons sized relative to the surrounding text. */
.icon-header {
    height: .8em;
}
#select {
    cursor: pointer;
}
/* Sort bar: label left, trigger right; anchors the dropdown below. */
.sorting {
    position: relative;
    display: flex;
    justify-content: space-between;
}
#sorting {
    cursor: pointer;
}
.highlighted {
    color: #9999AD;
}
#icon-expand {
    height: 10px;
}
/* Sort-options dropdown, positioned just under the trigger. */
#sorting-options {
    position: absolute;
    right: 0;
    top: 114%;
    padding: 4px 10px;
    box-sizing: border-box;
    background-color: #111118;
    border-radius: 10px;
    text-align: left;
    box-shadow: 0 0 10px black;
    z-index: 9999;
}
.sorting-option {
    padding: 4px 0;
    display: flex;
    justify-content: space-between;
}
/* Undo Bootstrap-style radio float so the control sits after the label —
   presumably; confirm against the markup. */
.sorting-option input[type="radio"] {
    float: unset;
    margin-left: 1.8em;
}
.filtering-wrapper {
    margin-top: 10px;
}
/* Filter panel overlaying the content below the header. */
.filtering-block {
    position: absolute;
    top: 128px;
    left: 14px;
    right: 14px;
    padding: 14px;
    background-color: #111118;
    border-radius: 10px;
    box-shadow: 0 0 10px 4px #0004;
    z-index: 9998;
}
}
/* Scrollable wrapping grid of preview tiles. */
main {
    margin: 0;
    padding: 10px;
    height: 100%;
    display: flex;
    justify-content: space-between;
    align-content: flex-start;
    align-items: flex-start;
    flex-wrap: wrap;
    box-sizing: border-box;
    overflow-x: hidden;
    overflow-y: scroll;
}
/* Flexible spacer so the last wrapped row left-aligns instead of
   being spread by space-between. */
main:after {
    content: "";
    flex: auto;
}
/* Positioning context for the selection check overlay below. */
.item-preview {
    position: relative;
}
/* Check-mark badge in the top-right corner of a selected tile. */
.item-selected:after {
    content: "";
    display: block;
    position: absolute;
    top: 10px;
    right: 10px;
    width: 100%;
    height: 50%;
    background-image: url("/static/images/icon-select.svg");
    background-size: contain;
    background-position: right;
    background-repeat: no-repeat;
}
/* Square thumbnail tile; caps at a third of the viewport on small screens. */
.file-preview {
    margin: 1px 0;
    padding: 0;
    width: 160px;
    height: 160px;
    max-width: calc(33vw - 7px);
    max-height: calc(33vw - 7px);
    overflow: hidden;
    cursor: pointer;
}
.file-thumb {
    width: 100%;
    height: 100%;
    object-fit: contain;
    object-position: center;
}
/* Dimming overlay; darkens on hover.
   NOTE(review): absolute positioning assumes .file-preview sits inside a
   position:relative ancestor (.item-preview) — confirm the markup. */
.file-preview .overlay {
    position: absolute;
    top: 0;
    right: 0;
    bottom: 0;
    left: 0;
    background-color: #0002;
}
.file-preview:hover .overlay {
    background-color: #0004;
}
/* Pill-style chips for tags, categories and filter tokens.
   (.category-preview previously duplicated this declaration block
   verbatim; merged into one rule — same specificity, same behavior.) */
.tag-preview, .filtering-token, .category-preview {
    margin: 5px 5px;
    padding: 5px 10px;
    border-radius: 5px;
    background-color: #444455;
    cursor: pointer;
}
/* Full-screen single-file view, content centered on black. */
.file {
    margin: 0;
    padding: 0;
    min-width: 100vw;
    min-height: 100vh;
    max-width: 100vw;
    max-height: 100vh;
    display: flex;
    justify-content: center;
    align-items: center;
    background-color: black;
}
.file .preview-img {
    max-width: 100vw;
    max-height: 100vh;
}
/* Floating panel for bulk actions on the current selection,
   pinned above the footer nav. */
.selection-manager {
    position: fixed;
    left: 10px;
    right: 10px;
    bottom: 65px;
    box-sizing: border-box;
    max-height: 40vh;
    display: flex;
    flex-direction: column;
    padding: 15px 10px;
    background-color: #181721;
    border-radius: 10px;
    box-shadow: 0 0 5px #0008;
}
.selection-manager hr {
    margin: 5px 0;
}
.selection-header {
    display: flex;
    justify-content: space-between;
}
.selection-header > * {
    cursor: pointer;
}
/* Action colors: edit = blue, pool = yellow, delete = red. */
#selection-edit-tags {
    color: #4DC7ED;
}
#selection-add-to-pool {
    color: #F5E872;
}
#selection-delete {
    color: #DB6060;
}
/* Tag list inside the panel scrolls within the panel's max height. */
.selection-tags {
    max-height: 100%;
    overflow-x: hidden;
    overflow-y: scroll;
}
input[type="color"] {
    width: 100%;
}
/* Bordered, input-like boxes holding wrapped chip rows. */
.tags-container, .filtering-operators, .filtering-tokens {
    padding: 5px;
    background-color: #212529;
    border: 1px solid #495057;
    border-radius: .375rem;
    display: flex;
    justify-content: space-between;
    align-content: flex-start;
    align-items: flex-start;
    flex-wrap: wrap;
    box-sizing: border-box;
}
.filtering-operators {
    margin-bottom: 15px;
}
/* Fixed-height scrollable chip lists. */
.tags-container, .filtering-tokens {
    margin: 15px 0;
    height: 200px;
    overflow-x: hidden;
    overflow-y: scroll;
}
/* Spacer keeping the last wrapped row left-aligned (see main:after). */
.tags-container:after, .filtering-tokens:after {
    content: "";
    flex: auto;
}
.tags-container-selected {
    height: 100px;
}
#files-filter {
    margin-bottom: 0;
    height: 56px;
}
/* Full-screen dimmed backdrop for the media viewer.
   NOTE(review): a disabled "overflow-y: scroll" declaration was parked
   here — remove for good if vertical scrolling is not wanted. */
.viewer-wrapper {
    position: absolute;
    left: 0;
    top: 0;
    right: 0;
    bottom: 0;
    background-color: #000a;
    display: flex;
    justify-content: center;
}
/* Invisible full-height click zones for prev/next/close. */
.viewer-nav {
    position: absolute;
    top: 0;
    bottom: 0;
    display: flex;
    justify-content: center;
    align-items: center;
    cursor: pointer;
}
.viewer-nav:hover {
    background-color: #b4adff40;
}
/* Prev/next occupy the outer 20vw strips on each side. */
.viewer-nav-prev {
    left: 0;
    right: 80vw;
}
.viewer-nav-next {
    left: 80vw;
    right: 0;
}
/* Close zone spans the top 15vh of the viewer. */
.viewer-nav-close {
    left: 0;
    right: 0;
    bottom: unset;
    height: 15vh;
}
.viewer-nav-icon {
    width: 20px;
    height: 32px;
}
.viewer-nav-close > .viewer-nav-icon {
    width: 16px;
    height: 16px;
}
/* The media element itself fills the wrapper. */
#viewer {
    width: 100%;
    height: 100%;
    max-width: 100%;
}
/* Card listing active sessions on the settings page. */
.sessions-wrapper {
    padding: 14px;
    background-color: #111118;
    border-radius: 10px;
}
.btn-terminate {
    height: 20px;
    cursor: pointer;
}
/* Bottom navigation bar. */
footer {
    display: flex;
    justify-content: space-between;
    align-items: center;
    background-color: #0007;
}
/* Individual nav button; .curr marks the active page. */
.nav {
    display: flex;
    justify-content: center;
    align-items: center;
    height: 40px;
    width: 18vw;
    background: transparent;
    border: 0;
    border-radius: 10px;
    outline: 0;
}
.nav.curr, .nav:hover {
    background-color: #343249;
}
.navicon {
    display: block;
    height: 30px;
}
/* Full-screen loading overlay, above everything else. */
#loader {
    position: fixed;
    left: 0;
    top: 0;
    right: 0;
    bottom: 0;
    display: flex;
    justify-content: center;
    align-items: center;
    background-color: #000a;
    z-index: 9999;
}
.loader-wrapper {
    padding: 15px;
    border-radius: 12px;
    background-color: white;
}
.loader-img {
    max-width: 20vw;
    max-height: 20vh;
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 10 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.4 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.0 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.6 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.8 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 8.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 10 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 11 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.9 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.2 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 5.0 KiB

Some files were not shown because too many files have changed in this diff Show More