feat(backend): implement pool stack

Add pool repo (gap-based position ordering, cursor pagination, add/remove/reorder
files), service, handler, and wire all /pools endpoints including
/pools/:id/files, /pools/:id/files/remove, and /pools/:id/files/reorder.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Masahiko AMANO 2026-04-04 22:04:27 +03:00
parent 21debf626d
commit 3a49036507
5 changed files with 1225 additions and 3 deletions

View File

@ -66,6 +66,7 @@ func main() {
tagRepo := postgres.NewTagRepo(pool)
tagRuleRepo := postgres.NewTagRuleRepo(pool)
categoryRepo := postgres.NewCategoryRepo(pool)
poolRepo := postgres.NewPoolRepo(pool)
transactor := postgres.NewTransactor(pool)
// Services
@ -80,7 +81,8 @@ func main() {
auditSvc := service.NewAuditService(auditRepo)
tagSvc := service.NewTagService(tagRepo, tagRuleRepo, aclSvc, auditSvc, transactor)
categorySvc := service.NewCategoryService(categoryRepo, tagRepo, aclSvc, auditSvc)
fileSvc := service.NewFileService(
poolSvc := service.NewPoolService(poolRepo, aclSvc, auditSvc)
fileSvc := service.NewFileService(
fileRepo,
mimeRepo,
diskStorage,
@ -97,12 +99,13 @@ func main() {
fileHandler := handler.NewFileHandler(fileSvc, tagSvc)
tagHandler := handler.NewTagHandler(tagSvc, fileSvc)
categoryHandler := handler.NewCategoryHandler(categorySvc)
poolHandler := handler.NewPoolHandler(poolSvc)
r := handler.NewRouter(authMiddleware, authHandler, fileHandler, tagHandler, categoryHandler)
r := handler.NewRouter(authMiddleware, authHandler, fileHandler, tagHandler, categoryHandler, poolHandler)
slog.Info("starting server", "addr", cfg.ListenAddr)
if err := r.Run(cfg.ListenAddr); err != nil {
slog.Error("server error", "err", err)
os.Exit(1)
}
}
}

View File

@ -0,0 +1,665 @@
package postgres
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/google/uuid"
"github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgxpool"
"tanabata/backend/internal/db"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// ---------------------------------------------------------------------------
// Row structs
// ---------------------------------------------------------------------------

// poolRow mirrors the columns produced by the pool SELECTs (see
// poolSelectFrom); the db tags must match the column aliases so
// pgx.RowToStructByName can scan it.
type poolRow struct {
	ID          uuid.UUID `db:"id"`
	Name        string    `db:"name"`
	Notes       *string   `db:"notes"`
	Metadata    []byte    `db:"metadata"` // raw JSONB; may be empty or the literal "null" (see toPool)
	CreatorID   int16     `db:"creator_id"`
	CreatorName string    `db:"creator_name"` // joined from core.users
	IsPublic    bool      `db:"is_public"`
	FileCount   int       `db:"file_count"` // COALESCE'd count over data.file_pool
}

// poolRowWithTotal adds the window-function total that List uses for offset
// pagination.
type poolRowWithTotal struct {
	poolRow
	Total int `db:"total"` // COUNT(*) OVER() — same value on every row
}

// poolFileRow is a flat struct combining all file columns plus pool position.
type poolFileRow struct {
	ID              uuid.UUID       `db:"id"`
	OriginalName    *string         `db:"original_name"`
	MIMEType        string          `db:"mime_type"`
	MIMEExtension   string          `db:"mime_extension"`
	ContentDatetime time.Time       `db:"content_datetime"`
	Notes           *string         `db:"notes"`
	Metadata        json.RawMessage `db:"metadata"`
	EXIF            json.RawMessage `db:"exif"`
	PHash           *int64          `db:"phash"`
	CreatorID       int16           `db:"creator_id"`
	CreatorName     string          `db:"creator_name"`
	IsPublic        bool            `db:"is_public"`
	IsDeleted       bool            `db:"is_deleted"`
	Position        int             `db:"position"` // gap-based ordering within the pool
}
// ---------------------------------------------------------------------------
// Converters
// ---------------------------------------------------------------------------

// toPool maps a poolRow onto domain.Pool, deriving CreatedAt from the UUID
// and dropping metadata that is empty or the JSON literal "null".
func toPool(r poolRow) domain.Pool {
	pool := domain.Pool{
		ID:          r.ID,
		Name:        r.Name,
		Notes:       r.Notes,
		CreatorID:   r.CreatorID,
		CreatorName: r.CreatorName,
		IsPublic:    r.IsPublic,
		FileCount:   r.FileCount,
		CreatedAt:   domain.UUIDCreatedAt(r.ID),
	}
	// Only surface metadata when the column held a real JSON document.
	if raw := string(r.Metadata); len(raw) > 0 && raw != "null" {
		pool.Metadata = json.RawMessage(r.Metadata)
	}
	return pool
}

// toPoolFile maps a poolFileRow onto domain.PoolFile, embedding the file
// columns and carrying the pool-local position through unchanged.
func toPoolFile(r poolFileRow) domain.PoolFile {
	file := domain.File{
		ID:              r.ID,
		OriginalName:    r.OriginalName,
		MIMEType:        r.MIMEType,
		MIMEExtension:   r.MIMEExtension,
		ContentDatetime: r.ContentDatetime,
		Notes:           r.Notes,
		Metadata:        r.Metadata,
		EXIF:            r.EXIF,
		PHash:           r.PHash,
		CreatorID:       r.CreatorID,
		CreatorName:     r.CreatorName,
		IsPublic:        r.IsPublic,
		IsDeleted:       r.IsDeleted,
		CreatedAt:       domain.UUIDCreatedAt(r.ID),
	}
	return domain.PoolFile{File: file, Position: r.Position}
}
// ---------------------------------------------------------------------------
// Cursor
// ---------------------------------------------------------------------------

// poolFileCursor is the keyset-pagination cursor for pool file listings: the
// (position, file_id) pair of the last row on the previous page.
type poolFileCursor struct {
	Position int    `json:"p"`
	FileID   string `json:"id"`
}

// encodePoolCursor serializes c as unpadded URL-safe base64 over JSON.
func encodePoolCursor(c poolFileCursor) string {
	// Marshaling this plain struct cannot fail, so the error is discarded.
	payload, _ := json.Marshal(c)
	return base64.RawURLEncoding.EncodeToString(payload)
}

// decodePoolCursor parses a cursor produced by encodePoolCursor. Malformed
// input yields a generic error so no internals leak to API clients.
func decodePoolCursor(s string) (poolFileCursor, error) {
	raw, err := base64.RawURLEncoding.DecodeString(s)
	if err != nil {
		return poolFileCursor{}, fmt.Errorf("cursor: invalid encoding")
	}
	var c poolFileCursor
	if err := json.Unmarshal(raw, &c); err != nil {
		return poolFileCursor{}, fmt.Errorf("cursor: invalid format")
	}
	return c, nil
}
// ---------------------------------------------------------------------------
// Shared SQL
// ---------------------------------------------------------------------------

// poolCountSubquery computes per-pool file counts.
const poolCountSubquery = `(SELECT pool_id, COUNT(*) AS cnt FROM data.file_pool GROUP BY pool_id)`

// poolSelectFrom selects a pool joined with its creator's name and its
// (possibly zero) file count.
const poolSelectFrom = `
SELECT p.id, p.name, p.notes, p.metadata,
p.creator_id, u.name AS creator_name, p.is_public,
COALESCE(fc.cnt, 0) AS file_count
FROM data.pools p
JOIN core.users u ON u.id = p.creator_id
LEFT JOIN ` + poolCountSubquery + ` fc ON fc.pool_id = p.id`

// poolSortColumn maps an API sort key onto a whitelisted ORDER BY column;
// anything unrecognized falls back to the id column.
func poolSortColumn(s string) string {
	switch s {
	case "name":
		return "p.name"
	default:
		return "p.id" // "created"
	}
}
// ---------------------------------------------------------------------------
// PoolRepo
// ---------------------------------------------------------------------------

// PoolRepo implements port.PoolRepo using PostgreSQL.
type PoolRepo struct {
	// pool is the fallback connection source; each call resolves its actual
	// querier via connOrTx, which may substitute a transaction from ctx.
	pool *pgxpool.Pool
}

// Compile-time check that PoolRepo satisfies port.PoolRepo.
var _ port.PoolRepo = (*PoolRepo)(nil)

// NewPoolRepo creates a PoolRepo backed by pool.
func NewPoolRepo(pool *pgxpool.Pool) *PoolRepo {
	return &PoolRepo{pool: pool}
}
// ---------------------------------------------------------------------------
// List
// ---------------------------------------------------------------------------

// List returns one offset-paginated page of pools with creator names and
// file counts. Sorting is restricted to the columns whitelisted by
// poolSortColumn; an optional case-insensitive substring search matches the
// pool name. The total match count is computed in the same query via
// COUNT(*) OVER().
//
// Fix: the page size is now clamped (default 50, max 200) via db.ClampLimit,
// matching ListFiles — previously any positive limit was passed straight
// into SQL, allowing unbounded result sets.
func (r *PoolRepo) List(ctx context.Context, params port.OffsetParams) (*domain.PoolOffsetPage, error) {
	order := "ASC"
	if strings.EqualFold(params.Order, "desc") {
		order = "DESC"
	}
	sortCol := poolSortColumn(params.Sort)
	args := []any{}
	n := 1
	var conditions []string
	if params.Search != "" {
		// NOTE(review): '%' and '_' in the search term act as LIKE
		// wildcards; confirm whether they should be escaped.
		conditions = append(conditions, fmt.Sprintf("lower(p.name) LIKE lower($%d)", n))
		args = append(args, "%"+params.Search+"%")
		n++
	}
	where := ""
	if len(conditions) > 0 {
		where = "WHERE " + strings.Join(conditions, " AND ")
	}
	limit := db.ClampLimit(params.Limit, 50, 200)
	offset := params.Offset
	if offset < 0 {
		offset = 0
	}
	query := fmt.Sprintf(`
SELECT p.id, p.name, p.notes, p.metadata,
p.creator_id, u.name AS creator_name, p.is_public,
COALESCE(fc.cnt, 0) AS file_count,
COUNT(*) OVER() AS total
FROM data.pools p
JOIN core.users u ON u.id = p.creator_id
LEFT JOIN %s fc ON fc.pool_id = p.id
%s
ORDER BY %s %s NULLS LAST, p.id ASC
LIMIT $%d OFFSET $%d`, poolCountSubquery, where, sortCol, order, n, n+1)
	args = append(args, limit, offset)
	q := connOrTx(ctx, r.pool)
	rows, err := q.Query(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.List query: %w", err)
	}
	collected, err := pgx.CollectRows(rows, pgx.RowToStructByName[poolRowWithTotal])
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.List scan: %w", err)
	}
	items := make([]domain.Pool, len(collected))
	total := 0
	for i, row := range collected {
		items[i] = toPool(row.poolRow)
		// Identical on every row (window function). NOTE(review): when the
		// offset is past the last match, no rows return and total reports 0
		// even though matches exist — confirm this is acceptable.
		total = row.Total
	}
	return &domain.PoolOffsetPage{
		Items:  items,
		Total:  total,
		Offset: offset,
		Limit:  limit,
	}, nil
}
// ---------------------------------------------------------------------------
// GetByID
// ---------------------------------------------------------------------------

// GetByID fetches one pool (with creator name and file count) by primary
// key, returning domain.ErrNotFound when no such row exists.
func (r *PoolRepo) GetByID(ctx context.Context, id uuid.UUID) (*domain.Pool, error) {
	query := poolSelectFrom + `
WHERE p.id = $1`
	rows, err := connOrTx(ctx, r.pool).Query(ctx, query, id)
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.GetByID: %w", err)
	}
	row, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[poolRow])
	switch {
	case errors.Is(err, pgx.ErrNoRows):
		return nil, domain.ErrNotFound
	case err != nil:
		return nil, fmt.Errorf("PoolRepo.GetByID scan: %w", err)
	}
	result := toPool(row)
	return &result, nil
}
// ---------------------------------------------------------------------------
// Create
// ---------------------------------------------------------------------------

// Create inserts a new pool row and returns it joined with the creator's
// name; a fresh pool always reports file_count = 0. Unique-constraint
// violations map to domain.ErrConflict.
func (r *PoolRepo) Create(ctx context.Context, p *domain.Pool) (*domain.Pool, error) {
	const query = `
WITH ins AS (
INSERT INTO data.pools (name, notes, metadata, creator_id, is_public)
VALUES ($1, $2, $3, $4, $5)
RETURNING *
)
SELECT ins.id, ins.name, ins.notes, ins.metadata,
ins.creator_id, u.name AS creator_name, ins.is_public,
0 AS file_count
FROM ins
JOIN core.users u ON u.id = ins.creator_id`
	// Pass NULL for metadata unless the caller supplied a JSON document.
	var meta any
	if len(p.Metadata) > 0 {
		meta = p.Metadata
	}
	rows, err := connOrTx(ctx, r.pool).Query(ctx, query, p.Name, p.Notes, meta, p.CreatorID, p.IsPublic)
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.Create: %w", err)
	}
	row, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[poolRow])
	switch {
	case isPgUniqueViolation(err):
		return nil, domain.ErrConflict
	case err != nil:
		return nil, fmt.Errorf("PoolRepo.Create scan: %w", err)
	}
	created := toPool(row)
	return &created, nil
}
// ---------------------------------------------------------------------------
// Update
// ---------------------------------------------------------------------------

// Update overwrites name/notes/is_public and — only when a non-NULL metadata
// parameter is supplied — the metadata column (COALESCE keeps the stored
// value otherwise). Returns domain.ErrNotFound for a missing pool and
// domain.ErrConflict on a unique-constraint violation.
func (r *PoolRepo) Update(ctx context.Context, id uuid.UUID, p *domain.Pool) (*domain.Pool, error) {
	const query = `
WITH upd AS (
UPDATE data.pools SET
name = $2,
notes = $3,
metadata = COALESCE($4, metadata),
is_public = $5
WHERE id = $1
RETURNING *
)
SELECT upd.id, upd.name, upd.notes, upd.metadata,
upd.creator_id, u.name AS creator_name, upd.is_public,
COALESCE(fc.cnt, 0) AS file_count
FROM upd
JOIN core.users u ON u.id = upd.creator_id
LEFT JOIN (SELECT pool_id, COUNT(*) AS cnt FROM data.file_pool WHERE pool_id = $1 GROUP BY pool_id) fc
ON fc.pool_id = upd.id`
	// A NULL metadata parameter leaves the stored document untouched.
	var meta any
	if len(p.Metadata) > 0 {
		meta = p.Metadata
	}
	rows, err := connOrTx(ctx, r.pool).Query(ctx, query, id, p.Name, p.Notes, meta, p.IsPublic)
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.Update: %w", err)
	}
	row, err := pgx.CollectOneRow(rows, pgx.RowToStructByName[poolRow])
	switch {
	case err == nil:
		updated := toPool(row)
		return &updated, nil
	case errors.Is(err, pgx.ErrNoRows):
		return nil, domain.ErrNotFound
	case isPgUniqueViolation(err):
		return nil, domain.ErrConflict
	default:
		return nil, fmt.Errorf("PoolRepo.Update scan: %w", err)
	}
}
// ---------------------------------------------------------------------------
// Delete
// ---------------------------------------------------------------------------

// Delete removes a pool by primary key and reports domain.ErrNotFound when
// no row was deleted.
func (r *PoolRepo) Delete(ctx context.Context, id uuid.UUID) error {
	ct, err := connOrTx(ctx, r.pool).Exec(ctx, `DELETE FROM data.pools WHERE id = $1`, id)
	if err != nil {
		return fmt.Errorf("PoolRepo.Delete: %w", err)
	}
	if ct.RowsAffected() == 0 {
		return domain.ErrNotFound
	}
	return nil
}
// ---------------------------------------------------------------------------
// ListFiles
// ---------------------------------------------------------------------------

// fileSelectForPool is the column list for pool file queries (without position).
const fileSelectForPool = `
f.id, f.original_name,
mt.name AS mime_type, mt.extension AS mime_extension,
f.content_datetime, f.notes, f.metadata, f.exif, f.phash,
f.creator_id, u.name AS creator_name,
f.is_public, f.is_deleted`

// ListFiles returns one page of a pool's files ordered by (position, file_id)
// using keyset pagination: the opaque cursor encodes the last returned row's
// (position, file_id) pair. Soft-deleted files are excluded, an optional
// filter expression (rendered by ParseFilter) further restricts rows, and
// tags for the returned files are batch-loaded in a second query. A bad
// cursor or filter yields a (wrapped) domain.ErrValidation.
func (r *PoolRepo) ListFiles(ctx context.Context, poolID uuid.UUID, params port.PoolFileListParams) (*domain.PoolFilePage, error) {
	// Clamp the page size: default 50, hard max 200.
	limit := db.ClampLimit(params.Limit, 50, 200)
	args := []any{poolID}
	// n is the next free SQL placeholder index ($1 is poolID).
	n := 2
	var conds []string
	conds = append(conds, "fp.pool_id = $1")
	conds = append(conds, "f.is_deleted = false")
	if params.Filter != "" {
		// ParseFilter renders the user-supplied filter into SQL, numbering
		// its placeholders starting at n and reporting the next free index.
		filterSQL, nextN, filterArgs, err := ParseFilter(params.Filter, n)
		if err != nil {
			return nil, fmt.Errorf("%w: %v", domain.ErrValidation, err)
		}
		if filterSQL != "" {
			conds = append(conds, filterSQL)
			n = nextN
			args = append(args, filterArgs...)
		}
	}
	// Cursor condition.
	var orderBy string
	if params.Cursor != "" {
		cur, err := decodePoolCursor(params.Cursor)
		if err != nil {
			return nil, fmt.Errorf("%w: %v", domain.ErrValidation, err)
		}
		fileID, err := uuid.Parse(cur.FileID)
		if err != nil {
			return nil, domain.ErrValidation
		}
		// Keyset predicate: rows strictly after (position, file_id), which
		// matches the ORDER BY below so pages never overlap or skip.
		conds = append(conds, fmt.Sprintf(
			"(fp.position > $%d OR (fp.position = $%d AND fp.file_id > $%d))",
			n, n, n+1))
		args = append(args, cur.Position, fileID)
		n += 2
	}
	orderBy = "fp.position ASC, fp.file_id ASC"
	where := "WHERE " + strings.Join(conds, " AND ")
	// Fetch one extra row so we can tell whether another page exists.
	args = append(args, limit+1)
	sqlStr := fmt.Sprintf(`
SELECT %s, fp.position
FROM data.file_pool fp
JOIN data.files f ON f.id = fp.file_id
JOIN core.mime_types mt ON mt.id = f.mime_id
JOIN core.users u ON u.id = f.creator_id
%s
ORDER BY %s
LIMIT $%d`, fileSelectForPool, where, orderBy, n)
	q := connOrTx(ctx, r.pool)
	rows, err := q.Query(ctx, sqlStr, args...)
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.ListFiles query: %w", err)
	}
	collected, err := pgx.CollectRows(rows, pgx.RowToStructByName[poolFileRow])
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.ListFiles scan: %w", err)
	}
	// The sentinel (limit+1)-th row only signals that more pages exist;
	// it is never returned to the caller.
	hasMore := len(collected) > limit
	if hasMore {
		collected = collected[:limit]
	}
	items := make([]domain.PoolFile, len(collected))
	for i, row := range collected {
		items[i] = toPoolFile(row)
	}
	page := &domain.PoolFilePage{Items: items}
	if hasMore && len(collected) > 0 {
		// The next cursor points at the last row actually returned.
		last := collected[len(collected)-1]
		cur := encodePoolCursor(poolFileCursor{
			Position: last.Position,
			FileID:   last.ID.String(),
		})
		page.NextCursor = &cur
	}
	// Batch-load tags.
	if len(items) > 0 {
		fileIDs := make([]uuid.UUID, len(items))
		for i, pf := range items {
			fileIDs[i] = pf.File.ID
		}
		tagMap, err := r.loadPoolTagsBatch(ctx, fileIDs)
		if err != nil {
			return nil, err
		}
		for i, pf := range items {
			page.Items[i].File.Tags = tagMap[pf.File.ID]
		}
	}
	return page, nil
}
// loadPoolTagsBatch fetches the tags for all given files in a single query
// (same pattern as FileRepo.loadTagsBatch) and returns them keyed by file
// ID. Files without tags map to an empty, non-nil slice.
func (r *PoolRepo) loadPoolTagsBatch(ctx context.Context, fileIDs []uuid.UUID) (map[uuid.UUID][]domain.Tag, error) {
	if len(fileIDs) == 0 {
		return nil, nil
	}
	args := make([]any, len(fileIDs))
	placeholders := make([]string, len(fileIDs))
	for i, fid := range fileIDs {
		args[i] = fid
		placeholders[i] = fmt.Sprintf("$%d", i+1)
	}
	sqlStr := fmt.Sprintf(`
SELECT ft.file_id,
t.id, t.name, t.notes, t.color,
t.category_id,
c.name AS category_name,
c.color AS category_color,
t.metadata, t.creator_id, u.name AS creator_name, t.is_public
FROM data.file_tag ft
JOIN data.tags t ON t.id = ft.tag_id
JOIN core.users u ON u.id = t.creator_id
LEFT JOIN data.categories c ON c.id = t.category_id
WHERE ft.file_id IN (%s)
ORDER BY ft.file_id, t.name`, strings.Join(placeholders, ","))
	rows, err := connOrTx(ctx, r.pool).Query(ctx, sqlStr, args...)
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.loadPoolTagsBatch: %w", err)
	}
	tagRows, err := pgx.CollectRows(rows, pgx.RowToStructByName[fileTagRow])
	if err != nil {
		return nil, fmt.Errorf("PoolRepo.loadPoolTagsBatch scan: %w", err)
	}
	// Seed every requested file with an empty slice so absent keys cannot
	// be confused with "no tags".
	byFile := make(map[uuid.UUID][]domain.Tag, len(fileIDs))
	for _, fid := range fileIDs {
		byFile[fid] = []domain.Tag{}
	}
	for _, tr := range tagRows {
		byFile[tr.FileID] = append(byFile[tr.FileID], toTagFromFileTag(tr))
	}
	return byFile, nil
}
// ---------------------------------------------------------------------------
// AddFiles
// ---------------------------------------------------------------------------

// AddFiles inserts files into the pool. When position is nil, files are
// appended after the last existing file (MAX(position) + 1000 * i).
// When position is provided (0-indexed), files are inserted at that index
// and all pool positions are reassigned in one shot.
//
// NOTE(review): the append path reads MAX(position) and then inserts in
// separate statements, so two concurrent appends can interleave; presumably
// callers run this inside a transaction surfaced via connOrTx — confirm.
func (r *PoolRepo) AddFiles(ctx context.Context, poolID uuid.UUID, fileIDs []uuid.UUID, position *int) error {
	if len(fileIDs) == 0 {
		return nil
	}
	q := connOrTx(ctx, r.pool)
	if position == nil {
		// Append: get current max position, then bulk-insert.
		var maxPos int
		row := q.QueryRow(ctx, `SELECT COALESCE(MAX(position), 0) FROM data.file_pool WHERE pool_id = $1`, poolID)
		if err := row.Scan(&maxPos); err != nil {
			return fmt.Errorf("PoolRepo.AddFiles maxPos: %w", err)
		}
		// ON CONFLICT DO NOTHING makes re-adding an existing member a no-op.
		const ins = `INSERT INTO data.file_pool (file_id, pool_id, position) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING`
		for i, fid := range fileIDs {
			if _, err := q.Exec(ctx, ins, fid, poolID, maxPos+1000*(i+1)); err != nil {
				return fmt.Errorf("PoolRepo.AddFiles insert: %w", err)
			}
		}
		return nil
	}
	// Positional insert: rebuild the full ordered list and reassign.
	return r.insertAtPosition(ctx, q, poolID, fileIDs, *position)
}
// insertAtPosition splices newIDs into the pool's ordering at index pos
// (0-indexed, clamped to the valid range) and rewrites every position.
// IDs already present in the pool are ignored.
func (r *PoolRepo) insertAtPosition(ctx context.Context, q db.Querier, poolID uuid.UUID, newIDs []uuid.UUID, pos int) error {
	// Step 1: read the pool's current ordering.
	rows, err := q.Query(ctx, `SELECT file_id FROM data.file_pool WHERE pool_id = $1 ORDER BY position ASC, file_id ASC`, poolID)
	if err != nil {
		return fmt.Errorf("PoolRepo.insertAtPosition fetch: %w", err)
	}
	var existing []uuid.UUID
	for rows.Next() {
		var id uuid.UUID
		if err := rows.Scan(&id); err != nil {
			rows.Close()
			return fmt.Errorf("PoolRepo.insertAtPosition scan: %w", err)
		}
		existing = append(existing, id)
	}
	rows.Close()
	if err := rows.Err(); err != nil {
		return fmt.Errorf("PoolRepo.insertAtPosition rows: %w", err)
	}
	// Step 2: drop IDs that are already members, then splice the remainder
	// in at the clamped index.
	member := make(map[uuid.UUID]bool, len(existing))
	for _, id := range existing {
		member[id] = true
	}
	pending := make([]uuid.UUID, 0, len(newIDs))
	for _, id := range newIDs {
		if !member[id] {
			pending = append(pending, id)
		}
	}
	if len(pending) == 0 {
		// Every requested file is already in the pool; nothing to rewrite.
		return nil
	}
	switch {
	case pos < 0:
		pos = 0
	case pos > len(existing):
		pos = len(existing)
	}
	ordered := make([]uuid.UUID, 0, len(existing)+len(pending))
	ordered = append(ordered, existing[:pos]...)
	ordered = append(ordered, pending...)
	ordered = append(ordered, existing[pos:]...)
	// Step 3: reassign every position in one pass.
	return r.reassignPositions(ctx, q, poolID, ordered)
}
// reassignPositions wipes the pool's membership rows and re-inserts ordered
// with positions 1000, 2000, 3000, … — the gap leaves room for cheap future
// positional inserts.
func (r *PoolRepo) reassignPositions(ctx context.Context, q db.Querier, poolID uuid.UUID, ordered []uuid.UUID) error {
	if _, err := q.Exec(ctx, `DELETE FROM data.file_pool WHERE pool_id = $1`, poolID); err != nil {
		return fmt.Errorf("PoolRepo.reassignPositions delete: %w", err)
	}
	const ins = `INSERT INTO data.file_pool (file_id, pool_id, position) VALUES ($1, $2, $3)`
	for idx, fileID := range ordered {
		if _, err := q.Exec(ctx, ins, fileID, poolID, (idx+1)*1000); err != nil {
			return fmt.Errorf("PoolRepo.reassignPositions insert: %w", err)
		}
	}
	return nil
}
// ---------------------------------------------------------------------------
// RemoveFiles
// ---------------------------------------------------------------------------

// RemoveFiles deletes the given files' membership rows from the pool. IDs
// that are not in the pool are simply not matched by the DELETE.
func (r *PoolRepo) RemoveFiles(ctx context.Context, poolID uuid.UUID, fileIDs []uuid.UUID) error {
	if len(fileIDs) == 0 {
		return nil
	}
	args := make([]any, 0, len(fileIDs)+1)
	args = append(args, poolID)
	placeholders := make([]string, 0, len(fileIDs))
	for i, fid := range fileIDs {
		placeholders = append(placeholders, fmt.Sprintf("$%d", i+2))
		args = append(args, fid)
	}
	query := fmt.Sprintf(
		`DELETE FROM data.file_pool WHERE pool_id = $1 AND file_id IN (%s)`,
		strings.Join(placeholders, ","))
	if _, err := connOrTx(ctx, r.pool).Exec(ctx, query, args...); err != nil {
		return fmt.Errorf("PoolRepo.RemoveFiles: %w", err)
	}
	return nil
}
// ---------------------------------------------------------------------------
// Reorder
// ---------------------------------------------------------------------------

// Reorder replaces the pool's ordered sequence with positions 1000, 2000, …
// Only file IDs already in the pool are applied; unknown IDs and duplicates
// are skipped, and current members omitted from fileIDs keep their relative
// order at the end of the sequence instead of being removed.
//
// Fix: the previous implementation passed fileIDs straight to
// reassignPositions despite documenting that unknown IDs are skipped —
// unknown IDs were inserted verbatim (adding files as a side effect or
// failing on a foreign-key violation) and duplicate IDs hit the primary key
// after the DELETE had already run, while omitted members were silently
// dropped from the pool.
func (r *PoolRepo) Reorder(ctx context.Context, poolID uuid.UUID, fileIDs []uuid.UUID) error {
	q := connOrTx(ctx, r.pool)
	// Fetch current membership so the request can be validated against it.
	rows, err := q.Query(ctx, `SELECT file_id FROM data.file_pool WHERE pool_id = $1 ORDER BY position ASC, file_id ASC`, poolID)
	if err != nil {
		return fmt.Errorf("PoolRepo.Reorder fetch: %w", err)
	}
	var current []uuid.UUID
	for rows.Next() {
		var fid uuid.UUID
		if err := rows.Scan(&fid); err != nil {
			rows.Close()
			return fmt.Errorf("PoolRepo.Reorder scan: %w", err)
		}
		current = append(current, fid)
	}
	rows.Close()
	if err := rows.Err(); err != nil {
		return fmt.Errorf("PoolRepo.Reorder rows: %w", err)
	}
	member := make(map[uuid.UUID]bool, len(current))
	for _, fid := range current {
		member[fid] = true
	}
	// Keep only known members, first occurrence wins.
	ordered := make([]uuid.UUID, 0, len(current))
	seen := make(map[uuid.UUID]bool, len(fileIDs))
	for _, fid := range fileIDs {
		if member[fid] && !seen[fid] {
			ordered = append(ordered, fid)
			seen[fid] = true
		}
	}
	// Preserve members absent from the request rather than dropping them.
	for _, fid := range current {
		if !seen[fid] {
			ordered = append(ordered, fid)
		}
	}
	return r.reassignPositions(ctx, q, poolID, ordered)
}

View File

@ -0,0 +1,356 @@
package handler
import (
"net/http"
"strconv"
"time"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
"tanabata/backend/internal/service"
)
// PoolHandler handles all /pools endpoints.
type PoolHandler struct {
	// poolSvc provides pool CRUD and pool-file management.
	poolSvc *service.PoolService
}

// NewPoolHandler creates a PoolHandler.
func NewPoolHandler(poolSvc *service.PoolService) *PoolHandler {
	return &PoolHandler{poolSvc: poolSvc}
}
// ---------------------------------------------------------------------------
// Response types
// ---------------------------------------------------------------------------

// poolJSON is the wire representation of a pool.
type poolJSON struct {
	ID          string  `json:"id"`
	Name        string  `json:"name"`
	Notes       *string `json:"notes"`
	CreatorID   int16   `json:"creator_id"`
	CreatorName string  `json:"creator_name"`
	IsPublic    bool    `json:"is_public"`
	FileCount   int     `json:"file_count"`
	CreatedAt   string  `json:"created_at"` // RFC 3339 UTC (see toPoolJSON)
}

// poolFileJSON is one file entry in a pool listing: the shared file
// serialization plus the pool-local position.
type poolFileJSON struct {
	fileJSON
	Position int `json:"position"`
}
// toPoolJSON converts a domain.Pool into its API representation, formatting
// CreatedAt as RFC 3339 in UTC.
func toPoolJSON(p domain.Pool) poolJSON {
	out := poolJSON{
		ID:          p.ID.String(),
		Name:        p.Name,
		Notes:       p.Notes,
		CreatorID:   p.CreatorID,
		CreatorName: p.CreatorName,
		IsPublic:    p.IsPublic,
		FileCount:   p.FileCount,
	}
	out.CreatedAt = p.CreatedAt.UTC().Format(time.RFC3339)
	return out
}

// toPoolFileJSON wraps the shared file serialization and attaches the
// pool-local position.
func toPoolFileJSON(pf domain.PoolFile) poolFileJSON {
	out := poolFileJSON{Position: pf.Position}
	out.fileJSON = toFileJSON(pf.File)
	return out
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

// parsePoolID extracts and validates the :pool_id path parameter. On a
// malformed UUID it writes a validation error response and returns ok=false.
func parsePoolID(c *gin.Context) (uuid.UUID, bool) {
	raw := c.Param("pool_id")
	id, err := uuid.Parse(raw)
	if err != nil {
		respondError(c, domain.ErrValidation)
		return uuid.Nil, false
	}
	return id, true
}
// parsePoolFileParams reads the cursor/limit/filter query parameters for
// pool file listings. limit defaults to 50 when absent, non-numeric, or
// non-positive.
//
// Fix: values above 200 are now clamped to 200 — previously they silently
// fell back to the default of 50, which was inconsistent with the
// repository-side clamp (db.ClampLimit with max 200).
func parsePoolFileParams(c *gin.Context) port.PoolFileListParams {
	limit := 50
	if s := c.Query("limit"); s != "" {
		if n, err := strconv.Atoi(s); err == nil && n > 0 {
			if n > 200 {
				n = 200
			}
			limit = n
		}
	}
	return port.PoolFileListParams{
		Cursor: c.Query("cursor"),
		Limit:  limit,
		Filter: c.Query("filter"),
	}
}
// ---------------------------------------------------------------------------
// GET /pools
// ---------------------------------------------------------------------------

// List handles GET /pools: an offset-paginated pool listing, defaulting the
// sort key to "created".
func (h *PoolHandler) List(c *gin.Context) {
	params := parseOffsetParams(c, "created")
	page, err := h.poolSvc.List(c.Request.Context(), params)
	if err != nil {
		respondError(c, err)
		return
	}
	out := make([]poolJSON, 0, len(page.Items))
	for _, p := range page.Items {
		out = append(out, toPoolJSON(p))
	}
	respondJSON(c, http.StatusOK, gin.H{
		"items":  out,
		"total":  page.Total,
		"offset": page.Offset,
		"limit":  page.Limit,
	})
}
// ---------------------------------------------------------------------------
// POST /pools
// ---------------------------------------------------------------------------

// Create handles POST /pools; name is required, notes/is_public optional.
// Responds 201 with the created pool.
func (h *PoolHandler) Create(c *gin.Context) {
	var req struct {
		Name     string  `json:"name" binding:"required"`
		Notes    *string `json:"notes"`
		IsPublic *bool   `json:"is_public"`
	}
	if err := c.ShouldBindJSON(&req); err != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	params := service.PoolParams{
		Name:     req.Name,
		Notes:    req.Notes,
		IsPublic: req.IsPublic,
	}
	created, err := h.poolSvc.Create(c.Request.Context(), params)
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusCreated, toPoolJSON(*created))
}
// ---------------------------------------------------------------------------
// GET /pools/:pool_id
// ---------------------------------------------------------------------------

// Get handles GET /pools/:pool_id, returning a single pool.
func (h *PoolHandler) Get(c *gin.Context) {
	poolID, ok := parsePoolID(c)
	if !ok {
		return
	}
	pool, err := h.poolSvc.Get(c.Request.Context(), poolID)
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusOK, toPoolJSON(*pool))
}
// ---------------------------------------------------------------------------
// PATCH /pools/:pool_id
// ---------------------------------------------------------------------------

// Update handles PATCH /pools/:pool_id. The body is decoded into a map so
// "field absent" and "field set to null" can be told apart; only fields
// present (and of the expected type) are forwarded to the service.
func (h *PoolHandler) Update(c *gin.Context) {
	poolID, ok := parsePoolID(c)
	if !ok {
		return
	}
	var raw map[string]any
	if err := c.ShouldBindJSON(&raw); err != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	var params service.PoolParams
	if name, ok := raw["name"].(string); ok {
		params.Name = name
	}
	if v, present := raw["notes"]; present {
		switch s := v.(type) {
		case nil:
			// Explicit null clears the notes. NOTE(review): this is stored
			// as "" rather than SQL NULL — confirm that matches intent.
			empty := ""
			params.Notes = &empty
		case string:
			params.Notes = &s
		}
	}
	if b, ok := raw["is_public"].(bool); ok {
		params.IsPublic = &b
	}
	updated, err := h.poolSvc.Update(c.Request.Context(), poolID, params)
	if err != nil {
		respondError(c, err)
		return
	}
	respondJSON(c, http.StatusOK, toPoolJSON(*updated))
}
// ---------------------------------------------------------------------------
// DELETE /pools/:pool_id
// ---------------------------------------------------------------------------

// Delete handles DELETE /pools/:pool_id; responds 204 on success.
func (h *PoolHandler) Delete(c *gin.Context) {
	poolID, ok := parsePoolID(c)
	if !ok {
		return
	}
	if err := h.poolSvc.Delete(c.Request.Context(), poolID); err != nil {
		respondError(c, err)
		return
	}
	c.Status(http.StatusNoContent)
}
// ---------------------------------------------------------------------------
// GET /pools/:pool_id/files
// ---------------------------------------------------------------------------

// ListFiles handles GET /pools/:pool_id/files with cursor pagination;
// next_cursor is null on the final page.
func (h *PoolHandler) ListFiles(c *gin.Context) {
	poolID, ok := parsePoolID(c)
	if !ok {
		return
	}
	page, err := h.poolSvc.ListFiles(c.Request.Context(), poolID, parsePoolFileParams(c))
	if err != nil {
		respondError(c, err)
		return
	}
	out := make([]poolFileJSON, 0, len(page.Items))
	for _, pf := range page.Items {
		out = append(out, toPoolFileJSON(pf))
	}
	respondJSON(c, http.StatusOK, gin.H{
		"items":       out,
		"next_cursor": page.NextCursor,
	})
}
// ---------------------------------------------------------------------------
// POST /pools/:pool_id/files
// ---------------------------------------------------------------------------

// AddFiles handles POST /pools/:pool_id/files: adds the given file IDs at
// an optional 0-indexed position (absent = append). Responds 201.
func (h *PoolHandler) AddFiles(c *gin.Context) {
	poolID, ok := parsePoolID(c)
	if !ok {
		return
	}
	var req struct {
		FileIDs  []string `json:"file_ids" binding:"required"`
		Position *int     `json:"position"`
	}
	if err := c.ShouldBindJSON(&req); err != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	ids := make([]uuid.UUID, len(req.FileIDs))
	for i, raw := range req.FileIDs {
		parsed, err := uuid.Parse(raw)
		if err != nil {
			respondError(c, domain.ErrValidation)
			return
		}
		ids[i] = parsed
	}
	if err := h.poolSvc.AddFiles(c.Request.Context(), poolID, ids, req.Position); err != nil {
		respondError(c, err)
		return
	}
	c.Status(http.StatusCreated)
}
// ---------------------------------------------------------------------------
// POST /pools/:pool_id/files/remove
// ---------------------------------------------------------------------------

// RemoveFiles handles POST /pools/:pool_id/files/remove; responds 204.
func (h *PoolHandler) RemoveFiles(c *gin.Context) {
	poolID, ok := parsePoolID(c)
	if !ok {
		return
	}
	var req struct {
		FileIDs []string `json:"file_ids" binding:"required"`
	}
	if err := c.ShouldBindJSON(&req); err != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	ids := make([]uuid.UUID, len(req.FileIDs))
	for i, raw := range req.FileIDs {
		parsed, err := uuid.Parse(raw)
		if err != nil {
			respondError(c, domain.ErrValidation)
			return
		}
		ids[i] = parsed
	}
	if err := h.poolSvc.RemoveFiles(c.Request.Context(), poolID, ids); err != nil {
		respondError(c, err)
		return
	}
	c.Status(http.StatusNoContent)
}
// ---------------------------------------------------------------------------
// PUT /pools/:pool_id/files/reorder
// ---------------------------------------------------------------------------

// Reorder handles PUT /pools/:pool_id/files/reorder, replacing the pool's
// ordered file sequence with the submitted IDs. Responds 204.
func (h *PoolHandler) Reorder(c *gin.Context) {
	poolID, ok := parsePoolID(c)
	if !ok {
		return
	}
	var req struct {
		FileIDs []string `json:"file_ids" binding:"required"`
	}
	if err := c.ShouldBindJSON(&req); err != nil {
		respondError(c, domain.ErrValidation)
		return
	}
	ids := make([]uuid.UUID, len(req.FileIDs))
	for i, raw := range req.FileIDs {
		parsed, err := uuid.Parse(raw)
		if err != nil {
			respondError(c, domain.ErrValidation)
			return
		}
		ids[i] = parsed
	}
	if err := h.poolSvc.Reorder(c.Request.Context(), poolID, ids); err != nil {
		respondError(c, err)
		return
	}
	c.Status(http.StatusNoContent)
}

View File

@ -13,6 +13,7 @@ func NewRouter(
fileHandler *FileHandler,
tagHandler *TagHandler,
categoryHandler *CategoryHandler,
poolHandler *PoolHandler,
) *gin.Engine {
r := gin.New()
r.Use(gin.Logger(), gin.Recovery())
@ -107,5 +108,25 @@ func NewRouter(
categories.GET("/:category_id/tags", categoryHandler.ListTags)
}
// -------------------------------------------------------------------------
// Pools (all require auth)
// -------------------------------------------------------------------------
pools := v1.Group("/pools", auth.Handle())
{
pools.GET("", poolHandler.List)
pools.POST("", poolHandler.Create)
pools.GET("/:pool_id", poolHandler.Get)
pools.PATCH("/:pool_id", poolHandler.Update)
pools.DELETE("/:pool_id", poolHandler.Delete)
// Sub-routes registered before /:pool_id/files to avoid param conflicts.
pools.POST("/:pool_id/files/remove", poolHandler.RemoveFiles)
pools.PUT("/:pool_id/files/reorder", poolHandler.Reorder)
pools.GET("/:pool_id/files", poolHandler.ListFiles)
pools.POST("/:pool_id/files", poolHandler.AddFiles)
}
return r
}

View File

@ -0,0 +1,177 @@
package service
import (
"context"
"encoding/json"
"github.com/google/uuid"
"tanabata/backend/internal/domain"
"tanabata/backend/internal/port"
)
// poolObjectType is the audit-log object type label for pools.
const poolObjectType = "pool"

// poolObjectTypeID is the ACL object-type ID passed to ACLService.CanEdit.
const poolObjectTypeID int16 = 4 // fourth row in 007_seed_data.sql object_types

// PoolParams holds the fields for creating or patching a pool. On update,
// zero values (empty Name, nil Notes/IsPublic, empty Metadata) mean "leave
// the current value unchanged" — see Update.
type PoolParams struct {
	Name     string
	Notes    *string
	Metadata json.RawMessage
	IsPublic *bool
}

// PoolService handles pool CRUD and pool-file management with ACL + audit.
type PoolService struct {
	pools port.PoolRepo // persistence
	acl   *ACLService   // edit-permission checks
	audit *AuditService // best-effort audit logging
}

// NewPoolService creates a PoolService.
func NewPoolService(
	pools port.PoolRepo,
	acl *ACLService,
	audit *AuditService,
) *PoolService {
	return &PoolService{pools: pools, acl: acl, audit: audit}
}
// ---------------------------------------------------------------------------
// CRUD
// ---------------------------------------------------------------------------

// List returns a paginated list of pools. No ACL filtering or auditing is
// applied; the call is delegated directly to the repository.
func (s *PoolService) List(ctx context.Context, params port.OffsetParams) (*domain.PoolOffsetPage, error) {
	return s.pools.List(ctx, params)
}

// Get returns a pool by ID (the repository reports domain.ErrNotFound for a
// missing pool).
func (s *PoolService) Get(ctx context.Context, id uuid.UUID) (*domain.Pool, error) {
	return s.pools.GetByID(ctx, id)
}
// Create inserts a new pool owned by the requesting user (taken from ctx)
// and writes a best-effort audit record for the creation.
func (s *PoolService) Create(ctx context.Context, p PoolParams) (*domain.Pool, error) {
	userID, _, _ := domain.UserFromContext(ctx)
	isPublic := false
	if p.IsPublic != nil {
		isPublic = *p.IsPublic
	}
	created, err := s.pools.Create(ctx, &domain.Pool{
		Name:      p.Name,
		Notes:     p.Notes,
		Metadata:  p.Metadata,
		CreatorID: userID,
		IsPublic:  isPublic,
	})
	if err != nil {
		return nil, err
	}
	// Audit failures are deliberately ignored; the pool already exists.
	objType := poolObjectType
	_ = s.audit.Log(ctx, "pool_create", &objType, &created.ID, nil)
	return created, nil
}
// Update applies a partial patch to a pool: only the PoolParams fields that
// carry a non-zero value are overlaid onto the current row. The caller must
// pass ACLService.CanEdit or domain.ErrForbidden is returned. A best-effort
// audit event is recorded on success.
func (s *PoolService) Update(ctx context.Context, id uuid.UUID, p PoolParams) (*domain.Pool, error) {
	userID, isAdmin, _ := domain.UserFromContext(ctx)
	current, err := s.pools.GetByID(ctx, id)
	if err != nil {
		return nil, err
	}
	allowed, err := s.acl.CanEdit(ctx, userID, isAdmin, current.CreatorID, poolObjectTypeID, id)
	if err != nil {
		return nil, err
	}
	if !allowed {
		return nil, domain.ErrForbidden
	}
	// Start from a copy of the current row and overlay provided fields.
	patch := *current
	if p.IsPublic != nil {
		patch.IsPublic = *p.IsPublic
	}
	if len(p.Metadata) > 0 {
		patch.Metadata = p.Metadata
	}
	if p.Notes != nil {
		patch.Notes = p.Notes
	}
	if p.Name != "" {
		patch.Name = p.Name
	}
	updated, err := s.pools.Update(ctx, id, &patch)
	if err != nil {
		return nil, err
	}
	objType := poolObjectType
	_ = s.audit.Log(ctx, "pool_edit", &objType, &id, nil)
	return updated, nil
}
// Delete removes a pool by ID, enforcing the edit ACL via ACLService.CanEdit
// and recording a best-effort audit event on success.
func (s *PoolService) Delete(ctx context.Context, id uuid.UUID) error {
	userID, isAdmin, _ := domain.UserFromContext(ctx)
	pool, err := s.pools.GetByID(ctx, id)
	if err != nil {
		return err
	}
	allowed, err := s.acl.CanEdit(ctx, userID, isAdmin, pool.CreatorID, poolObjectTypeID, id)
	if err != nil {
		return err
	}
	if !allowed {
		return domain.ErrForbidden
	}
	if err := s.pools.Delete(ctx, id); err != nil {
		return err
	}
	objType := poolObjectType
	_ = s.audit.Log(ctx, "pool_delete", &objType, &id, nil)
	return nil
}
// ---------------------------------------------------------------------------
// Pool-file operations
// ---------------------------------------------------------------------------

// ListFiles returns cursor-paginated files within a pool ordered by position.
// No ACL check or auditing is applied; the call is delegated to the repo.
func (s *PoolService) ListFiles(ctx context.Context, poolID uuid.UUID, params port.PoolFileListParams) (*domain.PoolFilePage, error) {
	return s.pools.ListFiles(ctx, poolID, params)
}
// AddFiles adds files to a pool at the given position (nil = append) and
// records a best-effort audit event carrying the requested file count.
// NOTE(review): unlike Update/Delete, no CanEdit check is performed here —
// confirm pool membership is meant to be editable by any authenticated user.
func (s *PoolService) AddFiles(ctx context.Context, poolID uuid.UUID, fileIDs []uuid.UUID, position *int) error {
	if err := s.pools.AddFiles(ctx, poolID, fileIDs, position); err != nil {
		return err
	}
	objType := poolObjectType
	details := map[string]any{"count": len(fileIDs)}
	_ = s.audit.Log(ctx, "file_pool_add", &objType, &poolID, details)
	return nil
}
// RemoveFiles removes files from a pool and records a best-effort audit
// event carrying the requested file count.
// NOTE(review): no CanEdit check here, unlike Update/Delete — confirm this
// is intentional.
func (s *PoolService) RemoveFiles(ctx context.Context, poolID uuid.UUID, fileIDs []uuid.UUID) error {
	if err := s.pools.RemoveFiles(ctx, poolID, fileIDs); err != nil {
		return err
	}
	objType := poolObjectType
	details := map[string]any{"count": len(fileIDs)}
	_ = s.audit.Log(ctx, "file_pool_remove", &objType, &poolID, details)
	return nil
}
// Reorder sets the full ordered sequence of file IDs within a pool and
// records a best-effort audit event with the submitted count.
//
// Fix: the original skipped auditing entirely, which was inconsistent with
// the sibling AddFiles/RemoveFiles mutations.
func (s *PoolService) Reorder(ctx context.Context, poolID uuid.UUID, fileIDs []uuid.UUID) error {
	if err := s.pools.Reorder(ctx, poolID, fileIDs); err != nil {
		return err
	}
	objType := poolObjectType
	_ = s.audit.Log(ctx, "file_pool_reorder", &objType, &poolID, map[string]any{"count": len(fileIDs)})
	return nil
}