fix: auto-apply migrations on startup + cleanup duplicate inboxes (#60)

- Add migration 0002 to soft-delete duplicate inboxes per user, keeping
  the oldest one and reassigning tasks to it.
- Run drizzle migrations on server startup via drizzle-orm/node-postgres
  migrator.
- Update Dockerfile to copy the migrations folder into the runtime image
  and externalize pg/drizzle-orm from the esbuild bundle.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
le king fu 2026-04-09 08:53:09 -04:00
parent 2a0dd01307
commit 1df41cef1f
4 changed files with 79 additions and 3 deletions

View file

@ -15,7 +15,7 @@ COPY . .
RUN npm run build
# Bundle custom server + ws into a single JS file
RUN npx esbuild server.ts --bundle --platform=node --target=node22 --outfile=dist-server/server.js \
--external:next --external:.next
--external:next --external:.next --external:pg --external:pg-native --external:drizzle-orm
# Production
FROM base AS runner
@ -31,6 +31,7 @@ COPY --from=builder /app/package.json ./
COPY --from=builder /app/public ./public
COPY --from=builder --chown=nextjs:nodejs /app/.next ./.next
COPY --from=builder --chown=nextjs:nodejs /app/dist-server/server.js ./server.js
COPY --from=builder --chown=nextjs:nodejs /app/src/db/migrations ./src/db/migrations
USER nextjs
EXPOSE 3000

View file

@ -1,15 +1,38 @@
import { createServer } from 'http';
import next from 'next';
import { Pool } from 'pg';
import { drizzle } from 'drizzle-orm/node-postgres';
import { migrate } from 'drizzle-orm/node-postgres/migrator';
import { setupWebSocket } from './src/lib/ws';
const dev = process.env.NODE_ENV !== 'production';
const hostname = process.env.HOSTNAME || '0.0.0.0';
const port = parseInt(process.env.PORT || '3000', 10);
/**
 * Apply any pending drizzle migrations before the HTTP server starts
 * accepting traffic (#60).
 *
 * Opens a dedicated pg Pool for the migration run and always closes it
 * in `finally`, so no connections leak even when `migrate` throws.
 * The caller is responsible for handling a rejected promise (the startup
 * code exits the process on migration failure).
 *
 * NOTE(review): assumes DATABASE_URL is set; `new Pool` does not validate
 * the connection string eagerly, so a missing/bad URL surfaces as an
 * error from `migrate` — confirm this is the intended failure mode.
 * The migrationsFolder path is relative to the process cwd and must match
 * the Dockerfile COPY destination (./src/db/migrations).
 */
async function runMigrations() {
const pool = new Pool({ connectionString: process.env.DATABASE_URL });
const db = drizzle(pool);
try {
// Runs every migration not yet recorded in drizzle's journal table.
await migrate(db, { migrationsFolder: './src/db/migrations' });
console.log('> Migrations applied');
} finally {
// Close the pool whether migration succeeded or failed.
await pool.end();
}
}
const app = next({ dev, hostname, port });
const handle = app.getRequestHandler();
app.prepare().then(() => {
(async () => {
try {
await runMigrations();
} catch (err) {
console.error('> Migration error:', err);
process.exit(1);
}
await app.prepare();
const server = createServer((req, res) => {
// Don't log query params on /ws route (ticket security)
handle(req, res);
@ -21,4 +44,4 @@ app.prepare().then(() => {
console.log(`> Ready on http://${hostname}:${port}`);
console.log(`> WebSocket server on ws://${hostname}:${port}/ws`);
});
});
})();

View file

@ -0,0 +1,45 @@
-- Cleanup duplicate inboxes per user (#60)
-- For each user with more than one active inbox, keep the oldest one
-- (lowest created_at, ties broken by lowest id for determinism), reassign
-- all of that user's tasks to it, and soft-delete the duplicates.
--
-- Statement order matters: tasks must be reassigned BEFORE the duplicate
-- inboxes are soft-deleted, otherwise tasks would point at deleted lists.
-- NOTE(review): assumes the drizzle migrator runs both statements of this
-- file inside a single transaction, so no writes can land on a duplicate
-- inbox between the two statements — confirm against the migrator config.
WITH ranked_inboxes AS (
-- Rank each user's active (non-soft-deleted) inboxes oldest-first.
SELECT
id,
user_id,
ROW_NUMBER() OVER (PARTITION BY user_id ORDER BY created_at ASC, id ASC) AS rn
FROM sl_lists
WHERE is_inbox = true
AND deleted_at IS NULL
),
canonical AS (
-- rn = 1 is the survivor: the oldest inbox per user.
SELECT user_id, id AS canonical_id
FROM ranked_inboxes
WHERE rn = 1
),
duplicates AS (
-- Every other active inbox for that user is a duplicate to be folded in.
SELECT r.id AS duplicate_id, c.canonical_id, r.user_id
FROM ranked_inboxes r
JOIN canonical c ON c.user_id = r.user_id
WHERE r.rn > 1
)
-- Reassign tasks from duplicate inboxes to the canonical one.
-- NOTE(review): the user_id guard means tasks owned by a DIFFERENT user
-- that somehow reference a duplicate inbox are left pointing at a
-- soft-deleted list — confirm cross-user task/list references cannot occur.
UPDATE sl_tasks
SET list_id = d.canonical_id, updated_at = NOW()
FROM duplicates d
WHERE sl_tasks.list_id = d.duplicate_id
AND sl_tasks.user_id = d.user_id;
--> statement-breakpoint
-- Soft-delete the duplicate inboxes. The ranking CTE is re-derived here
-- (CTEs do not span the statement breakpoint); it yields the same rows as
-- above because the first statement did not modify sl_lists.
WITH ranked_inboxes AS (
SELECT
id,
user_id,
ROW_NUMBER() OVER (PARTITION BY user_id ORDER BY created_at ASC, id ASC) AS rn
FROM sl_lists
WHERE is_inbox = true
AND deleted_at IS NULL
)
UPDATE sl_lists
SET deleted_at = NOW(), updated_at = NOW()
WHERE id IN (SELECT id FROM ranked_inboxes WHERE rn > 1);

View file

@ -15,6 +15,13 @@
"when": 1775567900000,
"tag": "0001_change_user_id_to_text",
"breakpoints": true
},
{
"idx": 2,
"version": "7",
"when": 1775649600000,
"tag": "0002_cleanup_duplicate_inboxes",
"breakpoints": true
}
]
}