dbmorph/migrate.ts

import getfiles from "https://deno.land/x/getfiles@v1.0.0/mod.ts";
import postgres from "https://deno.land/x/postgresjs@v3.4.4/mod.js";
const migrationDir = "./db/migrations/";
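
// Applies pending migrations from ./db/migrations and, when an output path is passed
// as the first argument, dumps the resulting database schema to that file.
// Migration files are expected to be named <timestamp>_<up|down>_<name>.sql|.ts,
// e.g. 2024-01-01_up_create_users.sql paired with 2024-01-01_down_create_users.sql.
//
// Usage sketch (assuming an entry point that forwards Deno.args; not part of this file):
//   import migrate from "./dbmorph/migrate.ts";
//   await migrate([]);             // apply pending migrations
//   await migrate(["schema.sql"]); // apply, then dump the schema to schema.sql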
export default async function (args: string[]) {
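// Connection settings come from the standard PG* environment variables (postgres.js defaults).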
const sql = postgres({});
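// Make sure the bookkeeping table for applied migrations exists.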
const exists = (await sql`SELECT to_regclass('public.dbmorph_migration')`)
.pop();
if (!exists?.to_regclass) {
await sql`CREATE TABLE IF NOT EXISTS dbmorph_migration (name varchar, filetime timestamp, created_at timestamp)`;
}
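// Helper plpgsql function, (re)created on every run: it reconstructs a CREATE TABLE
// statement for each user table whose name matches the given pattern (skipping
// dbmorph_migration) and backs the optional schema dump at the end of this file.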
await sql`CREATE OR REPLACE FUNCTION public.generate_create_table_statement(p_table_name character varying)
RETURNS SETOF text AS
$BODY$
DECLARE
v_table_ddl text;
column_record record;
table_rec record;
constraint_rec record;
firstrec boolean;
BEGIN
FOR table_rec IN
SELECT c.relname FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE relkind = 'r' AND relname != 'dbmorph_migration'
AND relname~ ('^('||p_table_name||')$')
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'
AND pg_catalog.pg_table_is_visible(c.oid)
ORDER BY c.relname
LOOP
FOR column_record IN
SELECT
b.nspname as schema_name,
b.relname as table_name,
a.attname as column_name,
pg_catalog.format_type(a.atttypid, a.atttypmod) as column_type,
CASE WHEN
(SELECT substring(pg_catalog.pg_get_expr(d.adbin, d.adrelid) for 128)
FROM pg_catalog.pg_attrdef d
WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef) IS NOT NULL THEN
'DEFAULT '|| (SELECT substring(pg_catalog.pg_get_expr(d.adbin, d.adrelid) for 128)
FROM pg_catalog.pg_attrdef d
WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef)
ELSE
''
END as column_default_value,
CASE WHEN a.attnotnull = true THEN
'NOT NULL'
ELSE
'NULL'
END as column_not_null,
a.attnum as attnum,
e.max_attnum as max_attnum
FROM
pg_catalog.pg_attribute a
INNER JOIN
(SELECT c.oid,
n.nspname,
c.relname
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname = table_rec.relname
AND pg_catalog.pg_table_is_visible(c.oid)
ORDER BY 2, 3) b
ON a.attrelid = b.oid
INNER JOIN
(SELECT
a.attrelid,
max(a.attnum) as max_attnum
FROM pg_catalog.pg_attribute a
WHERE a.attnum > 0
AND NOT a.attisdropped
GROUP BY a.attrelid) e
ON a.attrelid=e.attrelid
WHERE a.attnum > 0
AND NOT a.attisdropped
ORDER BY a.attnum
LOOP
IF column_record.attnum = 1 THEN
v_table_ddl:='CREATE TABLE '||column_record.schema_name||'.'||column_record.table_name||' (';
ELSE
v_table_ddl:=v_table_ddl||',';
END IF;
IF column_record.attnum <= column_record.max_attnum THEN
v_table_ddl:=v_table_ddl||chr(10)||
' '||column_record.column_name||' '||column_record.column_type||' '||column_record.column_default_value||' '||column_record.column_not_null;
END IF;
END LOOP;
firstrec := TRUE;
FOR constraint_rec IN
SELECT conname, pg_get_constraintdef(c.oid) as constraintdef
FROM pg_constraint c
WHERE conrelid=(
SELECT attrelid FROM pg_attribute
WHERE attrelid = (
SELECT oid FROM pg_class WHERE relname = table_rec.relname
) AND attname='tableoid'
)
LOOP
v_table_ddl:=v_table_ddl||','||chr(10);
v_table_ddl:=v_table_ddl||'CONSTRAINT '||constraint_rec.conname;
v_table_ddl:=v_table_ddl||chr(10)||' '||constraint_rec.constraintdef;
firstrec := FALSE;
END LOOP;
v_table_ddl:=v_table_ddl||');';
RETURN NEXT v_table_ddl;
END LOOP;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100`;
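// Collect the migration files (*.sql / *.ts) and sort them by name so the
// timestamp prefix determines execution order.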
const files = getfiles(migrationDir).filter((x) =>
x.name?.endsWith(".sql") || x.name?.endsWith(".ts")
)
.sort((a, b) => a.name < b.name ? -1 : a.name == b.name ? 0 : 1);
const lastMigration =
(await sql`SELECT * FROM dbmorph_migration ORDER BY created_at DESC LIMIT 1`)
.pop();
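// File names follow <timestamp>_<up|down>_<name>. Only "up" files whose timestamp
// is newer than the created_at of the last recorded migration are applied.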
for (const file of files) {
const [time, action, ...name] = file.name.split("_");
const fileTime = new Date(time);
if (
(!lastMigration || lastMigration.created_at < fileTime) &&
action === "up"
) {
if (file.name.endsWith(".ts")) {
// A .ts migration is a module whose default export receives the sql client.
const mod = await import(file.realPath);
try {
await mod.default(sql);
// Record the applied migration, mirroring the .sql path below.
await sql`INSERT INTO dbmorph_migration (name, filetime, created_at) VALUES (${file.name}, ${fileTime}, ${new Date()})`;
} catch (e) {
console.error(
"Unable to migrate ",
name.join("_"),
" rolling back...",
e,
);
// Roll back by running the matching *_down_* module.
const down = await import(file.realPath.replace(
file.name,
[time, "down", ...name].join("_"),
));
await down.default(sql);
}
continue;
}
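// Plain .sql migrations: naively split the file on ";" and run each statement.
// (This breaks on semicolons inside string literals or plpgsql function bodies.)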
const commands = Deno.readTextFileSync(file.realPath).split(/;/);
try {
console.error("Migrating ", name.join("_"), "...");
for (const command of commands) {
if (!command.trim()) continue;
// postgres.js will not execute a plain string through the tagged template,
// so sql.unsafe() is used to run the raw statement.
await sql.unsafe(command);
}
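// Record the applied migration.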
await sql`INSERT INTO dbmorph_migration (name, filetime, created_at) VALUES (${file.name}, ${fileTime}, ${new Date()})`;
} catch (e) {
console.error(
"Unable to migrate ",
name.join("_"),
" rolling back...",
e,
);
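// Roll back by running the statements from the matching *_down_* file.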
const commands = Deno.readTextFileSync(
file.realPath.replace(
file.name,
[time, "down", ...name].join("_"),
),
).split(/;/);
for (const command of commands) {
if (!command.trim()) continue;
await sql.unsafe(command);
}
}
}
}
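// Optional schema dump: when an output path is given, write CREATE TYPE statements
// for every enum plus the generated CREATE TABLE statements to that file.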
if (args.length >= 1) {
const res = await sql`SELECT generate_create_table_statement('.*')`;
const enums = await sql`SELECT n.nspname AS enum_schema,
t.typname AS enum_name,
string_agg(e.enumlabel, '|') AS enum_value
FROM pg_type t
join pg_enum e on t.oid = e.enumtypid
join pg_catalog.pg_namespace n ON n.oid = t.typnamespace
WHERE
t.typname~ ('^(.*)$')
GROUP BY enum_schema, enum_name`;
let output = "";
for (const enumDef of enums) {
output += `CREATE TYPE ${enumDef.enum_name} AS ENUM ('${
enumDef.enum_value.split("|").join("',\n'")
}');\n\n`;
}
for (const row of res) {
// Separate the generated CREATE TABLE statements so the dump stays readable.
output += row.generate_create_table_statement + "\n\n";
}
Deno.writeTextFileSync(args[0], output);
}
await sql.end();
console.error("done");
}