-- Migration naming convention: V{version}__{description}.sql
-- Example: V001__create_users_table.sql
-- Migration 1: Create initial schema
-- V001__create_users_table.sql
-- Creates the core users table.
-- Note: UNIQUE constraints in PostgreSQL are enforced by an implicit
-- unique index, so both username and email are already indexed; a
-- separate plain index on email would be redundant (the original
-- idx_users_email duplicated the unique index's coverage).
CREATE TABLE users (
id SERIAL PRIMARY KEY,
username VARCHAR(50) UNIQUE NOT NULL,
email VARCHAR(100) UNIQUE NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
-- No explicit email index needed: the UNIQUE constraint above already
-- created one that serves equality lookups on email.
-- Migration 2: Add column (backwards compatible)
-- V002__add_users_status.sql
-- Additive change: old application code that doesn't know about
-- `status` continues to work unchanged.
ALTER TABLE users
ADD COLUMN status VARCHAR(20) DEFAULT 'active';
-- Migration 3: Populate new column
-- V003__populate_users_status.sql
-- NOTE(review): in PostgreSQL, ADD COLUMN ... DEFAULT fills existing
-- rows with the default, so this backfill only matters for rows where
-- an old code path explicitly inserted NULL — confirm it is needed.
UPDATE users
SET status = 'active'
WHERE status IS NULL;
-- Migration 4: Add NOT NULL constraint
-- V004__make_status_not_null.sql
-- Run only after V003 guarantees no NULLs remain; this scans the table
-- to verify the constraint before accepting it.
ALTER TABLE users
ALTER COLUMN status SET NOT NULL;
-- Safe column rename (multi-step for zero downtime)
-- Expand/contract pattern: add the new column, backfill, switch the
-- application over, then drop the old column in a later release.
-- Step 1: Add new column (nullable, so purely additive)
-- V005__add_users_full_name.sql
ALTER TABLE users
ADD COLUMN full_name VARCHAR(100);
-- Step 2: Copy data
-- V006__populate_full_name.sql
-- WHERE clause makes the backfill idempotent (re-runs touch nothing).
UPDATE users
SET full_name = username
WHERE full_name IS NULL;
-- Step 3: Update application to use full_name
-- Deploy application code
-- NOTE(review): between steps 2 and 4, old code may still write
-- username only — dual-write in the application (or a trigger) is
-- needed to keep full_name in sync until the cutover is complete.
-- Step 4: Drop old column (destructive; only after all code paths
-- read/write full_name exclusively)
-- V007__drop_users_username.sql
ALTER TABLE users
DROP COLUMN username;
-- Idempotent migration (safe to run multiple times)
-- PostgreSQL 9.6+ offers the simpler one-liner:
--   ALTER TABLE users ADD COLUMN IF NOT EXISTS phone VARCHAR(20);
-- The DO-block below shows the general guard pattern for engines or
-- statements without IF NOT EXISTS support.
DO $$
BEGIN
IF NOT EXISTS (
SELECT 1 FROM information_schema.columns
-- Scope the probe to the current schema: without this filter a
-- `phone` column in ANY schema (another tenant, a backup copy)
-- would wrongly suppress the ALTER.
WHERE table_schema = current_schema()
AND table_name = 'users' AND column_name = 'phone'
) THEN
ALTER TABLE users ADD COLUMN phone VARCHAR(20);
END IF;
END $$;
-- Conditional index creation (built-in idempotency)
CREATE INDEX IF NOT EXISTS idx_users_status ON users(status);
-- Transaction-wrapped migration
-- PostgreSQL supports transactional DDL, so the table and its indexes
-- appear atomically. (MySQL/Oracle auto-commit most DDL — this pattern
-- is Postgres-specific.)
BEGIN;
CREATE TABLE orders (
id SERIAL PRIMARY KEY,
user_id INT REFERENCES users(id),
total DECIMAL(10,2),
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
-- FK columns are not auto-indexed in PostgreSQL; index user_id to make
-- joins and cascading checks against users efficient.
CREATE INDEX idx_orders_user_id ON orders(user_id);
CREATE INDEX idx_orders_created_at ON orders(created_at);
COMMIT;
-- If any statement fails, all changes are rolled back
-- Data migration with batching (for large tables)
-- Updating in small batches keeps row locks short and lets autovacuum
-- and replication keep up.
-- IMPORTANT: COMMIT inside a DO block requires PostgreSQL 11+ AND that
-- the block is NOT run inside an outer transaction. Most migration
-- tools wrap each migration in a transaction, so run this out-of-band
-- (e.g. psql in autocommit mode, or as a CALLed procedure).
DO $$
DECLARE
batch_size INT := 1000;
affected_rows INT;
BEGIN
LOOP
UPDATE users
SET status = 'verified'
WHERE id IN (
SELECT id FROM users
WHERE status = 'active' AND email_verified = true
-- ORDER BY makes batch selection deterministic; without it the
-- planner may return an arbitrary (and re-scanned) slice each pass.
ORDER BY id
LIMIT batch_size
);
GET DIAGNOSTICS affected_rows = ROW_COUNT;
-- Terminates because each pass flips matched rows to 'verified',
-- shrinking the candidate set until the UPDATE touches nothing.
EXIT WHEN affected_rows = 0;
-- Commit the batch to release locks, then pause briefly to reduce
-- pressure on concurrent traffic.
COMMIT;
PERFORM pg_sleep(0.1);
END LOOP;
END $$;
-- Rollback migration (down migration)
-- V002__add_users_status__rollback.sql
-- Reverts V002. Destructive: any status data written since V002 is
-- lost, so prefer roll-forward fixes in production where possible.
ALTER TABLE users
DROP COLUMN status;
-- Check migration status
-- Flyway command: flyway info
-- Shows: version, description, type, installed on, state
-- Schema version table (Flyway creates this)
-- installed_rank preserves the exact order migrations were applied.
SELECT * FROM flyway_schema_history
ORDER BY installed_rank;
-- Liquibase XML changeset example
/*
<?xml version="1.0" encoding="UTF-8"?>
<databaseChangeLog>
<changeSet id="1" author="maria">
<createTable tableName="users">
<column name="id" type="int" autoIncrement="true">
<constraints primaryKey="true"/>
</column>
<column name="username" type="varchar(50)">
<constraints nullable="false" unique="true"/>
</column>
</createTable>
</changeSet>
<changeSet id="2" author="maria">
<addColumn tableName="users">
<column name="email" type="varchar(100)"/>
</addColumn>
</changeSet>
<changeSet id="3" author="maria" context="prod">
<sql>
CREATE INDEX idx_users_email ON users(email);
</sql>
<rollback>
DROP INDEX idx_users_email;
</rollback>
</changeSet>
</databaseChangeLog>
*/
-- Rails migration example
/*
class CreateUsers < ActiveRecord::Migration[7.0]
def change
create_table :users do |t|
t.string :username, null: false
t.string :email, null: false
t.string :status, default: 'active'
t.timestamps
end
add_index :users, :email, unique: true
add_index :users, :username, unique: true
end
end
*/
-- Online schema change for large tables
-- PostgreSQL: pg_repack for zero-downtime table rewrites
-- pg_repack --table=users --no-order
-- MySQL: pt-online-schema-change
-- pt-online-schema-change --alter "ADD COLUMN phone VARCHAR(20)" -- D=mydb,t=users --execute
-- Add column with default without table lock (PostgreSQL 11+)
ALTER TABLE large_table
ADD COLUMN new_column INT DEFAULT 0 NOT NULL;
-- In PG 11+, default value doesn't rewrite table
-- (the default is stored in the catalog and applied lazily on read).
-- Concurrent index creation (non-blocking)
-- CAUTION: CREATE/DROP INDEX CONCURRENTLY cannot run inside a
-- transaction block — migration tools that wrap migrations in a
-- transaction must be told to skip wrapping for these statements.
-- A failed CONCURRENTLY build leaves an INVALID index that must be
-- dropped and retried.
CREATE INDEX CONCURRENTLY idx_users_created_at ON users(created_at);
-- Allows reads/writes during index creation
-- Drop index concurrently (waits for in-flight queries instead of
-- taking an exclusive lock)
DROP INDEX CONCURRENTLY idx_old_index;
-- Schema validation before migration
-- Pre-flight checks: fail fast with a clear error instead of letting
-- the migration die halfway through on a missing precondition.
DO $$
BEGIN
-- Check table exists. Scoped to the current schema: without the
-- table_schema filter, a users table in any other schema would
-- satisfy the check and mask a missing table here.
IF NOT EXISTS (
SELECT 1 FROM information_schema.tables
WHERE table_schema = current_schema()
AND table_name = 'users'
) THEN
RAISE EXCEPTION 'Table users does not exist';
END IF;
-- Check column doesn't exist (same schema scoping rationale).
IF EXISTS (
SELECT 1 FROM information_schema.columns
WHERE table_schema = current_schema()
AND table_name = 'users' AND column_name = 'new_column'
) THEN
RAISE EXCEPTION 'Column new_column already exists';
END IF;
END $$;
-- Enum type migration (PostgreSQL)
-- NOTE: enum types are rigid — values can be added but never removed,
-- and renames are awkward. For volatile value sets prefer a lookup
-- table or a CHECK constraint.
-- Create enum
CREATE TYPE user_status AS ENUM ('active', 'inactive', 'suspended');
ALTER TABLE users
ADD COLUMN status_enum user_status DEFAULT 'active';
-- Add value to enum
-- CAUTION: before PostgreSQL 12, ALTER TYPE ... ADD VALUE cannot run
-- inside a transaction block — migration tools that auto-wrap in a
-- transaction will fail on this statement.
ALTER TYPE user_status ADD VALUE 'archived';
-- Drop enum (must drop dependencies first)
ALTER TABLE users DROP COLUMN status_enum;
DROP TYPE user_status;
-- Foreign key addition (validate separately for zero downtime)
-- Splitting ADD CONSTRAINT from VALIDATE avoids the long full-table
-- scan under a heavy lock that a plain ADD FOREIGN KEY would take.
-- Step 1: Add NOT VALID constraint — existing rows are not checked,
-- but all NEW writes are enforced immediately.
ALTER TABLE orders
ADD CONSTRAINT fk_orders_user_id
FOREIGN KEY (user_id) REFERENCES users(id)
NOT VALID;
-- Step 2: Validate constraint. Scans existing rows while holding only
-- a SHARE UPDATE EXCLUSIVE lock, so concurrent reads and writes
-- proceed normally during validation.
ALTER TABLE orders
VALIDATE CONSTRAINT fk_orders_user_id;
-- Partition existing table (PostgreSQL)
-- Strategy: build a partitioned copy, backfill, swap names.
-- Create new partitioned table
-- The partition key (measured_at) must be part of the primary key.
CREATE TABLE measurements_new (
id BIGSERIAL,
sensor_id INT,
value DECIMAL,
measured_at TIMESTAMP,
PRIMARY KEY (id, measured_at)
) PARTITION BY RANGE (measured_at);
-- Create partitions
-- Range partitions are half-open: [2024-01-01, 2024-02-01).
CREATE TABLE measurements_2024_01 PARTITION OF measurements_new
FOR VALUES FROM ('2024-01-01') TO ('2024-02-01');
-- Copy data
-- CAUTION: rows written to measurements between this copy and the
-- rename below are LOST. For a live table, pause writes or keep the
-- copy in sync (e.g. with a trigger) until the swap — verify before
-- using this as-is in production.
INSERT INTO measurements_new SELECT * FROM measurements;
-- Swap tables (atomic: both renames commit together)
BEGIN;
ALTER TABLE measurements RENAME TO measurements_old;
ALTER TABLE measurements_new RENAME TO measurements;
COMMIT;
-- Drop old table
-- Consider retaining measurements_old until the new table is verified;
-- this DROP is the point of no return.
DROP TABLE measurements_old;
-- Migration dry-run
-- Relies on PostgreSQL's transactional DDL: the ALTER is fully undone
-- by ROLLBACK, so the real schema is never changed.
BEGIN;
-- Run migration SQL
ALTER TABLE users ADD COLUMN test_column INT;
-- Review changes (the new column is visible inside this transaction)
SELECT * FROM users LIMIT 1;
-- Rollback
ROLLBACK;
-- Track migration duration
iming on
-- Run migration
ALTER TABLE large_table ADD COLUMN new_col INT;
iming off
Schema migrations evolve database structure safely. I use migration tools like Flyway, Liquibase, or framework migrations. Version-controlled migrations track schema changes. Up migrations apply changes, down migrations revert. Idempotent migrations can run multiple times safely. I avoid destructive changes—rename, don't drop. Backwards-compatible migrations enable zero-downtime deploys. Multi-step migrations: add column, deploy code, backfill data, add constraint. Transaction-wrapped migrations ensure atomicity. Understanding migration order prevents dependency issues. Blue-green deployments need schema compatibility. Testing migrations in staging catches errors. Migration rollback plans minimize downtime. Proper migration strategy enables continuous database evolution without production incidents.