Deploy Phase Recipes
Production-ready deployment patterns and operational procedures for GISE methodology. These recipes ensure reliable, scalable deployments with comprehensive monitoring.
Container Deployment
Recipe: Production Dockerfile Template
Use When: Creating production-ready container images
Template:
# Multi-stage build for optimization: build tooling stays in the "builder"
# stage and is never shipped in the runtime image.
FROM node:18-alpine AS builder
WORKDIR /app

# Install ALL dependencies (including devDependencies) — `npm run build`
# below typically needs compilers/bundlers that live in devDependencies,
# so a production-only install here would break the build.
COPY package*.json ./
RUN npm ci

# Build application
COPY . .
RUN npm run build

# Prune dev dependencies so only the production tree is copied to runtime.
RUN npm prune --omit=dev && npm cache clean --force

# Production runtime image
FROM node:18-alpine AS runtime
WORKDIR /app

# Create non-root user so the app never runs as root inside the container
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nodejs -u 1001

# Copy built application, owned by the non-root user
COPY --from=builder --chown=nodejs:nodejs /app/dist ./dist
COPY --from=builder --chown=nodejs:nodejs /app/node_modules ./node_modules
COPY --from=builder --chown=nodejs:nodejs /app/package.json ./

# Security and performance
USER nodejs
EXPOSE 3000

# Container-level liveness probe: Docker marks the container unhealthy
# after 3 consecutive failures.
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
  CMD node dist/health-check.js

CMD ["node", "dist/server.js"]
Recipe: Docker Compose for Development
Use When: Setting up local development environment
Template:
version: '3.8'

services:
  app:
    build: .
    ports:
      - "3000:3000"
    environment:
      - NODE_ENV=development
      - DATABASE_URL=postgresql://postgres:password@db:5432/app_dev
      - REDIS_URL=redis://redis:6379
    volumes:
      # Bind-mount source and tests for live reload during development
      - ./src:/app/src
      - ./tests:/app/tests
    depends_on:
      - db
      - redis

  db:
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: app_dev
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: password
    ports:
      - "5432:5432"
    volumes:
      # Named volume so data survives `docker-compose down`
      - postgres_data:/var/lib/postgresql/data

  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"

volumes:
  postgres_data:
Infrastructure as Code
Recipe: Basic Kubernetes Deployment
Use When: Deploying to Kubernetes clusters
Template:
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: [app-name]
  labels:
    app: [app-name]
spec:
  # Three replicas for basic availability; tune per workload
  replicas: 3
  selector:
    matchLabels:
      app: [app-name]
  template:
    metadata:
      labels:
        app: [app-name]
    spec:
      containers:
        - name: [app-name]
          image: [image-name]:[tag]
          ports:
            - containerPort: 3000
          env:
            - name: NODE_ENV
              value: "production"
            # Sensitive values come from a Secret, never inline
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: [app-name]-secrets
                  key: database-url
          # Liveness: restart the container if /health stops responding
          livenessProbe:
            httpGet:
              path: /health
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
          # Readiness: remove from Service endpoints until /ready succeeds
          readinessProbe:
            httpGet:
              path: /ready
              port: 3000
            initialDelaySeconds: 5
            periodSeconds: 5
          resources:
            requests:
              memory: "128Mi"
              cpu: "100m"
            limits:
              memory: "512Mi"
              cpu: "500m"
---
apiVersion: v1
kind: Service
metadata:
  name: [app-name]-service
spec:
  selector:
    app: [app-name]
  ports:
    - protocol: TCP
      port: 80
      targetPort: 3000
  type: ClusterIP
Recipe: Terraform AWS Infrastructure
Use When: Provisioning AWS infrastructure
Template:
# --- VPC and networking -----------------------------------------------------

# Root VPC for the project; DNS support enabled so instances resolve
# internal hostnames.
resource "aws_vpc" "main" {
  cidr_block           = "10.0.0.0/16"
  enable_dns_hostnames = true
  enable_dns_support   = true

  tags = {
    Name = "${var.project_name}-vpc"
  }
}

# Two public subnets spread across the first two availability zones,
# carved as /24s out of the VPC range.
resource "aws_subnet" "public" {
  count                   = 2
  vpc_id                  = aws_vpc.main.id
  cidr_block              = "10.0.${count.index + 1}.0/24"
  availability_zone       = data.aws_availability_zones.available.names[count.index]
  map_public_ip_on_launch = true

  tags = {
    Name = "${var.project_name}-public-${count.index + 1}"
  }
}

# --- Application Load Balancer ----------------------------------------------

# Internet-facing ALB spanning both public subnets.
resource "aws_lb" "main" {
  name               = "${var.project_name}-alb"
  internal           = false
  load_balancer_type = "application"
  security_groups    = [aws_security_group.alb.id]
  subnets            = aws_subnet.public[*].id

  tags = {
    Name = "${var.project_name}-alb"
  }
}

# --- ECS cluster ------------------------------------------------------------

# Container Insights enabled for CloudWatch metrics on the cluster.
resource "aws_ecs_cluster" "main" {
  name = "${var.project_name}-cluster"

  setting {
    name  = "containerInsights"
    value = "enabled"
  }
}
Monitoring & Observability
Recipe: Health Check Endpoints
Use When: Implementing application health monitoring
Template:
import express from 'express';

const healthRouter = express.Router();

// Liveness check: the process is up and able to serve requests.
// Intentionally checks nothing external — a dependency outage must not
// cause the orchestrator to restart the container.
healthRouter.get('/health', (req, res) => {
  res.status(200).json({
    status: 'healthy',
    timestamp: new Date().toISOString(),
    service: process.env.SERVICE_NAME || 'unknown'
  });
});

// Readiness check: verifies downstream dependencies. Returns 200 only when
// every dependency is reachable; 503 otherwise so the load balancer /
// kubelet stops routing traffic here.
healthRouter.get('/ready', async (req, res) => {
  const checks = {
    database: false,
    redis: false,
    external_api: false
  };

  // Probe all dependencies in parallel; with allSettled, one failing
  // dependency no longer hides the true status of the others (the old
  // sequential try/catch reported everything after the first failure
  // as down).
  const results = await Promise.allSettled([
    db.query('SELECT 1'),
    redis.ping(),
    // Guard the env var: without this, an unset EXTERNAL_API_URL would
    // produce a fetch to the literal URL "undefined/health".
    process.env.EXTERNAL_API_URL
      ? fetch(process.env.EXTERNAL_API_URL + '/health')
      : Promise.reject(new Error('EXTERNAL_API_URL is not configured'))
  ]);

  checks.database = results[0].status === 'fulfilled';
  checks.redis = results[1].status === 'fulfilled';
  checks.external_api = results[2].status === 'fulfilled' && results[2].value.ok;

  // Collect failure reasons for operators debugging a 503.
  const errors = results
    .filter((r) => r.status === 'rejected')
    .map((r) => r.reason?.message ?? String(r.reason));

  const allHealthy = Object.values(checks).every(Boolean);
  res.status(allHealthy ? 200 : 503).json({
    status: allHealthy ? 'ready' : 'not_ready',
    checks,
    ...(errors.length > 0 && { error: errors.join('; ') }),
    timestamp: new Date().toISOString()
  });
});

export { healthRouter };
Recipe: Prometheus Metrics
Use When: Implementing application metrics
Template:
import promClient from 'prom-client';

// --- Metric definitions (registered once at module load) ---

// Request latency histogram, labelled per method/route/status.
const httpRequestDuration = new promClient.Histogram({
  name: 'http_request_duration_seconds',
  help: 'Duration of HTTP requests in seconds',
  labelNames: ['method', 'route', 'status']
});

// Monotonic request counter with the same label set as the histogram.
const httpRequestTotal = new promClient.Counter({
  name: 'http_requests_total',
  help: 'Total number of HTTP requests',
  labelNames: ['method', 'route', 'status']
});

// Gauge for in-flight connections; callers inc()/dec() it elsewhere.
const activeConnections = new promClient.Gauge({
  name: 'active_connections_total',
  help: 'Total number of active connections'
});

// Express middleware that records duration and count for every response.
// Measurement is taken on the 'finish' event so the full handler time,
// including async work, is captured.
export function metricsMiddleware(req, res, next) {
  const start = Date.now();

  res.on('finish', () => {
    const duration = (Date.now() - start) / 1000;
    const labels = {
      method: req.method,
      // req.route is only set once routing matched; fall back to the raw
      // path (note: raw paths can explode label cardinality — consider
      // normalizing IDs upstream).
      route: req.route?.path || req.path,
      status: res.statusCode
    };
    httpRequestDuration.observe(labels, duration);
    httpRequestTotal.inc(labels);
  });

  next();
}

// Scrape endpoint for Prometheus. register.metrics() returns a Promise in
// prom-client v13+, so it must be awaited — the previous synchronous
// res.send(...) would emit "[object Promise]" instead of the exposition text.
export async function metricsEndpoint(req, res) {
  res.set('Content-Type', promClient.register.contentType);
  res.end(await promClient.register.metrics());
}
Deployment Strategies
Recipe: Blue-Green Deployment Script
Use When: Implementing zero-downtime deployments
Template:
#!/bin/bash
# Blue-green deployment: brings up the idle color, health-checks it,
# switches traffic, then tears down the previously active color.
set -e

PROJECT_NAME="myapp"
NEW_VERSION=${1:-latest}
HEALTH_CHECK_URL="http://localhost:3000/health"
HEALTH_CHECK_TIMEOUT=60

echo "Starting blue-green deployment for ${PROJECT_NAME}:${NEW_VERSION}"

# Determine current and new environments. Plain '{{.Names}}' (no "table"
# prefix) avoids a NAMES header row that could confuse the grep.
if docker ps --format '{{.Names}}' | grep -q "${PROJECT_NAME}-blue"; then
  CURRENT="blue"
  NEW="green"
else
  CURRENT="green"
  NEW="blue"
fi

echo "Current environment: ${CURRENT}"
echo "Deploying to: ${NEW}"

# Deploy new version (tolerate the target environment not existing yet)
echo "Deploying ${NEW} environment..."
docker-compose -f "docker-compose.${NEW}.yml" down || true
docker-compose -f "docker-compose.${NEW}.yml" pull
docker-compose -f "docker-compose.${NEW}.yml" up -d

# Poll the health endpoint once per second until it responds or we time out.
echo "Waiting for ${NEW} environment to be healthy..."
for i in $(seq 1 "$HEALTH_CHECK_TIMEOUT"); do
  if curl -sf "$HEALTH_CHECK_URL" > /dev/null 2>&1; then
    echo "${NEW} environment is healthy!"
    break
  fi
  if [ "$i" -eq "$HEALTH_CHECK_TIMEOUT" ]; then
    echo "Health check failed for ${NEW} environment"
    docker-compose -f "docker-compose.${NEW}.yml" logs
    exit 1
  fi
  sleep 1
done

# Switch traffic
echo "Switching traffic to ${NEW} environment..."
# Update load balancer configuration or DNS
./scripts/switch-traffic.sh "$NEW"

# Wait for in-flight connections to the old environment to drain
sleep 30

# Stop old environment
echo "Stopping ${CURRENT} environment..."
docker-compose -f "docker-compose.${CURRENT}.yml" down

echo "Blue-green deployment completed successfully!"
Security & Compliance
Recipe: Security Headers Middleware
Use When: Implementing security best practices
Template:
import helmet from 'helmet';
import rateLimit from 'express-rate-limit';

// Content Security Policy: same-origin by default; inline styles allowed,
// images also from data: URIs and any https origin.
const cspDirectives = {
  defaultSrc: ["'self'"],
  styleSrc: ["'self'", "'unsafe-inline'"],
  scriptSrc: ["'self'"],
  imgSrc: ["'self'", "data:", "https:"]
};

// HTTP Strict Transport Security: one year, applied to subdomains,
// eligible for browser preload lists.
const hstsOptions = {
  maxAge: 31536000,
  includeSubDomains: true,
  preload: true
};

// Per-IP rate limiter using the standard RateLimit-* response headers.
const limiter = rateLimit({
  windowMs: 15 * 60 * 1000, // 15 minutes
  max: 100, // limit each IP to 100 requests per windowMs
  message: 'Too many requests from this IP',
  standardHeaders: true,
  legacyHeaders: false
});

// Middleware chain: security headers first, then rate limiting.
export const securityMiddleware = [
  helmet({
    contentSecurityPolicy: { directives: cspDirectives },
    hsts: hstsOptions
  }),
  limiter
];
Recipe: SSL/TLS Configuration
Use When: Configuring HTTPS in production
Template:
server {
    listen 443 ssl http2;
    server_name yourdomain.com;

    # SSL Configuration (Let's Encrypt layout)
    ssl_certificate /etc/letsencrypt/live/yourdomain.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/yourdomain.com/privkey.pem;

    # SSL Settings. NOTE: the cipher list below applies to TLS 1.2 only;
    # TLS 1.3 cipher suites are negotiated separately by OpenSSL.
    # (The previous "...GCM-SHA512" names are not valid OpenSSL suites —
    # AES-GCM suites end in SHA256/SHA384.)
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384;
    ssl_prefer_server_ciphers off;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;

    # Security Headers
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    add_header X-Frame-Options DENY always;
    add_header X-Content-Type-Options nosniff always;
    # NOTE(review): X-XSS-Protection is deprecated; modern guidance is to
    # rely on CSP. Kept here for legacy-browser compatibility.
    add_header X-XSS-Protection "1; mode=block" always;

    # Application: reverse proxy to the Node server, with WebSocket
    # upgrade support via the Upgrade/Connection headers.
    location / {
        proxy_pass http://localhost:3000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
    }
}

# Redirect all plain-HTTP traffic to HTTPS
server {
    listen 80;
    server_name yourdomain.com;
    return 301 https://$server_name$request_uri;
}
Operational Procedures
Recipe: Backup Strategy
Use When: Implementing data backup procedures
Template:
#!/bin/bash
# Database backup script: dumps Postgres, compresses, verifies, optionally
# uploads to S3, and prunes backups older than RETENTION_DAYS.
# -e: abort on error; -u: unset variables are errors; -o pipefail: a failing
# pg_dump is NOT masked by the succeeding gzip in the pipeline below
# (with plain `set -e` the pipeline's exit status is gzip's, so a failed
# dump would silently produce a truncated backup).
set -euo pipefail

DB_NAME=${DB_NAME:-myapp}
DB_USER=${DB_USER:-postgres}
DB_HOST=${DB_HOST:-localhost}
BACKUP_DIR=${BACKUP_DIR:-/backups}
RETENTION_DAYS=${RETENTION_DAYS:-7}

TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_FILE="${BACKUP_DIR}/${DB_NAME}_${TIMESTAMP}.sql.gz"

echo "Starting database backup..."

# Create backup directory
mkdir -p "$BACKUP_DIR"

# Create compressed database dump in one pass
pg_dump -h "$DB_HOST" -U "$DB_USER" -d "$DB_NAME" | gzip > "$BACKUP_FILE"

# Verify backup: file exists and is non-empty
if [ -f "$BACKUP_FILE" ] && [ -s "$BACKUP_FILE" ]; then
  echo "Backup created successfully: $BACKUP_FILE"

  # Upload to cloud storage (optional). ${VAR:-} default keeps `set -u`
  # from aborting when AWS_S3_BUCKET is not defined.
  if [ -n "${AWS_S3_BUCKET:-}" ]; then
    aws s3 cp "$BACKUP_FILE" "s3://${AWS_S3_BUCKET}/backups/"
    echo "Backup uploaded to S3"
  fi
else
  echo "Backup failed!"
  exit 1
fi

# Clean up backups older than the retention window
find "$BACKUP_DIR" -name "${DB_NAME}_*.sql.gz" -mtime +"$RETENTION_DAYS" -delete
echo "Old backups cleaned up (older than $RETENTION_DAYS days)"
Recipe: Log Management
Use When: Centralizing application logs
Template:
# docker-compose.logging.yml — single-node ELK stack plus Filebeat shipper.
# Security is disabled on Elasticsearch; suitable for local/dev use only.
version: '3.8'

services:
  elasticsearch:
    image: elasticsearch:8.8.0
    environment:
      - discovery.type=single-node
      - xpack.security.enabled=false
    ports:
      - "9200:9200"

  logstash:
    image: logstash:8.8.0
    volumes:
      - ./logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro
    ports:
      - "5044:5044"
    depends_on:
      - elasticsearch

  kibana:
    image: kibana:8.8.0
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
    ports:
      - "5601:5601"
    depends_on:
      - elasticsearch

  filebeat:
    image: elastic/filebeat:8.8.0
    # Root needed to read host logs and the Docker socket
    user: root
    volumes:
      - ./filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
      - /var/log:/var/log:ro
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
    depends_on:
      - logstash
Related Resources
Next: Template Library | Course Overview