Container-First Development Practices
Learn how to adopt a container-first approach to development, ensuring consistent environments, simplified deployment, and enhanced collaboration across the entire development lifecycle.
Learning Objectives
After completing this module, you will be able to:
✅ Design Container-First Architecture - Structure applications for containerized environments
✅ Implement Development Containers - Set up consistent development environments
✅ Optimize Container Images - Build efficient, secure container images
✅ Orchestrate Multi-Container Applications - Use Docker Compose for local development
✅ Integrate CI/CD Pipelines - Automate container builds and deployments
Container-First Philosophy
Core Principles
- Environment Parity - Development, testing, and production environments are identical
- Immutable Infrastructure - Containers are built once and deployed everywhere
- Dependency Isolation - All dependencies are packaged within containers
- Scalability by Design - Applications are designed to scale horizontally
- Configuration Management - Environment-specific configuration is externalized
Development Environment Setup
Development Container Configuration
// .devcontainer/devcontainer.json
{
  "name": "GISE Development Environment",
  // Reuse the compose stack so the IDE container matches the local services.
  "dockerComposeFile": "docker-compose.dev.yml",
  "service": "app",
  "workspaceFolder": "/workspace",
  "shutdownAction": "stopCompose",
  "customizations": {
    "vscode": {
      // Extension IDs are "<publisher>.<name>": ESLint is published by
      // "dbaeumer" and Docker by "ms-azuretools" -- the former
      // "ms-vscode.vscode-eslint" / "ms-vscode.vscode-docker" IDs do not
      // exist on the Marketplace. JSON support is built into VS Code,
      // so no extension is needed for it.
      "extensions": [
        "ms-vscode.vscode-typescript-next",
        "esbenp.prettier-vscode",
        "dbaeumer.vscode-eslint",
        "ms-azuretools.vscode-docker",
        "bradlc.vscode-tailwindcss"
      ],
      "settings": {
        "typescript.preferences.importModuleSpecifier": "relative",
        "editor.formatOnSave": true,
        "editor.defaultFormatter": "esbenp.prettier-vscode",
        "editor.codeActionsOnSave": {
          // Boolean values are deprecated here; use "explicit"/"always"/"never".
          "source.fixAll.eslint": "explicit"
        }
      }
    }
  },
  "forwardPorts": [3000, 3001, 5432, 6379],
  "portsAttributes": {
    "3000": {
      "label": "Frontend",
      "onAutoForward": "notify"
    },
    "3001": {
      "label": "Backend API",
      "onAutoForward": "notify"
    },
    "5432": {
      "label": "PostgreSQL",
      "onAutoForward": "silent"
    },
    "6379": {
      "label": "Redis",
      "onAutoForward": "silent"
    }
  },
  "postCreateCommand": "npm install && npm run setup:dev",
  "remoteUser": "node"
}
Development Docker Compose
# .devcontainer/docker-compose.dev.yml
version: '3.8'
services:
  # Main development container; the IDE attaches here (see devcontainer.json).
  app:
    build:
      context: ..
      dockerfile: .devcontainer/Dockerfile
    volumes:
      # Bind-mount the repo; ":cached" relaxes consistency for better
      # performance on macOS/Windows hosts.
      - ..:/workspace:cached
      # Named volume shadows node_modules so host and container
      # dependency trees never conflict.
      - node_modules:/workspace/node_modules
    # Keep the container alive; the dev tool execs commands into it.
    command: sleep infinity
    environment:
      - NODE_ENV=development
      # Dev-only credentials -- never reuse outside local development.
      - DATABASE_URL=postgresql://postgres:password@postgres:5432/gise_dev
      - REDIS_URL=redis://redis:6379
    depends_on:
      - postgres
      - redis
    networks:
      - gise-dev
  postgres:
    image: postgres:15-alpine
    environment:
      - POSTGRES_DB=gise_dev
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=password
    volumes:
      - postgres_data:/var/lib/postgresql/data
      # Scripts in this directory run once, on first initialization
      # of the data volume only.
      - ../database/init:/docker-entrypoint-initdb.d
    ports:
      - "5432:5432"
    networks:
      - gise-dev
  redis:
    image: redis:7-alpine
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data
    networks:
      - gise-dev
  # Local SMTP catch-all: web UI on 1080, SMTP on 1025.
  maildev:
    image: maildev/maildev
    ports:
      - "1080:1080"
      - "1025:1025"
    networks:
      - gise-dev
volumes:
  node_modules:
  postgres_data:
  redis_data:
networks:
  gise-dev:
    driver: bridge
Development Dockerfile
# .devcontainer/Dockerfile
FROM node:18-bullseye

# Install system dependencies; the DB/Redis CLIs let you poke the
# sibling compose services from inside this container.
RUN apt-get update && apt-get install -y \
    git \
    curl \
    wget \
    vim \
    postgresql-client \
    redis-tools \
    && rm -rf /var/lib/apt/lists/*

# Install global npm tooling used by the project
RUN npm install -g \
    @nestjs/cli \
    prisma \
    typescript \
    ts-node \
    nodemon

# NOTE: the official node images already ship a non-root "node" user and
# group with UID/GID 1000 -- re-creating them with groupadd/useradd fails
# the build ("GID/UID already exists"), so we simply reuse the built-in user.

# Set up workspace owned by the non-root user
WORKDIR /workspace
RUN chown node:node /workspace
USER node

# Shell conveniences for interactive use
RUN echo 'alias ll="ls -alF"' >> ~/.bashrc \
    && echo 'alias la="ls -A"' >> ~/.bashrc \
    && echo 'alias l="ls -CF"' >> ~/.bashrc

# Keep the container running; the dev tool execs into it.
CMD ["sleep", "infinity"]
Application Containerization
Multi-Stage Production Dockerfile
# Dockerfile
# Build stage
FROM node:18-alpine AS builder
WORKDIR /app

# Copy manifests + prisma schema first to maximize layer-cache hits
COPY package*.json ./
COPY prisma ./prisma/

# Install ALL dependencies: `npm run build` needs devDependencies
# (compiler, CLI), so installing with --omit=dev here would break the build.
RUN npm ci

# Copy source code
COPY . .

# Generate Prisma client
RUN npx prisma generate

# Build application
RUN npm run build

# Drop devDependencies so the node_modules copied below is production-only.
# NOTE(review): verify the generated Prisma client artifacts survive the
# prune with your Prisma version; re-run `npx prisma generate` after this
# step if they do not.
RUN npm prune --omit=dev && npm cache clean --force

# Production stage
FROM node:18-alpine AS production

# Install security updates
RUN apk update && apk upgrade

# Create dedicated non-root app user
RUN addgroup -g 1001 -S nodejs
RUN adduser -S nextjs -u 1001

WORKDIR /app

# Copy built application and pruned production dependencies
COPY --from=builder --chown=nextjs:nodejs /app/dist ./dist
COPY --from=builder --chown=nextjs:nodejs /app/node_modules ./node_modules
COPY --from=builder --chown=nextjs:nodejs /app/package*.json ./
COPY --from=builder --chown=nextjs:nodejs /app/prisma ./prisma

# Set environment
ENV NODE_ENV=production
ENV PORT=3000

# Expose port
EXPOSE 3000

# Switch to non-root user
USER nextjs

# Health check: use busybox wget -- curl is NOT installed in node:18-alpine,
# so the original curl-based check would always fail.
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
  CMD wget --no-verbose --tries=1 --spider http://localhost:3000/health || exit 1

# Start application
CMD ["node", "dist/main.js"]
Docker Compose for Local Development
# docker-compose.yml
version: '3.8'
services:
  frontend:
    build:
      context: ./frontend
      dockerfile: Dockerfile.dev
    ports:
      - "3000:3000"
    volumes:
      # Bind-mount source for hot reload; the anonymous volume keeps the
      # container's node_modules from being shadowed by the host's.
      - ./frontend:/app
      - /app/node_modules
    environment:
      # Polling file-watch: needed for reliable change detection on bind mounts.
      - CHOKIDAR_USEPOLLING=true
      - REACT_APP_API_URL=http://localhost:3001
    depends_on:
      - backend
    networks:
      - gise-network
  backend:
    build:
      context: ./backend
      dockerfile: Dockerfile.dev
    ports:
      - "3001:3001"
    volumes:
      - ./backend:/app
      - /app/node_modules
    environment:
      - NODE_ENV=development
      # Dev-only credentials/secret -- never reuse these values in production.
      - DATABASE_URL=postgresql://postgres:password@postgres:5432/gise_dev
      - REDIS_URL=redis://redis:6379
      - JWT_SECRET=dev-secret-key
    depends_on:
      - postgres
      - redis
    networks:
      - gise-network
  postgres:
    image: postgres:15-alpine
    environment:
      - POSTGRES_DB=gise_dev
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=password
    volumes:
      - postgres_data:/var/lib/postgresql/data
      # Runs once, on first initialization of the data volume only.
      - ./database/init.sql:/docker-entrypoint-initdb.d/init.sql
    ports:
      - "5432:5432"
    networks:
      - gise-network
  redis:
    image: redis:7-alpine
    volumes:
      - redis_data:/data
    ports:
      - "6379:6379"
    networks:
      - gise-network
  # Reverse proxy in front of frontend/backend for a production-like layout.
  nginx:
    image: nginx:alpine
    ports:
      - "80:80"
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf
    depends_on:
      - frontend
      - backend
    networks:
      - gise-network
volumes:
  postgres_data:
  redis_data:
networks:
  gise-network:
    driver: bridge
Development Dockerfile for Hot Reloading
# frontend/Dockerfile.dev
FROM node:18-alpine

WORKDIR /app

# Install dependencies first so this layer is cached until package*.json changes
COPY package*.json ./
RUN npm install

# Copy source code (bind-mounted over at runtime by docker-compose for hot reload)
COPY . .

# Expose dev-server port
EXPOSE 3000

# Start development server
CMD ["npm", "start"]
# backend/Dockerfile.dev
FROM node:18-alpine

WORKDIR /app

# Install dependencies first so this layer is cached until package*.json changes
COPY package*.json ./
RUN npm install

# Install development tools (global nodemon used by the "dev" watch script)
RUN npm install -g nodemon

# Copy source code (bind-mounted over at runtime by docker-compose for hot reload)
COPY . .

# Generate Prisma client
RUN npx prisma generate

# Expose port
EXPOSE 3001

# Start development server with hot reloading
CMD ["npm", "run", "dev"]
Container Optimization Strategies
Image Size Optimization
# Optimized Dockerfile with multi-stage build
# Stage 1: production-only dependency tree
FROM node:18-alpine AS dependencies
WORKDIR /app
COPY package*.json ./
# --omit=dev is the npm>=9 replacement for the deprecated --only=production
RUN npm ci --omit=dev && npm cache clean --force

# Stage 2: full install (devDependencies are required to build)
FROM node:18-alpine AS build
WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run build

# Stage 3: minimal runtime image
FROM node:18-alpine AS runtime
# Install only runtime dependencies (dumb-init = PID-1 signal handling)
RUN apk add --no-cache dumb-init
# Create dedicated non-root user
RUN addgroup -g 1001 -S nodejs
RUN adduser -S nextjs -u 1001
WORKDIR /app
# Copy production dependencies
COPY --from=dependencies --chown=nextjs:nodejs /app/node_modules ./node_modules
# Copy built application
COPY --from=build --chown=nextjs:nodejs /app/dist ./dist
COPY --from=build --chown=nextjs:nodejs /app/package*.json ./
USER nextjs
EXPOSE 3000
# Use dumb-init for proper signal handling (SIGTERM reaches node)
ENTRYPOINT ["dumb-init", "--"]
CMD ["node", "dist/main.js"]
Security Best Practices
# Security-hardened Dockerfile
FROM node:18-alpine AS base
# Install security updates
RUN apk update && apk upgrade && apk add --no-cache dumb-init
# Create non-root user (inherited by every stage derived from "base")
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nextjs -u 1001

FROM base AS dependencies
WORKDIR /app
# Copy package files
COPY package*.json ./
# Install production dependencies; the audit acts as a build gate and
# intentionally FAILS the image build if high-severity advisories exist.
RUN npm ci --only=production && \
    npm audit --audit-level=high && \
    npm cache clean --force

FROM base AS build
WORKDIR /app
# Copy package files and install all dependencies (devDeps needed to build)
COPY package*.json ./
RUN npm ci
# Copy source and build
COPY . .
RUN npm run build && \
    npm run test:security

FROM base AS runtime
WORKDIR /app
# Copy production files
COPY --from=dependencies --chown=nextjs:nodejs /app/node_modules ./node_modules
COPY --from=build --chown=nextjs:nodejs /app/dist ./dist
COPY --from=build --chown=nextjs:nodejs /app/package*.json ./
# Remove unnecessary files
RUN rm -rf /tmp/* /var/cache/apk/*
# Cap the V8 heap as a resource limit (note: this is NOT an HTTP
# "security header" -- it bounds memory use of the node process)
ENV NODE_OPTIONS="--max-old-space-size=1024"
# Switch to non-root user
USER nextjs
# Health check (busybox wget ships with alpine; curl does not)
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD wget --no-verbose --tries=1 --spider http://localhost:3000/health || exit 1
EXPOSE 3000
ENTRYPOINT ["dumb-init", "--"]
CMD ["node", "dist/main.js"]
Container Orchestration
Production Docker Compose
# docker-compose.prod.yml
# NOTE: the `deploy:` keys (replicas, restart_policy) are fully honored by
# `docker stack deploy` (Swarm); Compose v2 supports only a subset of them.
version: '3.8'
services:
  traefik:
    image: traefik:v3.0
    command:
      - --api.dashboard=true
      - --providers.docker=true
      - --providers.docker.exposedbydefault=false
      - --entrypoints.web.address=:80
      - --entrypoints.websecure.address=:443
      - --certificatesresolvers.letsencrypt.acme.tlschallenge=true
      - --certificatesresolvers.letsencrypt.acme.email=admin@example.com
      - --certificatesresolvers.letsencrypt.acme.storage=/letsencrypt/acme.json
    ports:
      - "80:80"
      - "443:443"
    volumes:
      # Read-only socket access so Traefik can discover containers.
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - letsencrypt:/letsencrypt
    networks:
      - gise-network
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.dashboard.rule=Host(`traefik.example.com`)"
      - "traefik.http.routers.dashboard.tls.certresolver=letsencrypt"
      # The dashboard router must point at Traefik's internal API service;
      # without this the router has no backend and the dashboard 404s.
      - "traefik.http.routers.dashboard.service=api@internal"
  frontend:
    build:
      context: ./frontend
      dockerfile: Dockerfile
    environment:
      - NODE_ENV=production
      - REACT_APP_API_URL=https://api.example.com
    networks:
      - gise-network
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.frontend.rule=Host(`example.com`)"
      - "traefik.http.routers.frontend.tls.certresolver=letsencrypt"
      - "traefik.http.services.frontend.loadbalancer.server.port=3000"
    deploy:
      replicas: 2
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
  backend:
    build:
      context: ./backend
      dockerfile: Dockerfile
    environment:
      - NODE_ENV=production
      # Secrets come from the deployment environment (.env / secret store).
      - DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/gise_prod
      - REDIS_URL=redis://redis:6379
      - JWT_SECRET=${JWT_SECRET}
    networks:
      - gise-network
    depends_on:
      - postgres
      - redis
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.backend.rule=Host(`api.example.com`)"
      - "traefik.http.routers.backend.tls.certresolver=letsencrypt"
      - "traefik.http.services.backend.loadbalancer.server.port=3001"
    deploy:
      replicas: 3
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
  postgres:
    image: postgres:15-alpine
    environment:
      - POSTGRES_DB=gise_prod
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./database/init.sql:/docker-entrypoint-initdb.d/init.sql
    networks:
      - gise-network
    deploy:
      restart_policy:
        condition: on-failure
  redis:
    image: redis:7-alpine
    volumes:
      - redis_data:/data
    networks:
      - gise-network
    deploy:
      restart_policy:
        condition: on-failure
  monitoring:
    image: prom/prometheus
    volumes:
      - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus_data:/prometheus
    networks:
      - gise-network
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.monitoring.rule=Host(`monitoring.example.com`)"
      - "traefik.http.routers.monitoring.tls.certresolver=letsencrypt"
volumes:
  postgres_data:
  redis_data:
  letsencrypt:
  prometheus_data:
networks:
  gise-network:
    driver: overlay
    attachable: true
CI/CD Integration
GitHub Actions for Container Builds
# .github/workflows/container-build.yml
name: Container Build and Deploy

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main]

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Log in to Container Registry
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      # The extra "type=sha,format=long,prefix=" tag produces an image tag
      # equal to the full commit SHA, so the Trivy step below can reference
      # the image as :${{ github.sha }} (the "{{branch}}-" sha tag is short
      # and prefixed, and would not match github.sha).
      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=sha,prefix={{branch}}-
            type=sha,format=long,prefix=
            type=raw,value=latest,enable={{is_default_branch}}
      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
      # Only scan on non-PR events: the image is not pushed for PRs, so the
      # registry ref would not exist.
      # TODO: pin trivy-action to a released tag instead of @master.
      - name: Run container security scan
        if: github.event_name != 'pull_request'
        uses: aquasecurity/trivy-action@master
        with:
          image-ref: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ github.sha }}
          format: 'sarif'
          output: 'trivy-results.sarif'
      - name: Upload Trivy scan results
        uses: github/codeql-action/upload-sarif@v3
        if: always() && github.event_name != 'pull_request'
        with:
          sarif_file: 'trivy-results.sarif'

  integration-tests:
    runs-on: ubuntu-latest
    needs: build-and-test
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      # Compose v1 ("docker-compose") is no longer installed on GitHub
      # hosted runners; use the Docker CLI plugin ("docker compose").
      - name: Start test environment
        run: |
          docker compose -f docker-compose.test.yml up -d
          sleep 30
      - name: Run integration tests
        run: |
          docker compose -f docker-compose.test.yml exec -T backend npm run test:integration
      - name: Run end-to-end tests
        run: |
          docker compose -f docker-compose.test.yml exec -T frontend npm run test:e2e
      - name: Cleanup test environment
        if: always()
        run: |
          docker compose -f docker-compose.test.yml down -v

  deploy:
    runs-on: ubuntu-latest
    needs: [build-and-test, integration-tests]
    if: github.ref == 'refs/heads/main'
    steps:
      - name: Deploy to production
        uses: appleboy/ssh-action@v1.0.0
        with:
          host: ${{ secrets.PRODUCTION_HOST }}
          username: ${{ secrets.PRODUCTION_USER }}
          key: ${{ secrets.PRODUCTION_SSH_KEY }}
          script: |
            cd /opt/gise
            docker compose pull
            docker compose up -d
            docker system prune -f
Test Environment Configuration
# docker-compose.test.yml
version: '3.8'
services:
  frontend-test:
    build:
      context: ./frontend
      dockerfile: Dockerfile.test
    environment:
      - NODE_ENV=test
      # Reach the backend via its compose service name, not localhost.
      - REACT_APP_API_URL=http://backend-test:3001
    depends_on:
      - backend-test
    networks:
      - test-network
  backend-test:
    build:
      context: ./backend
      dockerfile: Dockerfile.test
    environment:
      - NODE_ENV=test
      - DATABASE_URL=postgresql://postgres:password@postgres-test:5432/gise_test
      - REDIS_URL=redis://redis-test:6379
    depends_on:
      - postgres-test
      - redis-test
    networks:
      - test-network
  postgres-test:
    image: postgres:15-alpine
    environment:
      - POSTGRES_DB=gise_test
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=password
    # tmpfs keeps test data in RAM: faster, and nothing persists between runs.
    tmpfs:
      - /var/lib/postgresql/data
    networks:
      - test-network
  redis-test:
    image: redis:7-alpine
    tmpfs:
      - /data
    networks:
      - test-network
networks:
  test-network:
    driver: bridge
Container Monitoring and Logging
Monitoring Configuration
# monitoring/docker-compose.monitoring.yml
version: '3.8'
services:
  prometheus:
    image: prom/prometheus
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      # ~8 days of samples; tune to your disk budget.
      - '--storage.tsdb.retention.time=200h'
      # Allows config reload via HTTP POST to /-/reload.
      - '--web.enable-lifecycle'
    networks:
      - monitoring
  grafana:
    image: grafana/grafana
    environment:
      # NOTE(review): default admin password -- acceptable for local use only;
      # override via env/secret before exposing this anywhere.
      - GF_SECURITY_ADMIN_PASSWORD=admin
    volumes:
      - grafana_data:/var/lib/grafana
      - ./grafana/provisioning:/etc/grafana/provisioning
    networks:
      - monitoring
  # Log aggregation: Loki stores logs, Promtail ships host/container logs to it.
  loki:
    image: grafana/loki
    volumes:
      - ./loki/loki-config.yml:/etc/loki/local-config.yaml
      - loki_data:/loki
    command: -config.file=/etc/loki/local-config.yaml
    networks:
      - monitoring
  promtail:
    image: grafana/promtail
    volumes:
      # Read-only mounts: Promtail only tails these logs.
      - /var/log:/var/log:ro
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - ./promtail/promtail-config.yml:/etc/promtail/config.yml
    command: -config.file=/etc/promtail/config.yml
    networks:
      - monitoring
volumes:
  prometheus_data:
  grafana_data:
  loki_data:
networks:
  monitoring:
    driver: bridge
Application Metrics
// src/monitoring/metrics.ts
//
// Prometheus metric definitions for the application, built on prom-client.
// The exported metrics are updated elsewhere (HTTP middleware, DB layer,
// background workers); getMetrics() renders them for the /metrics endpoint.
import { register, Counter, Histogram, Gauge } from 'prom-client';

// HTTP request metrics
// Latency histogram labelled per method/route/status.
export const httpRequestDuration = new Histogram({
  name: 'http_request_duration_seconds',
  help: 'Duration of HTTP requests in seconds',
  labelNames: ['method', 'route', 'status_code'],
  // Buckets in seconds, spanning fast API calls up to 10s outliers.
  buckets: [0.1, 0.3, 0.5, 0.7, 1, 3, 5, 7, 10]
});

export const httpRequestTotal = new Counter({
  name: 'http_requests_total',
  help: 'Total number of HTTP requests',
  labelNames: ['method', 'route', 'status_code']
});

// Database metrics
export const databaseConnectionsActive = new Gauge({
  name: 'database_connections_active',
  help: 'Number of active database connections'
});

export const databaseQueryDuration = new Histogram({
  name: 'database_query_duration_seconds',
  help: 'Duration of database queries in seconds',
  labelNames: ['operation', 'table'],
  // Finer low-end buckets than the HTTP histogram: queries should be fast.
  buckets: [0.01, 0.05, 0.1, 0.3, 0.5, 1, 3, 5]
});

// Application metrics
export const activeUsers = new Gauge({
  name: 'active_users_total',
  help: 'Number of active users'
});

export const taskQueueSize = new Gauge({
  name: 'task_queue_size',
  help: 'Number of tasks in queue',
  labelNames: ['queue_name']
});

// Register all metrics.
// NOTE(review): prom-client metrics attach themselves to the default
// registry on construction, so these explicit calls are likely redundant
// no-ops for these instances -- confirm against the installed version.
register.registerMetric(httpRequestDuration);
register.registerMetric(httpRequestTotal);
register.registerMetric(databaseConnectionsActive);
register.registerMetric(databaseQueryDuration);
register.registerMetric(activeUsers);
register.registerMetric(taskQueueSize);

// Metrics endpoint helper.
// NOTE(review): in prom-client >= 13, register.metrics() returns a
// Promise<string>, so callers must await the result -- verify version.
export const getMetrics = () => register.metrics();
Container Development Workflow
Development Scripts
{
  "scripts": {
    "dev": "docker compose up -d && npm run dev:watch",
    "dev:build": "docker compose build",
    "dev:clean": "docker compose down -v && docker system prune -f",
    "dev:logs": "docker compose logs -f",
    "dev:shell": "docker compose exec backend sh",
    "dev:db": "docker compose exec postgres psql -U postgres -d gise_dev",
    "dev:redis": "docker compose exec redis redis-cli",
    "test:integration": "docker compose -f docker-compose.test.yml up --abort-on-container-exit",
    "test:e2e": "docker compose -f docker-compose.test.yml run --rm frontend-test npm run test:e2e",
    "prod:build": "docker compose -f docker-compose.prod.yml build",
    "prod:deploy": "docker compose -f docker-compose.prod.yml up -d",
    "prod:logs": "docker compose -f docker-compose.prod.yml logs -f",
    "container:scan": "trivy image gise-app:latest",
    "container:optimize": "docker-slim build --target gise-app:latest --tag gise-app:slim"
  }
}
Makefile for Container Operations
# Makefile
# NOTE: recipe lines MUST be indented with a real tab character -- make
# rejects space-indented recipes with "missing separator".
.PHONY: help dev build test deploy clean logs shell db redis scan optimize

help: ## Show this help message
	@echo 'Usage: make [target]'
	@echo ''
	@echo 'Targets:'
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "  %-15s %s\n", $$1, $$2}' $(MAKEFILE_LIST)

dev: ## Start development environment
	docker compose up -d
	@echo "Development environment started"
	@echo "Frontend: http://localhost:3000"
	@echo "Backend: http://localhost:3001"
	@echo "Database: localhost:5432"

build: ## Build all containers
	docker compose build --parallel

test: ## Run all tests
	docker compose -f docker-compose.test.yml up --abort-on-container-exit
	docker compose -f docker-compose.test.yml down -v

deploy: ## Deploy to production
	docker compose -f docker-compose.prod.yml pull
	docker compose -f docker-compose.prod.yml up -d

clean: ## Clean up containers and volumes
	docker compose down -v
	docker system prune -f
	docker volume prune -f

logs: ## Show logs
	docker compose logs -f

shell: ## Open shell in backend container
	docker compose exec backend sh

db: ## Connect to database
	docker compose exec postgres psql -U postgres -d gise_dev

redis: ## Connect to Redis
	docker compose exec redis redis-cli

scan: ## Scan containers for vulnerabilities
	# --images (not --services) yields an image reference trivy can scan;
	# a bare service name is not a valid image ref.
	trivy image $$(docker compose config --images | head -1)

optimize: ## Optimize container images
	docker-slim build --target gise-app:latest --tag gise-app:slim
Best Practices and Troubleshooting
Container Best Practices
-
Use Multi-Stage Builds
- Separate build and runtime environments
- Minimize final image size
- Improve security by excluding build tools
-
Implement Health Checks
- Monitor container health
- Enable automatic restarts
- Provide meaningful health endpoints
-
Manage Secrets Securely
- Use Docker secrets or external secret management
- Never embed secrets in images
- Rotate secrets regularly
-
Optimize Layer Caching
- Order Dockerfile instructions by change frequency
- Use .dockerignore to exclude unnecessary files
- Leverage build cache for faster builds
-
Monitor Resource Usage
- Set memory and CPU limits
- Monitor container metrics
- Implement proper logging
Common Issues and Solutions
Issue: Container Startup Failures
# Debug container startup
docker-compose logs service-name
docker-compose exec service-name sh
# Check container health
docker inspect container-name | grep -A 10 Health
Issue: Port Conflicts
# Find processes using ports
lsof -i :3000
netstat -tulpn | grep :3000
# Use different ports in docker-compose.yml
ports:
- "3001:3000" # host:container
Issue: Volume Mount Problems
# Check volume mounts
docker inspect container-name | grep -A 10 Mounts
# Fix permissions
docker-compose exec service-name chown -R node:node /app
Issue: Network Connectivity