Tech Guide // Containerization Protocol
Containerize. Deploy. Dominate. // The complete reference.
docker run -d -p 8080:80 --name web nginx
docker stop web
docker start web
docker restart web
docker rm -f web
docker kill web
docker pause web / docker unpause web
docker ps -a # list all
docker logs -f --tail 100 web
docker exec -it web /bin/bash
docker inspect web
docker stats
docker top web
docker cp web:/path /host/path
docker build -t myapp:1.0 .
docker images
docker pull nginx:latest
docker push user/myapp:1.0
docker tag myapp:1.0 user/myapp:latest
docker rmi myapp:1.0
docker save -o img.tar / docker load -i img.tar
docker compose up -d --build
docker compose down -v
docker compose logs -f web
docker compose exec web /bin/bash
docker compose ps
docker compose build --no-cache
docker compose restart web
docker network create mynet
docker network ls
docker network inspect mynet
docker network connect mynet web
docker network disconnect mynet web
docker network rm mynet
docker network prune
docker system df # disk usage
docker system prune -a --volumes
docker container prune
docker image prune -a
docker volume prune
docker network prune
01
Create, start, stop, and destroy containers. The fundamental operations of containerized deployment in the megacity.
// create and start a container
# Basic syntax $ docker run [OPTIONS] IMAGE [COMMAND] [ARG...] # Key flags -d # Detached mode (background) -it # Interactive terminal (stdin + TTY) --rm # Auto-remove when container exits -p 8080:80 # Port mapping (host:container) -v /host:/container # Volume mount --name myapp # Assign custom name --network mynet # Connect to network -e "KEY=value" # Set environment variable --restart unless-stopped # Restart policy # Combined example $ docker run -d \ --name web \ --restart unless-stopped \ -p 8080:80 \ -v /data:/app/data \ -e NODE_ENV=production \ --network backend \ nginx:latest
// start, stop, kill, remove, pause
$ docker start CONTAINER # Start stopped container $ docker stop CONTAINER # Graceful stop (SIGTERM, then SIGKILL) $ docker restart CONTAINER # Restart container $ docker kill CONTAINER # Force stop (SIGKILL) $ docker rm CONTAINER # Remove stopped container $ docker rm -f CONTAINER # Force remove running container $ docker pause CONTAINER # Pause all processes $ docker unpause CONTAINER # Unpause container
02
Peer into running containers. Monitor processes, read logs, execute commands, and inspect metadata from the outside.
# List containers $ docker ps # Running containers $ docker ps -a # All containers (including stopped) $ docker ps -q # Only container IDs $ docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" # Logs $ docker logs CONTAINER # View logs $ docker logs -f CONTAINER # Follow (tail -f) $ docker logs --tail 100 CONTAINER # Last 100 lines $ docker logs --since 10m CONTAINER # Last 10 minutes $ docker logs --details CONTAINER # Show extra details # Container details $ docker inspect CONTAINER # Full metadata (JSON) $ docker top CONTAINER # Running processes $ docker stats # Live resource usage (all) $ docker stats CONTAINER # Stats for specific container $ docker diff CONTAINER # Filesystem changes since creation # Interact with running containers $ docker exec CONTAINER COMMAND # Run command (new process) $ docker exec -it CONTAINER /bin/bash # Interactive shell $ docker attach CONTAINER # Attach to main process $ docker cp CONTAINER:/path /host # Copy from container $ docker cp /host CONTAINER:/path # Copy to container
03
Build, tag, push, pull, and manage container images. The blueprints of your containerized infrastructure.
# List and search $ docker images # List local images $ docker images -a # All (including intermediate) $ docker images -q # Only image IDs $ docker search nginx # Search Docker Hub # Pull and push $ docker pull IMAGE[:TAG] # Pull from registry $ docker push USER/IMAGE[:TAG] # Push to registry # Build $ docker build -t NAME:TAG . # Build from Dockerfile $ docker build -f Dockerfile.dev . # Specific Dockerfile $ docker build --build-arg K=v . # Build-time variables $ docker build --no-cache . # No cache $ docker build --target stage . # Build specific stage # Tag and remove $ docker tag SRC[:TAG] DST[:TAG] # Create tag for image $ docker rmi IMAGE # Remove image $ docker rmi -f IMAGE # Force remove $ docker history IMAGE # Show layer history # Import / Export $ docker save -o image.tar IMAGE # Save image to tar $ docker load -i image.tar # Load image from tar $ docker export CONTAINER > cont.tar # Export container FS $ docker import cont.tar IMAGE:TAG # Import as image
04
The blueprint for building container images. Every instruction creates a layer in the image filesystem. Optimize order for caching.
# Base image -- every Dockerfile starts here FROM node:18-alpine # Set working directory WORKDIR /app # Copy files (preferred over ADD for simple copies) COPY package*.json ./ COPY . . # ADD: like COPY but extracts tars and supports URLs ADD archive.tar.gz /app/ # Run commands during build (each RUN = new layer) RUN npm install RUN apt-get update && apt-get install -y curl \ && rm -rf /var/lib/apt/lists/* # Environment variables (persist in container) ENV NODE_ENV=production ENV PORT=3000 # Build-time variables (only during build) ARG VERSION=1.0 ARG BUILD_DATE # Expose ports (documentation, doesn't publish) EXPOSE 3000 # Mount point for volumes VOLUME /data # Switch to non-root user (security) USER node # Metadata labels LABEL version="1.0" description="My application" # Health check HEALTHCHECK --interval=30s --timeout=3s --retries=3 \ CMD curl -f http://localhost:3000/health || exit 1 # ENTRYPOINT: main executable (hard to override) ENTRYPOINT ["node"] # CMD: default args to ENTRYPOINT (easily overridden) CMD ["server.js"]
// separate build and runtime for smaller images
# ---- Build stage ---- FROM node:18 AS builder WORKDIR /app COPY package*.json ./ RUN npm install COPY . . RUN npm run build # ---- Production stage ---- FROM node:18-alpine AS production WORKDIR /app COPY --from=builder /app/dist ./dist COPY --from=builder /app/node_modules ./node_modules USER node EXPOSE 3000 CMD ["node", "dist/server.js"]
05
Define and run multi-container applications with a single YAML file. Orchestrate your services like a conductor in the digital symphony.
$ docker compose up # Create and start $ docker compose up -d # Detached mode $ docker compose up --build # Rebuild images first $ docker compose down # Stop and remove $ docker compose down -v # Also remove volumes $ docker compose down --rmi all # Also remove images $ docker compose build # Build/rebuild services $ docker compose build --no-cache # No cache $ docker compose ps # List containers $ docker compose logs -f SERVICE # Follow service logs $ docker compose exec SERVICE CMD # Execute in service $ docker compose restart # Restart services $ docker compose pull # Pull service images # Profiles (conditional services) $ docker compose --profile debug up $ docker compose --profile debug --profile test up
// modern compose files no longer require version: field
services: web: build: context: . dockerfile: Dockerfile args: - VERSION=1.0 image: my-web:latest container_name: my-web ports: - "8080:80" environment: - NODE_ENV=production env_file: - .env volumes: - ./data:/app/data - node_modules:/app/node_modules networks: - frontend - backend depends_on: db: condition: service_healthy restart: unless-stopped healthcheck: test: ["CMD", "curl", "-f", "http://localhost:3000/health"] interval: 30s timeout: 10s retries: 3 start_period: 40s deploy: resources: limits: cpus: '0.5' memory: 512M profiles: - debug db: image: postgres:15 environment: POSTGRES_PASSWORD: secret volumes: - db-data:/var/lib/postgresql/data healthcheck: test: ["CMD-SHELL", "pg_isready -U postgres"] interval: 10s timeout: 5s retries: 5 volumes: db-data: driver: local node_modules: networks: frontend: driver: bridge backend: driver: bridge
06
Connect containers across networks, configure drivers, and manage inter-container communication in the digital grid.
$ docker network create NETWORK $ docker network create --driver bridge my-network $ docker network create --driver overlay --attachable swarm-net $ docker network create --driver bridge --subnet 172.20.0.0/16 --gateway 172.20.0.1 my-bridge $ docker network ls # List networks $ docker network inspect NETWORK # Detailed info $ docker network rm NETWORK # Remove network $ docker network prune # Remove unused $ docker network connect NETWORK CONTAINER # Connect container $ docker network disconnect NETWORK CONTAINER # Disconnect
| Driver | Description | Use Case |
|---|---|---|
bridge |
Default. Isolated network on single host with automatic DNS between containers. | Most single-host and dev environments |
host |
No isolation. Container uses host network directly. Better performance, reduced security. | Max performance or direct host access |
overlay |
Distributed network across Docker hosts. Multi-daemon communication for Swarm. | Multi-host and Docker Swarm |
macvlan |
Assigns MAC address. Container appears as physical device on network. | Legacy apps needing direct network connection |
none |
Complete network isolation from host and other containers. | Security-sensitive workloads |
07
Persist data beyond container lifecycles. Volumes, bind mounts, and tmpfs -- choose your storage weapon wisely.
$ docker volume create VOLUME # Create named volume $ docker volume ls # List volumes $ docker volume inspect VOLUME # Detailed info $ docker volume rm VOLUME # Remove volume $ docker volume prune # Remove all unused # Use volume in container $ docker run -v my-data:/app/data nginx # Bind mount (host path) $ docker run -v $(pwd):/app node # tmpfs mount (memory only) $ docker run --tmpfs /app/cache nginx $ docker run --tmpfs /app/cache:size=100M nginx
| Type | Managed By | Location | Use Case | Persists |
|---|---|---|---|---|
Volumes |
Docker | /var/lib/docker/volumes/ | Production data, sharing between containers | Yes |
Bind Mounts |
User | Anywhere on host | Development, sharing host files, config | Yes (host) |
tmpfs |
Docker | Memory only | Temp data, caches, secrets | No (RAM) |
08
Authenticate to registries, push and pull images. Secure your supply chain with proper credential management.
# Login to Docker Hub (device code flow) $ docker login # Login with username (prompts for password) $ docker login -u username # Secure login for scripts (stdin) $ echo $DOCKER_PASSWORD | docker login -u username --password-stdin # Login to custom registry $ docker login registry.example.com # Logout $ docker logout $ docker logout registry.example.com # Push / Pull $ docker tag myapp:latest username/myapp:latest $ docker push username/myapp:latest $ docker pull username/myapp:latest
# AWS ECR $ aws ecr get-login-password --region us-east-1 | \ docker login --username AWS --password-stdin \ 123456789.dkr.ecr.us-east-1.amazonaws.com # Google Cloud (Artifact Registry) $ gcloud auth print-access-token | \ docker login -u oauth2accesstoken --password-stdin https://us-central1-docker.pkg.dev # Azure Container Registry $ az acr login --name myregistry
09
Monitor disk usage and purge unused resources. Keep your system lean and mean in the neon wasteland.
# System information $ docker system df # Show disk usage $ docker system info # System-wide info $ docker system events # Real-time events # Nuclear cleanup options $ docker system prune # Stopped containers + dangling images + unused nets $ docker system prune -f # Force (skip confirmation) $ docker system prune -a # + ALL unused images $ docker system prune --volumes # + unused volumes $ docker system prune -a --volumes # EVERYTHING unused # Targeted cleanup $ docker container prune # Stopped containers $ docker image prune # Dangling images $ docker image prune -a # All unused images $ docker volume prune # Unused volumes $ docker network prune # Unused networks
| Command | Removes |
|---|---|
docker system prune |
Stopped containers, dangling images, unused networks, build cache |
docker system prune -a |
Above + ALL unused images (not just dangling) |
docker system prune --volumes |
Standard prune targets + unused anonymous volumes (images: still dangling-only) |
docker system prune -a --volumes |
Everything unused -- the nuclear option |
10
Next-gen build engine with parallel execution, better caching, build secrets, and cross-platform image construction.
# Enable for single build $ DOCKER_BUILDKIT=1 docker build . # Enable globally $ export DOCKER_BUILDKIT=1 # Or in /etc/docker/daemon.json { "features": { "buildkit": true } }
# 1. QEMU Emulation (easiest) $ docker run --privileged --rm tonistiigi/binfmt --install all $ docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 . # 2. Build and push multi-platform image $ docker buildx create --name mybuilder --use $ docker buildx build --platform linux/amd64,linux/arm64 \ -t user/image:latest --push .
# Use platform-aware build variables FROM --platform=$BUILDPLATFORM golang:1.22 AS builder ARG TARGETPLATFORM ARG TARGETARCH ARG TARGETOS WORKDIR /app COPY . . RUN GOOS=$TARGETOS GOARCH=$TARGETARCH go build -o app FROM alpine:3.19 COPY --from=builder /app/app /app CMD ["/app"]
# syntax=docker/dockerfile:1 # Base stage FROM node:18 AS base WORKDIR /app COPY package*.json ./ # Development FROM base AS development RUN npm install COPY . . CMD ["npm", "run", "dev"] # Build FROM base AS builder RUN npm ci --omit=dev COPY . . RUN npm run build # Test FROM builder AS test RUN npm ci RUN npm test # Production FROM node:18-alpine AS production WORKDIR /app COPY --from=builder /app/dist ./dist COPY --from=builder /app/node_modules ./node_modules USER node HEALTHCHECK --interval=30s CMD node healthcheck.js CMD ["node", "dist/server.js"]
$ docker build --target development -t myapp:dev . $ docker build --target production -t myapp:prod . $ docker build --target test . # Run tests only
11
Battle-tested strategies for building smaller images, faster builds, tighter security, and effective debugging in the container wasteland.
// exclude files from build context
node_modules npm-debug.log .git .env .env.local *.md .dockerignore Dockerfile .vscode .idea *.swp *.swo .DS_Store dist build coverage .pytest_cache __pycache__ *.pyc
// order matters: put frequently-changing files last
FROM node:18 WORKDIR /app COPY . . RUN npm install CMD ["npm", "start"]
FROM node:18 WORKDIR /app COPY package*.json ./ RUN npm install COPY . . CMD ["npm", "start"]
RUN apt-get update RUN apt-get install -y curl RUN apt-get install -y vim
RUN apt-get update \
&& apt-get install -y \
curl vim \
&& rm -rf /var/lib/apt/lists/*
Use alpine, scratch, or gcr.io/distroless/static instead of full OS images. Smaller surface area = fewer vulnerabilities.
Create a dedicated user with RUN addgroup && adduser and switch with USER. Never run production containers as root.
Use RUN --mount=type=secret with BuildKit to securely pass tokens during build without baking them into image layers.
Scan with docker scout cves IMAGE or trivy image IMAGE. Integrate scanning into CI/CD pipelines.
FROM node:latest
FROM node:18.19.0-alpine3.18
ENV API_KEY=secret123
$ docker run -e API_KEY=$API_KEY myapp
# No HEALTHCHECK defined
HEALTHCHECK --interval=30s \
CMD curl -f http://localhost/health || exit 1
# View container processes $ docker top CONTAINER # Interactive shell $ docker exec -it CONTAINER /bin/sh # Logs with timestamps $ docker logs -t CONTAINER # Follow logs from specific time $ docker logs --since 2h -f CONTAINER # Inspect config with jq $ docker inspect CONTAINER | jq '.[0].Config' # Filesystem changes $ docker diff CONTAINER # Resource usage $ docker stats CONTAINER # Resource limits $ docker run -d --cpus="1.5" --memory="512m" nginx
services: web: build: context: . target: development volumes: - ./src:/app/src # Mount source for hot reload - /app/node_modules # Don't overwrite installed modules environment: - NODE_ENV=development ports: - "3000:3000"
| Priority | Source | Example |
|---|---|---|
| 1 (Highest) | docker run -e |
docker run -e VAR=value |
| 2 | compose.yml environment | environment: - VAR=value |
| 3 | .env file (Compose) | env_file: - .env |
| 4 | Dockerfile ENV | ENV VAR=value |
| 5 (Lowest) | Base image defaults | Inherited from parent image |