# =============================================================================
# Coolify: single-file staging stack (no include:)
# =============================================================================
# Use this as "Docker Compose Location" in Coolify: docker/docker-compose.coolify-merged.yml
# Base Directory: /. Build contexts (backend, worker, frontend) are relative to Base Directory.
# Set all env vars in Coolify Environment Variables.
#
# Built-in for Coolify:
# - Traefik labels on frontend and api-gateway: public URL and /api routing + HTTPS (letsencrypt).
# Set TRAEFIK_PUBLIC_HOST in Coolify env to change domain (default app.getlinkzen.com).
# - prisma-init service: runs db push + seed once after Postgres is healthy. api-gateway does NOT
# wait for it (see the depends_on note on api-gateway), so a failed seed never blocks a deploy.
# So you do NOT need to add domains in the Coolify UI for frontend/api or run Prisma manually.
# If Traefik routing or HTTPS does not work, Coolify may be using different entrypoint names
# (e.g. web/websecure); in that case use docs/coolify-proxy-app-getlinkzen.yaml as a fallback.
#
# MEMORY: This stack sets ~14 GB of memory limits in total. On a 4 GB server it swaps heavily (very slow).
# On 8 GB it can still swap when many services are active; if Coolify or the app feels slow,
# reduce limits so the total is ~6-7 GB, and check with: free -h; docker stats (see COOLIFY-SETUP.md §8b).
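# If you need to trim, the largest limits below are languagetool (2048M), stirling-pdf (1536M),
# api-gateway (1536M), rembg (1280M), keycloak (1G) and prometheus (1G); lowering those first
# brings the total down fastest (the per-service values here are starting points, not tested recommendations).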
# =============================================================================
services:
postgres:
image: postgres:16-alpine
container_name: toolsplatform-postgres
restart: unless-stopped
environment:
POSTGRES_DB: ${DB_NAME}
POSTGRES_USER: ${DB_USER}
POSTGRES_PASSWORD: ${DB_PASSWORD}
volumes:
- postgres_data:/var/lib/postgresql/data
networks:
- backend
ports:
- "5432:5432"
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${DB_USER} -d ${DB_NAME}"]
interval: 10s
timeout: 5s
retries: 5
start_period: 10s
deploy:
resources:
limits:
memory: 768M
reservations:
memory: 256M
redis:
image: redis:7-alpine
container_name: toolsplatform-redis
restart: unless-stopped
command: redis-server --appendonly yes --maxmemory 400mb --maxmemory-policy allkeys-lru
volumes:
- redis_data:/data
networks:
- backend
ports:
- "6379:6379"
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
start_period: 5s
deploy:
resources:
limits:
memory: 512M
reservations:
memory: 128M
minio:
image: minio/minio:latest
container_name: toolsplatform-minio
restart: unless-stopped
command: server /data --console-address ":9001"
ports:
- "9000:9000"
- "9001:9001"
environment:
MINIO_ROOT_USER: ${MINIO_ACCESS_KEY}
MINIO_ROOT_PASSWORD: ${MINIO_SECRET_KEY}
volumes:
- minio_data:/data
networks:
- backend
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
start_period: 10s
deploy:
resources:
limits:
memory: 512M
reservations:
memory: 128M
keycloak:
image: quay.io/keycloak/keycloak:latest
container_name: toolsplatform-keycloak
restart: unless-stopped
command: start-dev
ports:
- "8180:8080"
environment:
KEYCLOAK_ADMIN: ${KEYCLOAK_ADMIN}
KEYCLOAK_ADMIN_PASSWORD: ${KEYCLOAK_ADMIN_PASSWORD}
KC_DB: postgres
KC_DB_URL: jdbc:postgresql://postgres:5432/${DB_NAME}
KC_DB_USERNAME: ${DB_USER}
KC_DB_PASSWORD: ${DB_PASSWORD}
KC_HOSTNAME: ${KEYCLOAK_PUBLIC_URL:-https://auth.getlinkzen.com}
KC_HOSTNAME_ADMIN: ${KEYCLOAK_PUBLIC_URL:-https://auth.getlinkzen.com}
KC_HOSTNAME_URL: ${KEYCLOAK_PUBLIC_URL:-https://auth.getlinkzen.com}
KC_HOSTNAME_STRICT: "false"
KC_PROXY: edge
KC_PROXY_HEADERS: xforwarded
KC_HTTP_ENABLED: "true"
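# Note: both the older (KC_PROXY, KC_HOSTNAME_URL) and newer (KC_PROXY_HEADERS, KC_HOSTNAME /
# KC_HOSTNAME_ADMIN) proxy/hostname options are set above, presumably so the config keeps working
# across the Keycloak versions pulled in by the :latest tag.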
volumes:
- keycloak_data:/opt/keycloak/data
networks:
- backend
- frontend
depends_on:
postgres:
condition: service_healthy
deploy:
resources:
limits:
memory: 1G
reservations:
memory: 512M
stirling-pdf:
image: stirlingtools/stirling-pdf:latest-fat
container_name: toolsplatform-stirling
restart: unless-stopped
ports:
- "8090:8080"
environment:
SECURITY_ENABLELOGIN: "false"
SECURITY_CUSTOMGLOBALAPIKEY: "dev-api-key-change-in-production"
INSTALL_BOOK_AND_ADVANCED_HTML_OPS: "true"
LANGS: "en_GB,fr_FR,ar_AR"
SYSTEM_MAXFILESIZE: "500"
SPRING_SERVLET_MULTIPART_MAX_FILE_SIZE: "500MB"
SPRING_SERVLET_MULTIPART_MAX_REQUEST_SIZE: "500MB"
volumes:
- stirling_data:/usr/share/tessdata
- stirling_configs:/configs
networks:
- processing
deploy:
resources:
limits:
memory: 1536M
reservations:
memory: 512M
imagor:
image: shumc/imagor:latest
container_name: toolsplatform-imagor
restart: unless-stopped
ports:
- "8082:8000"
environment:
IMAGOR_UNSAFE: "1"
UPLOAD_LOADER_ENABLE: "1"
UPLOAD_LOADER_MAX_ALLOWED_SIZE: "67108864"
IMAGOR_AUTO_WEBP: "1"
IMAGOR_RESULT_STORAGE_PATH: "/tmp/imagor"
volumes:
- imagor_data:/tmp/imagor
networks:
- processing
- backend
deploy:
resources:
limits:
memory: 512M
reservations:
memory: 128M
rembg:
image: danielgatis/rembg
container_name: toolsplatform-rembg
restart: unless-stopped
ports:
- "5000:7000"
entrypoint: ["/bin/sh", "-c"]
command:
- |
set -e
echo "Pre-downloading rembg models to /root/.u2net ..."
rembg d u2net u2netp u2net_human_seg isnet-general-use
echo "Starting rembg server on port 7000 ..."
exec rembg s --host 0.0.0.0 --port 7000
volumes:
- rembg_models:/root/.u2net
networks:
- processing
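# The 600s start_period below leaves time for the model pre-download in the command above to
# finish before healthcheck failures start counting against the retry limit.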
healthcheck:
test: ["CMD-SHELL", "curl -sf http://localhost:7000/ || exit 1"]
interval: 15s
timeout: 5s
retries: 5
start_period: 600s
deploy:
resources:
limits:
memory: 1280M
reservations:
memory: 512M
languagetool:
image: erikvl87/languagetool
container_name: toolsplatform-languagetool
restart: unless-stopped
ports:
- "8010:8010"
environment:
Java_Xms: "512m"
Java_Xmx: "2g"
networks:
- processing
deploy:
resources:
limits:
memory: 2048M
reservations:
memory: 512M
# One-time DB schema + seed (runs after Postgres is healthy; api-gateway does NOT wait for it, see below)
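# If the schema push or seed needs to be repeated later, a one-off run of this service is enough,
# e.g. (sketch, run from the repo root; adjust the -f path to your checkout):
#   docker compose -f docker/docker-compose.coolify-merged.yml run --rm prisma-init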
prisma-init:
build:
context: backend
dockerfile: Dockerfile.prod
container_name: toolsplatform-prisma-init
restart: "no"
env_file:
- .env
environment:
DATABASE_URL: postgresql://postgres:${DB_PASSWORD:-postgres}@postgres:5432/${DB_NAME:-toolsplatform}?schema=app
command: ["sh", "-c", "set -e; echo '==> prisma db push'; npx prisma db push --accept-data-loss; echo '==> prisma db seed'; npx prisma db seed; echo '==> prisma-init done'"]
depends_on:
postgres:
condition: service_healthy
networks:
- backend
api-gateway:
build:
context: backend
dockerfile: Dockerfile.prod
container_name: toolsplatform-api-gateway
restart: unless-stopped
# Intentionally does not depend on prisma-init, so the deploy succeeds even if the seed fails; run the seed manually once if needed (see COOLIFY-SETUP.md §6.1).
depends_on:
postgres: { condition: service_started }
redis: { condition: service_started }
minio: { condition: service_started }
keycloak: { condition: service_started }
env_file:
- .env
environment:
NODE_ENV: production
PRISMA_QUERY_ENGINE_LIBRARY: /app/node_modules/.prisma/client/libquery_engine-linux-musl-openssl-3.0.x.so.node
API_PORT: "4000"
API_HOST: 0.0.0.0
DATABASE_URL: postgresql://postgres:${DB_PASSWORD:-postgres}@postgres:5432/${DB_NAME:-toolsplatform}?schema=app
REDIS_HOST: redis
REDIS_PORT: "6379"
MINIO_ENDPOINT: minio
MINIO_PORT: "9000"
MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
MINIO_BUCKET: uploads
MINIO_USE_SSL: "false"
KEYCLOAK_URL: http://keycloak:8080
KEYCLOAK_PUBLIC_URL: ${KEYCLOAK_PUBLIC_URL:-http://localhost:8180}
KEYCLOAK_REALM: ${KEYCLOAK_REALM:-toolsplatform}
KEYCLOAK_ISSUER_URI: ${KEYCLOAK_ISSUER_URI:-http://localhost:8180/realms/toolsplatform}
KEYCLOAK_CLIENT_ID: api-gateway
KEYCLOAK_CLIENT_SECRET: ${KEYCLOAK_CLIENT_SECRET}
ADMIN_ROLE: platform-admin
ADMIN_DASHBOARD_ENABLED: "true"
STIRLING_PDF_URL: http://stirling-pdf:8080
IMAGOR_URL: http://imagor:8000
REMBG_URL: http://rembg:7000
LANGUAGETOOL_URL: http://languagetool:8010
networks:
- backend
- frontend
- processing
ports:
- "4000:4000"
deploy:
labels:
# Public URL + /api -> this service (priority so /api wins over frontend)
- "traefik.enable=true"
- "traefik.http.routers.tools-api.rule=Host(\"${TRAEFIK_PUBLIC_HOST:-app.getlinkzen.com}\") && PathPrefix(\"/api\")"
- "traefik.http.routers.tools-api.priority=10"
- "traefik.http.routers.tools-api.entrypoints=http,https"
- "traefik.http.routers.tools-api.service=tools-api-svc"
- "traefik.http.routers.tools-api.tls.certresolver=letsencrypt"
- "traefik.http.services.tools-api-svc.loadbalancer.server.port=4000"
resources:
limits:
memory: 1536M
reservations:
memory: 256M
worker:
build:
context: worker
dockerfile: Dockerfile.prod
container_name: toolsplatform-worker
restart: unless-stopped
env_file:
- .env
environment:
NODE_ENV: production
DATABASE_URL: postgresql://postgres:${DB_PASSWORD:-postgres}@postgres:5432/${DB_NAME:-toolsplatform}?schema=app
REDIS_HOST: redis
REDIS_PORT: "6379"
MINIO_ENDPOINT: minio
MINIO_PORT: "9000"
MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
MINIO_BUCKET: uploads
MINIO_USE_SSL: "false"
REMBG_URL: http://rembg:7000
IMAGOR_URL: http://imagor:8000
STIRLING_PDF_URL: http://stirling-pdf:8080
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_started
minio:
condition: service_started
rembg:
condition: service_healthy
networks:
- backend
- processing
deploy:
resources:
limits:
memory: 512M
reservations:
memory: 256M
frontend:
build:
context: frontend
dockerfile: Dockerfile.prod
args:
NEXT_PUBLIC_API_BASE_URL: ${NEXT_PUBLIC_API_BASE_URL:-}
NEXT_PUBLIC_API_URL: ${NEXT_PUBLIC_API_URL:-}
NEXT_PUBLIC_KEYCLOAK_URL: ${NEXT_PUBLIC_KEYCLOAK_URL:-}
container_name: toolsplatform-frontend
restart: unless-stopped
env_file:
- .env
environment:
PORT: "3000"
HOSTNAME: 0.0.0.0
networks:
- frontend
ports:
- "${FRONTEND_PORT:-3000}:3000"
deploy:
labels:
- "traefik.enable=true"
- "traefik.http.routers.tools-frontend.rule=Host(\"${TRAEFIK_PUBLIC_HOST:-app.getlinkzen.com}\")"
- "traefik.http.routers.tools-frontend.entrypoints=http,https"
- "traefik.http.routers.tools-frontend.service=tools-frontend-svc"
- "traefik.http.routers.tools-frontend.tls.certresolver=letsencrypt"
- "traefik.http.services.tools-frontend-svc.loadbalancer.server.port=3000"
resources:
limits:
memory: 512M
reservations:
memory: 256M
prometheus:
image: prom/prometheus:latest
container_name: prometheus
volumes:
- prometheus_data:/prometheus
command:
- --config.file=/etc/prometheus/prometheus.yml
- --storage.tsdb.path=/prometheus
- --storage.tsdb.retention.time=15d
networks:
- backend
restart: unless-stopped
ports:
- "9090:9090"
deploy:
resources:
limits:
memory: 1G
reservations:
memory: 256M
grafana:
image: grafana/grafana:latest
container_name: grafana
environment:
GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_PASSWORD:-admin}
GF_USERS_ALLOW_SIGN_UP: "false"
GF_SERVER_HTTP_PORT: "3000"
volumes:
- grafana_data:/var/lib/grafana
- ../config/grafana/provisioning:/etc/grafana/provisioning:ro
networks:
- frontend
- backend
restart: unless-stopped
ports:
- "3002:3000"
deploy:
resources:
limits:
memory: 512M
reservations:
memory: 128M
loki:
image: grafana/loki:latest
container_name: loki
volumes:
- loki_data:/loki
command: -config.file=/etc/loki/local-config.yaml
networks:
- backend
restart: unless-stopped
deploy:
resources:
limits:
memory: 768M
reservations:
memory: 256M
# Promtail is disabled under Coolify: the bind mount ../config/promtail is not resolved correctly.
# To re-enable it, use Coolify Persistent Storage for the config, or fix the Base Directory so that config/ exists.
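# Another option (untested sketch): mount the config from an absolute host path once the checkout
# location on the server is known, e.g.
#   - /srv/toolsplatform/config/promtail:/etc/promtail:ro   # hypothetical path, adjust to your host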
# promtail:
# image: grafana/promtail:latest
# container_name: promtail
# volumes:
# - /var/log:/var/log:ro
# - /var/run/docker.sock:/var/run/docker.sock:ro
# - ../config/promtail:/etc/promtail:ro
# command: -config.file=/etc/promtail/promtail-config.yml
# networks:
# - backend
# restart: unless-stopped
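# Nightly backups of the postgres and minio volumes at 03:00, kept for 7 days in backup_archive.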
backup:
image: offen/docker-volume-backup:latest
container_name: backup
environment:
BACKUP_CRON_EXPRESSION: "0 3 * * *"
BACKUP_RETENTION_DAYS: "7"
BACKUP_FILENAME: "backup-%Y-%m-%d.tar.gz"
volumes:
- postgres_data:/backup/postgres:ro
- minio_data:/backup/minio:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
- backup_archive:/archive
networks:
- backend
restart: unless-stopped
deploy:
resources:
limits:
memory: 512M
reservations:
memory: 64M
# These networks are external: on Coolify redeploys they already exist from a previous run.
# On the first deploy to a new server, create them once, e.g.:
# docker network create toolsplatform-backend
# docker network create toolsplatform-frontend
# docker network create toolsplatform-processing
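# To verify they exist: docker network ls --filter name=toolsplatform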
networks:
backend:
name: toolsplatform-backend
external: true
frontend:
name: toolsplatform-frontend
external: true
processing:
name: toolsplatform-processing
external: true
volumes:
postgres_data:
name: toolsplatform-postgres-data
redis_data:
name: toolsplatform-redis-data
minio_data:
name: toolsplatform-minio-data
keycloak_data:
name: toolsplatform-keycloak-data
stirling_data:
name: toolsplatform-stirling-data
stirling_configs:
name: toolsplatform-stirling-configs
imagor_data:
name: toolsplatform-imagor-data
rembg_models:
name: toolsplatform-rembg-models
prometheus_data:
name: toolsplatform-prometheus-data
grafana_data:
name: toolsplatform-grafana-data
loki_data:
name: toolsplatform-loki-data
backup_archive:
name: toolsplatform-backup-archive