services:
  # --- THE GATEWAY (nginx reverse proxy) ---
  # Single public entry point; only starts once the UI is healthy and the
  # audio API container is running, so nginx never proxies to a dead upstream.
  gateway:
    image: nginx:latest
    container_name: studio-gateway-1
    ports:
      - "80:80"  # quoted: unquoted H:M port pairs can hit YAML's sexagesimal trap
    volumes:
      - ./nginx/default.conf:/etc/nginx/conf.d/default.conf
    networks:
      - studio-net
    depends_on:
      open-webui:
        condition: service_healthy
      audio-api:
        condition: service_started
    restart: always

  # --- THE INTERFACE (Open WebUI) ---
  open-webui:
    image: ghcr.io/open-webui/open-webui:main
    container_name: open-webui
    environment:
      # Reaches Ollama over the shared bridge network by service name.
      - OLLAMA_BASE_URL=http://ollama:11434
      # SECURITY: override WEBUI_SECRET_KEY in .env — the fallback
      # "supersecretkey" is a placeholder and must not reach production.
      - WEBUI_SECRET_KEY=${WEBUI_SECRET_KEY:-supersecretkey}
      # Public URL now overridable via .env; default preserves the previous
      # hard-coded address for backward compatibility.
      - WEBUI_URL=${WEBUI_URL:-http://93.108.34.236}
      - ENABLE_WEBSOCKETS=True
    volumes:
      - webui_data:/app/backend/data
    networks:
      - studio-net
    depends_on:
      # Ensure the model backend container is started before the UI
      # (short form: no health condition — ollama defines no healthcheck).
      - ollama
    healthcheck:
      # Gates the gateway's service_healthy condition above.
      test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
      interval: 30s
      timeout: 10s
      retries: 3
    restart: always

  # --- THE BRAIN (Ollama) ---
  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    deploy:
      resources:
        reservations:
          devices:
            # Expose all host NVIDIA GPUs to the container
            # (requires the NVIDIA Container Toolkit on the host).
            - driver: nvidia
              count: all
              capabilities: [gpu]
    volumes:
      # Persist downloaded models across container recreation.
      - ollama_data:/root/.ollama
    networks:
      - studio-net
    restart: always

  # --- THE AUDIO SERVICE ---
  audio-api:
    build:
      context: ./services/audio-api
      dockerfile: Dockerfile
    container_name: audio-api
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      # Force Python to flush logs immediately (no stdout buffering).
      - PYTHONUNBUFFERED=1
    # Absolute in-container path so the entrypoint works regardless of workdir.
    entrypoint: ["python3.11", "/app/server.py"]
    volumes:
      # Bind-mount sources for live development; models/outputs shared with host.
      - ./services/audio-api:/app
      - ./models:/app/models
      - ./outputs:/app/outputs
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    networks:
      - studio-net
    restart: always

networks:
  studio-net:
    driver: bridge

volumes:
  webui_data:
  ollama_data: