version: '3.8'

services:
  # --- THE GATEKEEPER (reverse proxy) ---
  gateway:
    image: nginx:latest
    container_name: studio-gateway-1
    ports:
      - "80:80"
    volumes:
      - ./nginx/default.conf:/etc/nginx/conf.d/default.conf
    networks:
      - studio-net
    # Wait for the web UI to report healthy before routing traffic.
    # audio-api defines no healthcheck, so we can only wait for it to start.
    depends_on:
      open-webui:
        condition: service_healthy
      audio-api:
        condition: service_started
    restart: always

  # --- THE INTERFACE (Open WebUI) ---
  open-webui:
    image: ghcr.io/open-webui/open-webui:main
    container_name: open-webui
    environment:
      - OLLAMA_BASE_URL=http://ollama:11434
      # Falls back to a default key if WEBUI_SECRET_KEY is not set in .env.
      - WEBUI_SECRET_KEY=${WEBUI_SECRET_KEY:-supersecretkey}
    volumes:
      - webui_data:/app/backend/data
    networks:
      - studio-net
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
      interval: 30s
      timeout: 10s
      retries: 3
    restart: always

  # --- THE BRAIN (Ollama) ---
  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    # Reserve all available NVIDIA GPUs for model inference.
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    volumes:
      - ollama_data:/root/.ollama
    networks:
      - studio-net
    restart: always

  # --- THE AUDIO SERVICE ---
  audio-api:
    image: nvcr.io/nvidia/pytorch:23.10-py3
    container_name: audio-api
    working_dir: /app
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
    volumes:
      - ./services/audio-api:/app
      - ./models:/app/models
      - ./outputs:/app/outputs
    # Ensure the expected directories exist, then install runtime deps and
    # launch the server.
    command: >
      sh -c "mkdir -p models outputs &&
             pip install --no-cache-dir gradio faster-whisper &&
             python3 server.py"
    ports:
      - "7860:7860"
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    networks:
      - studio-net
    restart: always

networks:
  studio-net:
    driver: bridge

volumes:
  webui_data:
  ollama_data: