---
# Docker Compose stack: Nginx reverse proxy + Open WebUI + Ollama + audio API.
# NOTE(review): the top-level `version` key is obsolete in the Compose
# Specification and ignored by Compose v2; kept only for compatibility with
# legacy docker-compose v1 binaries.
version: '3.8'

services:
  # --- THE GATEKEEPER (reverse proxy) ---
  gateway:
    image: nginx:latest
    container_name: studio-gateway-1
    ports:
      - "80:80"
    volumes:
      - ./nginx/default.conf:/etc/nginx/conf.d/default.conf
    networks:
      - studio-net
    depends_on:
      - open-webui
      - audio-api  # Added here: Nginx waits for the audio service to start
    restart: always

  # --- THE INTERFACE ---
  open-webui:
    image: ghcr.io/open-webui/open-webui:main
    container_name: open-webui
    environment:
      - OLLAMA_BASE_URL=http://ollama:11434
      # NOTE(review): the fallback default is a weak secret — set
      # WEBUI_SECRET_KEY in the environment / .env file for any real deployment.
      - WEBUI_SECRET_KEY=${WEBUI_SECRET_KEY:-supersecretkey}
    volumes:
      - webui_data:/app/backend/data
    networks:
      - studio-net
    restart: always

  # --- THE BRAIN ---
  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    volumes:
      - ollama_data:/root/.ollama
    networks:
      - studio-net
    restart: always

  # --- THE AUDIO SERVICE ---
  audio-api:
    image: nvcr.io/nvidia/pytorch:23.10-py3  # robust CUDA/PyTorch base image
    container_name: audio-api
    working_dir: /app
    # Mount the local folder so the server.py script is found in /app
    volumes:
      - ./services/audio-api:/app
      - ./models:/app/models
      - ./outputs:/app/outputs
    # The trick: install Gradio BEFORE launching the script
    command: >
      sh -c "pip install --no-cache-dir gradio faster-whisper && python3 server.py"
    ports:
      - "7860:7860"
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    networks:
      - studio-net
    restart: always

networks:
  studio-net:
    driver: bridge

volumes:
  webui_data:
  ollama_data: