diff --git a/docker-compose.yml b/docker-compose.yml
index d63d011..b936d98 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -11,12 +11,15 @@ services:
       - ./nginx/default.conf:/etc/nginx/conf.d/default.conf
     networks:
       - studio-net
+    # On attend que l'interface soit "healthy" et que l'audio soit simplement démarré
     depends_on:
-      - open-webui
-      - audio-api # Ajouté ici : Nginx attend que l'audio soit prêt
+      open-webui:
+        condition: service_healthy
+      audio-api:
+        condition: service_started
     restart: always
 
-  # --- L'INTERFACE ---
+  # --- L'INTERFACE (Open WebUI) ---
   open-webui:
     image: ghcr.io/open-webui/open-webui:main
     container_name: open-webui
@@ -27,9 +30,14 @@ services:
       - webui_data:/app/backend/data
     networks:
       - studio-net
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
     restart: always
 
-  # --- LE CERVEAU ---
+  # --- LE CERVEAU (Ollama) ---
   ollama:
     image: ollama/ollama:latest
     container_name: ollama
@@ -48,17 +56,20 @@ services:
 
   # --- LE SERVICE AUDIO ---
   audio-api:
-    image: nvcr.io/nvidia/pytorch:23.10-py3 # On utilise une image robuste
+    image: nvcr.io/nvidia/pytorch:23.10-py3
     container_name: audio-api
     working_dir: /app
-    # On monte le dossier local pour que le script server.py soit trouvé
+    environment:
+      - NVIDIA_VISIBLE_DEVICES=all
     volumes:
       - ./services/audio-api:/app
       - ./models:/app/models
       - ./outputs:/app/outputs
-    # L'astuce : On installe Gradio AVANT de lancer le script
+    # Correction de la commande pour s'assurer que les dossiers existent
     command: >
-      sh -c "pip install --no-cache-dir gradio faster-whisper && python3 server.py"
+      sh -c "mkdir -p models outputs &&
+      pip install --no-cache-dir gradio faster-whisper &&
+      python3 server.py"
     ports:
       - "7860:7860"
     deploy: