---
# Docker Compose stack: Nginx gateway -> Open WebUI -> Ollama, plus a GPU audio API.
# NOTE(review): the `version` key is informational-only in Compose v2+; kept for
# compatibility with older docker-compose binaries.
version: "3.8"

services:
  # --- THE GATEKEEPER (entry point for your 20 students) ---
  gateway:
    image: nginx:latest
    ports:
      - "80:80"
    volumes:
      - ./nginx/default.conf:/etc/nginx/conf.d/default.conf
    networks:
      - studio-net
    depends_on:
      - open-webui

  # --- THE INTERFACE (Open WebUI) ---
  open-webui:
    image: ghcr.io/open-webui/open-webui:main
    container_name: open-webui
    # Port 3000 is no longer published externally; Nginx proxies to it
    # over the internal network instead.
    environment:
      - OLLAMA_BASE_URL=http://ollama:11434
      # NOTE(review): the fallback value is a weak placeholder — make sure
      # WEBUI_SECRET_KEY is set in the environment / .env for any real deployment.
      - WEBUI_SECRET_KEY=${WEBUI_SECRET_KEY:-supersecretkey}
    volumes:
      - webui_data:/app/backend/data
    networks:
      - studio-net
    restart: always

  # --- THE BRAIN (Ollama) ---
  # NOTE(review): only open-webui declares a restart policy; consider adding
  # `restart: always` here and on audio-api if the stack should self-heal.
  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    deploy:
      resources:
        reservations:
          devices:
            # Reserve all available NVIDIA GPUs for this container.
            - driver: nvidia
              count: all
              capabilities: [gpu]
    volumes:
      - ollama_data:/root/.ollama
    networks:
      - studio-net

  # --- THE AUDIO SERVICE (your API) ---
  audio-api:
    build:
      context: ./services/audio-api
    container_name: audio-api
    # Port 7860 stays published so the API can also be tested directly,
    # bypassing the Nginx gateway.
    ports:
      - "7860:7860"
    deploy:
      resources:
        limits:
          memory: 32G  # 64 GB of VRAM means we can be generous with system RAM too
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    volumes:
      - ./models:/app/models
      - ./outputs:/app/outputs
    networks:
      - studio-net

networks:
  studio-net:
    driver: bridge

volumes:
  webui_data:
  ollama_data: