erreur server.py gradio > fast api
This commit is contained in:
@ -1,19 +1,35 @@
|
||||
import gradio as gr
|
||||
import time
|
||||
from fastapi import FastAPI, UploadFile, File, HTTPException
|
||||
from faster_whisper import WhisperModel
|
||||
import io
|
||||
|
||||
def generate_music(prompt, duration):
    """Dummy music-generation stub: log the request, simulate work, return a placeholder.

    Args:
        prompt: Text description of the desired music.
        duration: Requested length in seconds (only echoed in the log —
            the simulated compute time is fixed at 2 s regardless).

    Returns:
        A placeholder status string.
    """
    print(f"🎵 Génération demandée : {prompt} pour {duration} secondes")
    # Pretend to do some heavy computation.
    time.sleep(2)
    return "Dummy audio generated !"
|
||||
app = FastAPI()

# Gradio interface exposed for Open WebUI: a text prompt and a numeric
# duration in, a status string out.
demo = gr.Interface(
    fn=generate_music,
    inputs=["text", "number"],
    outputs="text",
    title="ACE-Step Audio API"
)

# Load the Whisper "base" model on the GPU ("cuda") with float16 compute
# for maximum speed.
# NOTE(review): assumes a CUDA-capable GPU is available at import time —
# this line raises on CPU-only hosts; confirm the deployment target.
model = WhisperModel("base", device="cuda", compute_type="float16")
|
||||
|
||||
@app.get("/v1/models")
|
||||
async def get_models():
|
||||
# Indispensable pour qu'Open WebUI voie le modèle dans la liste
|
||||
return {"data": [{"id": "whisper-1"}]}
|
||||
|
||||
@app.post("/v1/audio/transcriptions")
|
||||
async def transcribe(file: UploadFile = File(...)):
|
||||
try:
|
||||
# On lit le fichier envoyé par le micro
|
||||
audio_data = await file.read()
|
||||
audio_file = io.BytesIO(audio_data)
|
||||
|
||||
# Transcription ultra-rapide avec ton GPU
|
||||
segments, _ = model.transcribe(audio_file, beam_size=5)
|
||||
text = " ".join([segment.text for segment in segments])
|
||||
|
||||
return {"text": text}
|
||||
except Exception as e:
|
||||
print(f"Erreur transcription: {e}")
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch(server_name="0.0.0.0", server_port=7860)
|
||||
import uvicorn
|
||||
# On lance sur le port 7860 comme prévu dans ton Docker-compose
|
||||
uvicorn.run(app, host="0.0.0.0", port=7860)
|
||||
Reference in New Issue
Block a user