speech_recognition.py

import asyncio
import os
import subprocess
import tempfile

import ffmpeg

# whisper.cpp expects 16 kHz, mono, 16-bit PCM audio.
SAMPLE_RATE = 16000

def convert_audio(data: bytes, out_filename: str):
    """Convert arbitrary input media bytes to a 16 kHz mono WAV file via ffmpeg."""
    try:
        with tempfile.NamedTemporaryFile("w+b") as file:
            # Write the raw bytes to a temp file so ffmpeg can probe the container/codec.
            file.write(data)
            file.flush()
            print(f"Converting media {file.name} to {out_filename}")
            out, err = (
                ffmpeg.input(file.name, threads=0)
                .output(out_filename, format="wav", acodec="pcm_s16le", ac=1, ar=SAMPLE_RATE)
                .overwrite_output()
                .run(cmd="ffmpeg", capture_stdout=True, capture_stderr=True, input=data)
            )
            if os.path.getsize(out_filename) == 0:
                print(str(err, "utf-8"))
                raise Exception("Converted file is empty")
    except ffmpeg.Error as e:
        raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
    return out

# ggml Whisper model variants accepted by ASR().
MODELS = [
    "tiny",
    "tiny.en",
    "tiny-q5_1",
    "tiny.en-q5_1",
    "tiny-q8_0",
    "base",
    "base.en",
    "base-q5_1",
    "base.en-q5_1",
    "base-q8_0",
    "small",
    "small.en",
    "small.en-tdrz",
    "small-q5_1",
    "small.en-q5_1",
    "small-q8_0",
    "medium",
    "medium.en",
    "medium-q5_0",
    "medium.en-q5_0",
    "medium-q8_0",
    "large-v1",
    "large-v2",
    "large-v2-q5_0",
    "large-v2-q8_0",
    "large-v3",
    "large-v3-q5_0",
    "large-v3-turbo",
    "large-v3-turbo-q5_0",
    "large-v3-turbo-q8_0",
]

class ASR:
    def __init__(self, model="tiny", language="en"):
        if model not in MODELS:
            raise ValueError(f"Invalid model: {model}. Must be one of {MODELS}")
        self.model = model
        self.language = language
        # Prefer a model bundled with the image; otherwise cache downloads under /data/models.
        if os.path.exists(f"/app/ggml-{model}.bin"):
            self.model_path = "/app"
        else:
            self.model_path = "/data/models"
        if not os.path.exists(self.model_path):
            os.mkdir(self.model_path)
        # Serialize access to whisper-cli so only one transcription runs at a time.
        self.lock = asyncio.Lock()

    def load_model(self):
        file_path = f"{self.model_path}/ggml-{self.model}.bin"
        # Fetch the ggml weights if they are missing or a previous download was interrupted.
        if not os.path.exists(file_path) or os.path.getsize(file_path) == 0:
            print("Downloading model...")
            subprocess.run(["./download-ggml-model.sh", self.model, self.model_path], check=True)
            print("Done.")

    async def transcribe(self, audio: bytes) -> str:
        # Convert the incoming audio to a 16 kHz mono WAV that whisper-cli can read.
        filename = tempfile.mktemp(suffix=".wav")
        convert_audio(audio, filename)
        async with self.lock:
            proc = await asyncio.create_subprocess_exec(
                "./whisper-cli",
                "-m", f"{self.model_path}/ggml-{self.model}.bin",
                "-l", self.language,
                "-f", filename,
                "-nt",  # no timestamps, plain text output
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
            stdout, stderr = await proc.communicate()
        os.remove(filename)
        if stderr:
            print(stderr.decode())
        text = stdout.decode().strip()
        print(text)
        return text
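

# Example usage (illustrative sketch, not part of the module above): assumes an
# asyncio entry point and a caller-supplied audio file named "sample.ogg".
if __name__ == "__main__":
    async def _demo():
        asr = ASR(model="tiny", language="en")
        asr.load_model()  # downloads ggml-tiny.bin on first run
        with open("sample.ogg", "rb") as f:
            audio = f.read()
        text = await asr.transcribe(audio)
        print(f"Transcript: {text}")

    asyncio.run(_demo())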