# speech_recognition.py

import ffmpeg
import subprocess
import tempfile
import wave

SAMPLE_RATE = 16000


def convert_audio(data: bytes) -> bytes:
    """Decode arbitrary audio bytes to 16-bit mono PCM at 16 kHz."""
    try:
        # Launch an ffmpeg subprocess to decode the audio, down-mixing and
        # resampling as necessary. Requires the ffmpeg CLI and the
        # `ffmpeg-python` package to be installed.
        out, _ = (
            ffmpeg.input("pipe:", threads=0)
            .output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=SAMPLE_RATE)
            .run(cmd="ffmpeg", capture_stdout=True, capture_stderr=True, input=data)
        )
    except ffmpeg.Error as e:
        raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
    return out
class ASR:
    def __init__(self, model: str = "tiny"):
        self.model = model

    def transcribe(self, audio: bytes) -> str:
        audio = convert_audio(audio)
        with tempfile.NamedTemporaryFile("w+b", suffix=".wav") as file:
            # The whisper.cpp-style binary expects a 16 kHz, 16-bit WAV file,
            # so wrap the raw PCM samples in a WAV container before handing
            # the temp file path to the process.
            with wave.open(file, "wb") as wav_file:
                wav_file.setnchannels(1)
                wav_file.setsampwidth(2)  # 16-bit samples
                wav_file.setframerate(SAMPLE_RATE)
                wav_file.writeframes(audio)
            file.flush()
            stdout, stderr = subprocess.Popen(
                ["./whisper", "-m", f"models/ggml-{self.model}.bin", "-f", file.name],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            ).communicate()
            if stderr:
                print(stderr.decode())
            return stdout.decode()
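

if __name__ == "__main__":
    # Minimal usage sketch: it assumes the ./whisper binary and the
    # models/ggml-tiny.bin model file referenced above are present, and that
    # "input.wav" is a hypothetical audio file in the working directory.
    with open("input.wav", "rb") as f:
        raw_audio = f.read()

    asr = ASR(model="tiny")
    print(asr.transcribe(raw_audio))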