# useWhisper

React hook for the OpenAI Whisper API, with a built-in speech recorder, real-time transcription, and silence removal.


https://user-images.githubusercontent.com/2707253/224465747-0b1ee159-21dd-4cd0-af9d-6fc9b882d716.mp4


**use-whisper-native** (React Native version, in progress):

- Repository: https://github.com/chengsokdara/use-whisper-native
- Progress: https://github.com/chengsokdara/use-whisper-native/issues/1

### Install

```sh
npm i @chengsokdara/use-whisper
# or
yarn add @chengsokdara/use-whisper
```
### Usage

```tsx
import { useWhisper } from '@chengsokdara/use-whisper'

const App = () => {
  const {
    recording,
    speaking,
    transcribing,
    transcript,
    pauseRecording,
    startRecording,
    stopRecording,
  } = useWhisper({
    apiKey: process.env.OPENAI_API_TOKEN, // YOUR_OPEN_AI_TOKEN
  })

  return (
    <div>
      {/* String() because React renders nothing for bare booleans */}
      <p>Recording: {String(recording)}</p>
      <p>Speaking: {String(speaking)}</p>
      <p>Transcribing: {String(transcribing)}</p>
      <p>Transcribed Text: {transcript.text}</p>
      <button onClick={() => startRecording()}>Start</button>
      <button onClick={() => pauseRecording()}>Pause</button>
      <button onClick={() => stopRecording()}>Stop</button>
    </div>
  )
}
```
#### Custom server

```tsx
import { useWhisper } from '@chengsokdara/use-whisper'

const App = () => {
  /**
   * For more control, handle the recorded speech yourself:
   * send it to your own custom server
   * and return the response back to useWhisper.
   */
  const onTranscribe = async (blob: Blob) => {
    // encode the recording as a base64 data URL
    const base64 = await new Promise<string | ArrayBuffer | null>(
      (resolve) => {
        const reader = new FileReader()
        reader.onloadend = () => resolve(reader.result)
        reader.readAsDataURL(blob)
      }
    )
    const body = JSON.stringify({ file: base64, model: 'whisper-1' })
    const headers = { 'Content-Type': 'application/json' }
    const { default: axios } = await import('axios')
    const response = await axios.post('/api/whisper', body, {
      headers,
    })
    const { text } = response.data
    // your server must return the result in Transcript format
    return {
      blob,
      text,
    }
  }

  const { transcript } = useWhisper({
    // callback to handle transcription with a custom server
    onTranscribe,
  })

  return (
    <div>
      <p>{transcript.text}</p>
    </div>
  )
}
```
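The `/api/whisper` endpoint above is yours to implement. A minimal sketch, assuming a Next.js API route and Node 18+ (for the global `fetch`, `FormData`, and `Blob`); the field names simply mirror the client code above:

```ts
// pages/api/whisper.ts (a hypothetical server for the example above)
import type { NextApiRequest, NextApiResponse } from 'next'

export default async function handler(
  req: NextApiRequest,
  res: NextApiResponse
) {
  // the client sends { file: <base64 data URL>, model: 'whisper-1' }
  const { file, model } = req.body
  const buffer = Buffer.from(file.split(',')[1], 'base64')

  // forward the audio to the OpenAI transcription endpoint
  const formData = new FormData()
  formData.append(
    'file',
    new Blob([buffer], { type: 'audio/webm' }),
    'speech.webm'
  )
  formData.append('model', model)

  const response = await fetch(
    'https://api.openai.com/v1/audio/transcriptions',
    {
      method: 'POST',
      headers: { Authorization: `Bearer ${process.env.OPENAI_API_TOKEN}` },
      body: formData,
    }
  )
  const { text } = await response.json()

  // return only what the client's onTranscribe needs
  res.status(200).json({ text })
}
```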
#### Real-time streaming

```tsx
import { useWhisper } from '@chengsokdara/use-whisper'

const App = () => {
  const { transcript } = useWhisper({
    apiKey: process.env.OPENAI_API_TOKEN, // YOUR_OPEN_AI_TOKEN
    streaming: true,
    timeSlice: 1_000, // 1 second
    whisperConfig: {
      language: 'en',
    },
  })

  return (
    <div>
      <p>{transcript.text}</p>
    </div>
  )
}
```
#### Remove silence

```tsx
import { useWhisper } from '@chengsokdara/use-whisper'

const App = () => {
  const { transcript } = useWhisper({
    apiKey: process.env.OPENAI_API_TOKEN, // YOUR_OPEN_AI_TOKEN
    // use ffmpeg-wasm to remove silence from the recording
    removeSilence: true,
  })

  return (
    <div>
      <p>{transcript.text}</p>
    </div>
  )
}
```
#### Auto start

```tsx
import { useWhisper } from '@chengsokdara/use-whisper'

const App = () => {
  const { transcript } = useWhisper({
    apiKey: process.env.OPENAI_API_TOKEN, // YOUR_OPEN_AI_TOKEN
    // automatically start recording when the component mounts
    autoStart: true,
  })

  return (
    <div>
      <p>{transcript.text}</p>
    </div>
  )
}
```
#### Non-stop recording

```tsx
import { useWhisper } from '@chengsokdara/use-whisper'

const App = () => {
  const { transcript } = useWhisper({
    apiKey: process.env.OPENAI_API_TOKEN, // YOUR_OPEN_AI_TOKEN
    nonStop: true, // keep recording as long as the user keeps speaking
    stopTimeout: 5000, // auto stop after 5 seconds of silence
  })

  return (
    <div>
      <p>{transcript.text}</p>
    </div>
  )
}
```
#### Custom Whisper API config

```tsx
import { useWhisper } from '@chengsokdara/use-whisper'

const App = () => {
  const { transcript } = useWhisper({
    apiKey: process.env.OPENAI_API_TOKEN, // YOUR_OPEN_AI_TOKEN
    autoTranscribe: true,
    whisperConfig: {
      prompt: 'previous conversation', // pass the previous conversation as context
      response_format: 'text', // output plain text instead of JSON
      temperature: 0.8, // higher values give more random output
      language: 'es', // Spanish
    },
  })

  return (
    <div>
      <p>{transcript.text}</p>
    </div>
  )
}
```

### Dependencies

Most of the package's dependencies are lazy-loaded, so they are imported only when needed.

### API

#### useWhisper config

| Name | Type | Default | Description |
| ---- | ---- | ------- | ----------- |
| apiKey | string | '' | your OpenAI API token |
| autoStart | boolean | false | auto start speech recording on component mount |
| autoTranscribe | boolean | true | auto transcribe after recording stops |
| mode | string | transcriptions | Whisper mode, either transcriptions or translations (currently only translation to English is supported) |
| nonStop | boolean | false | if true, the recorder auto stops after stopTimeout, but keeps recording as long as the user keeps speaking |
| removeSilence | boolean | false | remove silence before sending the file to the OpenAI API |
| stopTimeout | number | 5,000 ms | required when nonStop is true; controls when the recorder auto stops |
| streaming | boolean | false | transcribe speech in real time, chunked by timeSlice |
| timeSlice | number | 1,000 ms | interval between onDataAvailable events |
| whisperConfig | WhisperApiConfig | undefined | Whisper API transcription config |
| onDataAvailable | (blob: Blob) => void | undefined | callback that receives the recorded blob at each timeSlice interval (see the sketch below) |
| onTranscribe | (blob: Blob) => Promise<Transcript> | undefined | callback to handle transcription on your own custom server |
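`onDataAvailable` gives you the raw audio while recording: it is called at each `timeSlice` interval with the recorded blob. A minimal sketch of the callback shape (the `/api/chunks` endpoint is hypothetical, purely for illustration):

```tsx
import { useWhisper } from '@chengsokdara/use-whisper'

const App = () => {
  const { transcript } = useWhisper({
    apiKey: process.env.OPENAI_API_TOKEN,
    streaming: true,
    timeSlice: 1_000,
    // called at each timeSlice interval with the recorded blob
    onDataAvailable: (blob: Blob) => {
      // do anything with the chunk, e.g. upload it for later analysis
      fetch('/api/chunks', { method: 'POST', body: blob })
    },
  })

  return <p>{transcript.text}</p>
}
```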
#### WhisperApiConfig

| Name | Type | Default | Description |
| ---- | ---- | ------- | ----------- |
| prompt | string | undefined | optional text to guide the model's style or to continue a previous audio segment; the prompt should match the audio language |
| response_format | string | json | format of the transcript output, one of: json, text, srt, verbose_json, or vtt |
| temperature | number | 0 | sampling temperature between 0 and 1; higher values like 0.8 make the output more random, lower values like 0.2 more focused and deterministic; at 0 the model uses log probability to automatically increase the temperature until certain thresholds are hit |
| language | string | en | language of the input audio; supplying it in ISO-639-1 format improves accuracy and latency |
#### useWhisper return values

| Name | Type | Description |
| ---- | ---- | ----------- |
| recording | boolean | speech recording state |
| speaking | boolean | true while the user is speaking |
| transcribing | boolean | true while silence is being removed and the request to the Whisper API is in flight |
| transcript | Transcript | object returned after Whisper transcription completes |
| pauseRecording | () => Promise<void> | pause speech recording |
| startRecording | () => Promise<void> | start speech recording |
| stopRecording | () => Promise<void> | stop speech recording |
#### Transcript

| Name | Type | Description |
| ---- | ---- | ----------- |
| blob | Blob | recorded speech as a JavaScript Blob |
| text | string | transcribed text returned from the Whisper API |
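As a TypeScript type, a custom `onTranscribe` should resolve to this shape (a sketch of the table above; the fields are shown optional here since `transcript` is exposed before the first transcription completes):

```ts
interface Transcript {
  blob?: Blob // recorded speech as a JavaScript Blob
  text?: string // transcribed text returned from the Whisper API
}
```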

Contact me for web or mobile app development using React or React Native: https://chengsokdara.github.io