diff --git a/README.md b/README.md
index 17b8b42..59110ac 100644
--- a/README.md
+++ b/README.md
@@ -78,6 +78,32 @@ const App = () => {
 
 ## Language Model
 
+### Model Options
+
+Choose model quantization and NPU acceleration with Pro models.
+
+```typescript
+import { CactusLM } from 'cactus-react-native';
+
+// Use int4 for faster performance and smaller file size
+const cactusLM = new CactusLM({
+  model: 'lfm2-vl-450m',
+  options: {
+    quantization: 'int4', // 'int4' or 'int8'
+    pro: false
+  }
+});
+
+// Use pro models for NPU acceleration
+const cactusPro = new CactusLM({
+  model: 'lfm2-vl-450m',
+  options: {
+    quantization: 'int4',
+    pro: true
+  }
+});
+```
+
 ### Completion
 
 Generate text responses from the model by providing a conversation history.
@@ -559,6 +585,60 @@ const App = () => {
 };
 ```
 
+### Streaming Transcription
+
+Transcribe audio in real-time with incremental results.
+
+#### Class
+
+```typescript
+import { CactusSTT } from 'cactus-react-native';
+
+const cactusSTT = new CactusSTT({ model: 'whisper-small' });
+
+await cactusSTT.streamTranscribeInit();
+
+const audioChunk: number[] = [/* PCM samples */];
+await cactusSTT.streamTranscribeInsert({ audio: audioChunk });
+
+const result = await cactusSTT.streamTranscribeProcess({
+  options: { confirmationThreshold: 0.95 }
+});
+
+console.log('Confirmed:', result.confirmed);
+console.log('Pending:', result.pending);
+
+const final = await cactusSTT.streamTranscribeFinalize();
+await cactusSTT.streamTranscribeDestroy();
+```
+
+#### Hook
+
+```tsx
+import { useCactusSTT } from 'cactus-react-native';
+
+const App = () => {
+  const cactusSTT = useCactusSTT({ model: 'whisper-small' });
+
+  const handleStream = async () => {
+    await cactusSTT.streamTranscribeInit();
+
+    const audioChunk: number[] = [/* PCM samples */];
+    await cactusSTT.streamTranscribeInsert({ audio: audioChunk });
+
+    await cactusSTT.streamTranscribeProcess();
+  };
+
+  return (
+    <>