import { useCallback, useEffect, useRef, useState } from 'react';
import { z } from 'zod';

// Speech Recognition Event Schemas
const SpeechRecognitionAlternativeSchema = z.object({
  transcript: z.string(),
  confidence: z.number(),
});

const SpeechRecognitionResultItemSchema = z.object({
  0: SpeechRecognitionAlternativeSchema,
  isFinal: z.boolean(),
  length: z.number().optional(),
});

// The native SpeechRecognitionResultList is array-like rather than a real
// Array, so normalise it with Array.from() before handing it to z.array();
// a plain z.array() would reject the real browser event at runtime.
const SpeechRecognitionResultListSchema = z.preprocess(
  (value) => Array.from(value as ArrayLike<unknown>),
  z.array(SpeechRecognitionResultItemSchema),
);

const SpeechRecognitionEventSchema = z.object({
  resultIndex: z.number(),
  results: SpeechRecognitionResultListSchema,
});

const SpeechRecognitionErrorEventSchema = z.object({
  error: z.enum([
    'no-speech',
    'aborted',
    'audio-capture',
    'network',
    'not-allowed',
    'service-not-allowed',
    'bad-grammar',
    'language-not-supported',
  ]),
  message: z.string().optional(),
});

// The Web Speech API is not part of the standard TypeScript lib typings, so
// declare a minimal SpeechRecognition interface and register both the
// standard and the WebKit-prefixed constructors on window.
interface SpeechRecognition extends EventTarget {
  continuous: boolean;
  interimResults: boolean;
  lang: string;
  onresult: (event: z.infer<typeof SpeechRecognitionEventSchema>) => void;
  onerror: (event: z.infer<typeof SpeechRecognitionErrorEventSchema>) => void;
  onend: () => void;
  start: () => void;
  stop: () => void;
}

declare global {
  interface Window {
    SpeechRecognition?: { new (): SpeechRecognition };
    webkitSpeechRecognition?: { new (): SpeechRecognition };
  }
}

export const useSpeechRecognition = (lang = 'ru-RU') => {
  const [isListening, setIsListening] = useState(false);
  const [transcript, setTranscript] = useState('');
  const recognitionRef = useRef<SpeechRecognition | null>(null);

  useEffect(() => {
    if (typeof window === 'undefined') return;

    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
    if (!SpeechRecognition) {
      console.warn('Speech Recognition is not supported by this browser.');
      return;
    }

    const recognition = new SpeechRecognition();
    recognition.continuous = true;
    // Interim results still fire events, but only finalised results below are
    // committed to state.
    recognition.interimResults = true;
    recognition.lang = lang;

    recognition.onresult = (event) => {
      try {
        const validatedEvent = SpeechRecognitionEventSchema.parse(event);
        let finalTranscript = '';
        // Only results from resultIndex onwards are new; earlier entries were
        // already delivered by previous events.
        for (let i = validatedEvent.resultIndex; i < validatedEvent.results.length; ++i) {
          const transcriptPart = validatedEvent.results[i][0].transcript;
          if (validatedEvent.results[i].isFinal) {
            finalTranscript += transcriptPart;
          }
        }
        // Append rather than overwrite, so finalised phrases from earlier in
        // the same continuous session are not lost.
        if (finalTranscript) {
          setTranscript((prev) => prev + finalTranscript);
        }
      } catch (error) {
        console.error('Speech recognition event validation failed:', error);
      }
    };

    recognition.onerror = (event) => {
      try {
        const validatedEvent = SpeechRecognitionErrorEventSchema.parse(event);
        console.error('Speech recognition error:', validatedEvent.error);
      } catch (error) {
        console.error('Speech recognition error event validation failed:', error);
      } finally {
        setIsListening(false);
      }
    };

    recognition.onend = () => {
      if (recognitionRef.current) {
        setIsListening(false);
      }
    };

    recognitionRef.current = recognition;

    return () => {
      if (recognitionRef.current) {
        recognitionRef.current.stop();
      }
      recognitionRef.current = null;
    };
  }, [lang]);

  const startListening = useCallback(() => {
    if (recognitionRef.current && !isListening) {
      setTranscript('');
      recognitionRef.current.start();
      setIsListening(true);
    }
  }, [isListening]);

  const stopListening = useCallback(() => {
    if (recognitionRef.current && isListening) {
      recognitionRef.current.stop();
      setIsListening(false);
    }
  }, [isListening]);

  const isSupported = !!(
    typeof window !== 'undefined' &&
    (window.SpeechRecognition || window.webkitSpeechRecognition)
  );

  return { isListening, transcript, startListening, stopListening, isSupported };
};
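
/*
 * Minimal usage sketch. The `DictationExample` component below is hypothetical
 * (it is not part of this module and would live in a .tsx consumer file); it
 * only illustrates how the hook's return values are meant to be wired together.
 *
 *   import { useSpeechRecognition } from './useSpeechRecognition';
 *
 *   function DictationExample() {
 *     const { isListening, transcript, startListening, stopListening, isSupported } =
 *       useSpeechRecognition('en-US');
 *
 *     if (!isSupported) {
 *       return <p>Speech recognition is not supported in this browser.</p>;
 *     }
 *
 *     return (
 *       <div>
 *         <button onClick={isListening ? stopListening : startListening}>
 *           {isListening ? 'Stop' : 'Start'} dictation
 *         </button>
 *         <p>{transcript}</p>
 *       </div>
 *     );
 *   }
 */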