mirror of
https://github.com/SamyRai/turash.git
synced 2025-12-26 23:01:33 +00:00
- Remove nested git repository from bugulma/frontend/.git
- Add all frontend files to main repository tracking
- Convert from separate frontend/backend repos to unified monorepo
- Preserve all frontend code and development history as tracked files
- Eliminate nested repository complexity for simpler development workflow

This creates a proper monorepo structure with frontend and backend coexisting in the same repository for easier development and deployment.
141 lines
4.1 KiB
TypeScript
import { useCallback, useEffect, useRef, useState } from 'react';
import { z } from 'zod';

// Speech Recognition Event Schemas
const SpeechRecognitionAlternativeSchema = z.object({
  transcript: z.string(),
  confidence: z.number(),
});

const SpeechRecognitionResultItemSchema = z.object({
  0: SpeechRecognitionAlternativeSchema,
  isFinal: z.boolean(),
  length: z.number().optional(),
});

const SpeechRecognitionResultListSchema = z.array(SpeechRecognitionResultItemSchema);

const SpeechRecognitionEventSchema = z.object({
  resultIndex: z.number(),
  results: SpeechRecognitionResultListSchema,
});

const SpeechRecognitionErrorEventSchema = z.object({
  error: z.enum([
    'no-speech',
    'aborted',
    'audio-capture',
    'network',
    'not-allowed',
    'service-not-allowed',
    'bad-grammar',
    'language-not-supported',
  ]),
  message: z.string().optional(),
});
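// For reference, an event shape that SpeechRecognitionEventSchema accepts looks roughly
// like the following (values are illustrative only):
// {
//   resultIndex: 0,
//   results: [
//     { 0: { transcript: 'привет', confidence: 0.92 }, isFinal: true, length: 1 },
//   ],
// }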
// FIX: Define SpeechRecognition interface and add constructors to the window object to fix type errors.
// The Web Speech API is not part of standard TypeScript typings.
interface SpeechRecognition extends EventTarget {
  continuous: boolean;
  interimResults: boolean;
  lang: string;
  onresult: (event: z.infer<typeof SpeechRecognitionEventSchema>) => void;
  onerror: (event: z.infer<typeof SpeechRecognitionErrorEventSchema>) => void;
  onend: () => void;
  start: () => void;
  stop: () => void;
}

declare global {
  interface Window {
    SpeechRecognition: { new (): SpeechRecognition };
    webkitSpeechRecognition: { new (): SpeechRecognition };
  }
}
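/**
 * React hook exposing the browser's (vendor-prefixed) Web Speech API.
 *
 * Illustrative usage sketch; the component below is hypothetical and not part of this module:
 *
 * @example
 * const Dictation = () => {
 *   const { isListening, transcript, startListening, stopListening, isSupported } =
 *     useSpeechRecognition('ru-RU');
 *   if (!isSupported) return <p>Speech recognition is not supported.</p>;
 *   return (
 *     <div>
 *       <button onClick={isListening ? stopListening : startListening}>
 *         {isListening ? 'Stop' : 'Start'}
 *       </button>
 *       <p>{transcript}</p>
 *     </div>
 *   );
 * };
 */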
export const useSpeechRecognition = (lang = 'ru-RU') => {
  const [isListening, setIsListening] = useState(false);
  const [transcript, setTranscript] = useState('');
  const recognitionRef = useRef<SpeechRecognition | null>(null);

  useEffect(() => {
    if (typeof window === 'undefined') return;

    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
    if (!SpeechRecognition) {
      console.warn('Speech Recognition is not supported by this browser.');
      return;
    }

    const recognition = new SpeechRecognition();
    recognition.continuous = true;
    recognition.interimResults = true;
    recognition.lang = lang;
    recognition.onresult = (event) => {
      try {
        // Copy the live event into a plain object before validating: Zod's z.array()
        // rejects the browser's SpeechRecognitionResultList, which is array-like but
        // not a real Array.
        const validatedEvent = SpeechRecognitionEventSchema.parse({
          resultIndex: event.resultIndex,
          results: Array.from(event.results),
        });
        let finalTranscript = '';
        for (let i = validatedEvent.resultIndex; i < validatedEvent.results.length; ++i) {
          const transcriptPart = validatedEvent.results[i][0].transcript;
          if (validatedEvent.results[i].isFinal) {
            finalTranscript += transcriptPart;
          }
        }
        // Append newly finalized results so earlier utterances are not overwritten.
        if (finalTranscript) {
          setTranscript((prev) => prev + finalTranscript);
        }
      } catch (error) {
        console.error('Speech recognition event validation failed:', error);
      }
    };
    recognition.onerror = (event) => {
      try {
        const validatedEvent = SpeechRecognitionErrorEventSchema.parse(event);
        console.error('Speech recognition error:', validatedEvent.error);
        setIsListening(false);
      } catch (error) {
        console.error('Speech recognition error event validation failed:', error);
        setIsListening(false);
      }
    };

    recognition.onend = () => {
      if (recognitionRef.current) {
        setIsListening(false);
      }
    };

    recognitionRef.current = recognition;

    return () => {
      if (recognitionRef.current) {
        recognitionRef.current.stop();
      }
      recognitionRef.current = null;
    };
  }, [lang]);
  const startListening = useCallback(() => {
    if (recognitionRef.current && !isListening) {
      setTranscript('');
      recognitionRef.current.start();
      setIsListening(true);
    }
  }, [isListening]);

  const stopListening = useCallback(() => {
    if (recognitionRef.current && isListening) {
      recognitionRef.current.stop();
      setIsListening(false);
    }
  }, [isListening]);

  const isSupported = !!(
    typeof window !== 'undefined' &&
    (window.SpeechRecognition || window.webkitSpeechRecognition)
  );

  return { isListening, transcript, startListening, stopListening, isSupported };
};