code cleanup, fully functional backend.

This commit is contained in:
Khyretos 2023-12-25 07:49:32 +01:00
parent d5de27ca8b
commit 643d01b23d
11 changed files with 76 additions and 93 deletions

View file

@ -1,42 +0,0 @@
{
"env": {
"development": {
"application/javascript": {
"presets": [
[
"env",
{
"targets": {
"electron": "8.2"
}
}
],
"react"
],
"plugins": [
"transform-async-to-generator"
],
"sourceMaps": "inline"
}
},
"production": {
"application/javascript": {
"presets": [
[
"env",
{
"targets": {
"electron": "8.2"
}
}
],
"react"
],
"plugins": [
"transform-async-to-generator"
],
"sourceMaps": "none"
}
}
}
}

View file

@ -1,25 +0,0 @@
{
"env": {
"browser": true,
"es2021": true
},
"extends": [
"airbnb-base"
],
"parserOptions": {
"ecmaVersion": "latest",
"sourceType": "module"
},
"rules": {
"linebreak-style":"off",
"indent":["error", "tab"],
"no-tabs":"off",
"prefer-destructuring": ["error", {
"AssignmentExpression": {
"array": false,
"object": true
}
}],
"no-console": ["error", { "allow": ["warn", "error"] }]
}
}

7
.gitignore vendored
View file

@ -100,5 +100,8 @@ language_detection_model/*
!language_detection_model/Where to get language detection model.txt
.vscode/
package-lock.json
src/backend/loquendoBot_backend.exe
src/sounds/tts/*
src/sounds/tts/*
loquendoBot_backend.spec
forge.config.js
backend/*
src/backend/loquendoBot_backend.exe

View file

@ -2,7 +2,7 @@ module.exports = {
packagerConfig: {
icon: './src/images/icon.ico',
asar: true,
extraResource: ['./src/config/loquendo.db', './src/sounds', './backend', './speech_to_text_models'],
extraResource: ['./src/config/loquendo.db', './src/sounds', './backend', './language_detection_model', './speech_to_text_models'],
},
rebuildConfig: {},
makers: [

View file

@ -8,7 +8,6 @@
"package": "npm run backend && electron-forge package",
"make": "electron-forge make",
"publish": "electron-forge publish",
"lint": "echo \"No linting configured\"",
"backend": "pyinstaller --noconsole --onefile --collect-all vosk --distpath ./backend ./src/backend/loquendoBot_backend.py"
},
"keywords": [],

View file

@ -33,8 +33,8 @@ settings = configparser.ConfigParser()
app = Flask(__name__)
settingsPath = ""
environment = ""
settingsPath = os.path.normpath(sys.argv[1])
environment = sys.argv[2]
q = queue.Queue()
@ -45,10 +45,23 @@ q = queue.Queue()
class LanguageDetection:
def __init__(self):
pretrained_lang_model = (
r"C:\repos\LoquendoBotV2\language_detection_model\lid.176.bin"
if environment == "dev":
settings_folder = os.path.dirname(settingsPath)
src_folder = os.path.dirname(settings_folder)
main_folder = os.path.dirname(src_folder)
language_detection_model = os.path.join(
main_folder, "language_detection_model", f"lid.176.bin"
)
else:
resources_folder = os.path.dirname(settingsPath)
language_detection_model = os.path.join(
resources_folder, "language_detection_model", f"lid.176.bin"
)
language_detection_model = (
rf"{language_detection_model}"
)
self.model = fasttext.load_model(pretrained_lang_model)
self.model = fasttext.load_model(language_detection_model)
def predict_lang(self, text):
predictions = self.model.predict(text, k=5)  # returns top 5 matching languages
@ -58,17 +71,31 @@ class LanguageDetection:
return language_codes
class STT:
samplerate = None
args = ""
remaining = ""
def __init__(self):
device_info = sd.query_devices(0, "input")
self.samplerate = int(device_info["default_samplerate"])
settings.read(settingsPath)
device_info = sd.query_devices(int(settings["STT"]["MICROPHONE"]), "input")
self.samplerate = int(device_info["default_samplerate"])
if environment == "dev":
settings_folder = os.path.dirname(settingsPath)
src_folder = os.path.dirname(settings_folder)
main_folder = os.path.dirname(src_folder)
vosk_model = os.path.join(
main_folder, "speech_to_text_models", settings["STT"]["LANGUAGE"]
)
else:
resources_folder = os.path.dirname(settingsPath)
vosk_model = os.path.join(
resources_folder, "speech_to_text_models", settings["STT"]["LANGUAGE"]
)
self.model = Model(
r"C:\repos\LoquendoBotV2\speech_to_text_models\vosk-model-small-es-0.42"
rf"{vosk_model}"
)
self.dump_fn = None
@ -234,7 +261,7 @@ def get_voices():
if __name__ == "__main__":
LANGUAGE = LanguageDetection()
lang = LANGUAGE.predict_lang("hola")
lang = LANGUAGE.predict_lang("hola cómo estás")
print(lang)
text = "Keep it up. You are awesome"
translated = MyMemoryTranslator(
@ -242,8 +269,6 @@ if __name__ == "__main__":
).translate(text)
print(translated)
if len(sys.argv) > 1:
environment = sys.argv[2]
settingsPath = os.path.normpath(sys.argv[1])
settings.read(settingsPath)
port = int(settings["GENERAL"]["PORT"])
else:

View file

@ -55,7 +55,7 @@ async function getBackendServerStatus() {
}
}
function getSTT() {
function startSTT() {
const eventSource = new EventSource('http://127.0.0.1:9000/stream');
eventSource.addEventListener('message', (event) => {
@ -134,7 +134,9 @@ async function initiateBackend() {
createBackendServer().then(() => {
getBackendServerStatus();
getInstalledVoices();
getSTT();
if (settings.STT.USE_STT) {
startSTT();
}
});
} catch (error) {
console.error('Error during backend initialization:', error);

View file

@ -18,7 +18,17 @@ function getAvailableMediaDevices(type) {
// Microphones
getAvailableMediaDevices('audioinput')
.then((microphones) => {
microphones.forEach((mic, i) => {
let i = 0;
let tempname = '';
for (let mic of microphones) {
if (mic.deviceId === 'default') {
tempname = mic.label.slice(10); // strip the leading 10-character "Default - " prefix to recover the default device's real name.
}
if (mic.deviceId === 'communications' || mic.label === tempname) {
continue;
}
const option = document.createElement('option');
// Set the options value and text.
@ -31,7 +41,8 @@ getAvailableMediaDevices('audioinput')
if (i === microphones.length - 1) {
document.getElementById('microphone').value = settings.STT.SELECTED_MICROPHONE;
}
});
i++;
}
})
.catch((error) => {
console.error('Error retrieving microphones:', error);

View file

@ -3,7 +3,6 @@ const ini = require('ini');
const path = require('path'); // get directory path
const { ipcRenderer, shell } = require('electron'); // necessary electron libraries to send data to the app
const say = require('say');
const io = require('socket.io-client');
const util = require('util');
@ -101,20 +100,23 @@ fs.readdir(notificationSounds, (err, files) => {
// Check for installed stt models
fs.readdir(sttModels, (err, files) => {
files.forEach((file, i) => {
for (let file of files) {
if (file.includes('.txt')) {
continue;
}
// Create a new option element.
const option = document.createElement('option');
// Set the options value and text.
option.value = i;
option.value = file;
option.innerHTML = file;
// Add the option to the sound selector.
sttModel.appendChild(option);
});
}
// set the saved speech-to-text model language
sttModel.selectedIndex = settings.AUDIO.NOTIFICATION_SOUND;
sttModel.value = settings.STT.LANGUAGE;
});
async function getAudioDevices() {

View file

@ -80,13 +80,20 @@ document.body.querySelector('#primaryVoice').addEventListener('change', () => {
document.body.querySelector('#microphone').addEventListener('change', () => {
var select = document.querySelector('#microphone');
settings.STT.MICROPHONE = select.selectedIndex;
settings.STT.MICROPHONE = select.value;
settings.STT.MICROPHONE_ID = select.options[select.selectedIndex].text;
fs.writeFileSync(settingsPath, ini.stringify(settings));
createNotification('Saved microphone!', 'success');
startVoiceRecognition();
});
document.body.querySelector('#sttModel').addEventListener('change', () => {
var select = document.querySelector('#sttModel');
settings.STT.LANGUAGE = select.value;
fs.writeFileSync(settingsPath, ini.stringify(settings));
createNotification('Saved voice detection language!', 'success');
});
document.body.querySelector('#defaultLanguage').addEventListener('change', () => {
var select = document.querySelector('#defaultLanguage');
settings.TTS.PRIMARY_TTS_LANGUAGE_INDEX = select.selectedIndex;

View file

@ -150,6 +150,7 @@ async function createIniFile() {
MICROPHONE_ID: 'default',
SELECTED_MICROPHONE: 'default',
MICROPHONE: 5,
LANGUAGE: 'vosk-model-small-es-0.42',
},
AUDIO: {
USE_NOTIFICATION_SOUNDS: true,