mirror of https://github.com/hexastack/hexabot
synced 2025-04-10 15:55:55 +00:00

feat: add llm helper

This commit is contained in:
parent d19565aae0
commit 648912fcc7
api/package-lock.json (generated): 14 additions
@@ -43,6 +43,7 @@
     "nestjs-dynamic-providers": "^0.3.4",
     "nestjs-i18n": "^10.4.0",
     "nodemailer": "^6.9.13",
+    "ollama": "^0.5.9",
     "papaparse": "^5.4.1",
     "passport": "^0.6.0",
     "passport-anonymous": "^1.0.1",
@@ -14879,6 +14880,14 @@
       "url": "https://github.com/sponsors/ljharb"
      }
    },
+    "node_modules/ollama": {
+      "version": "0.5.9",
+      "resolved": "https://registry.npmjs.org/ollama/-/ollama-0.5.9.tgz",
+      "integrity": "sha512-F/KZuDRC+ZsVCuMvcOYuQ6zj42/idzCkkuknGyyGVmNStMZ/sU3jQpvhnl4SyC0+zBzLiKNZJnJeuPFuieWZvQ==",
+      "dependencies": {
+        "whatwg-fetch": "^3.6.20"
+      }
+    },
    "node_modules/on-finished": {
      "version": "2.4.1",
      "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
@@ -19059,6 +19068,11 @@
       "node": ">=0.10.0"
      }
    },
+    "node_modules/whatwg-fetch": {
+      "version": "3.6.20",
+      "resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz",
+      "integrity": "sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg=="
+    },
    "node_modules/whatwg-mimetype": {
      "version": "4.0.0",
      "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz",
api/package.json

@@ -64,6 +64,7 @@
     "nestjs-dynamic-providers": "^0.3.4",
     "nestjs-i18n": "^10.4.0",
     "nodemailer": "^6.9.13",
+    "ollama": "^0.5.9",
     "papaparse": "^5.4.1",
     "passport": "^0.6.0",
     "passport-anonymous": "^1.0.1",
api/src/extensions/helpers/ollama/i18n/en/help.json (new file): 23 additions

@@ -0,0 +1,23 @@
{
  "api_url": "URL of the Ollama server.",
  "model": "Determines which model to run. You need to ensure to pull the model in Ollama to be able to use it.",
  "keep_alive": "Time to keep the model in memory.",
  "max_messages_ctx": "Number of messages to include in the context.",
  "context": "Provide context to the assistant (e.g., You are an AI assistant).",
  "instructions": "Instructions to give to the assistant.",
  "fallback_message": "Message to return in case there is an API error.",
  "mirostat": "Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)",
  "mirostat_eta": "Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)",
  "mirostat_tau": "Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)",
  "num_ctx": "Sets the size of the context window used to generate the next token. (Default: 2048)",
  "repeat_last_n": "Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)",
  "repeat_penalty": "Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1)",
  "temperature": "The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)",
  "seed": "Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0)",
  "stop": "Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate `stop` parameters in a modelfile.",
  "tfs_z": "Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)",
  "num_predict": "Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context)",
  "top_k": "Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)",
  "top_p": "Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)",
  "min_p": "Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter *p* represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with *p*=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)"
}
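The keys above mirror Ollama's generation parameters. As a rough, hedged sketch (not part of this commit), this is how values described in this file would typically be forwarded to the `ollama` JavaScript client that the commit adds as a dependency; the host, model name, and option values below are simply the defaults documented above.

```ts
// Illustrative sketch only: maps the documented settings onto an Ollama chat
// request. Values are the documented defaults, not code from the helper.
import { Ollama } from 'ollama';

async function main() {
  const client = new Ollama({ host: 'http://ollama:11434' }); // api_url

  const response = await client.chat({
    model: 'tinyllama', // model (must already be pulled in Ollama)
    keep_alive: '5m', // keep_alive: how long the model stays in memory
    messages: [
      { role: 'system', content: 'You are an AI assistant.' }, // context
      { role: 'user', content: 'Hello!' },
    ],
    options: {
      temperature: 0.8, // creativity of the answer
      top_k: 40, // sampling pool size
      top_p: 0.9, // nucleus sampling threshold
      num_ctx: 2048, // context window size
      num_predict: 128, // maximum tokens to generate
      repeat_penalty: 1.1, // how strongly repetition is penalized
    },
  });

  console.log(response.message.content);
}

main();
```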
api/src/extensions/helpers/ollama/i18n/en/label.json (new file): 23 additions

@@ -0,0 +1,23 @@
{
  "api_url": "API URL",
  "model": "Model",
  "keep_alive": "Keep Alive",
  "max_messages_ctx": "Max Context Messages",
  "context": "Context",
  "instructions": "Instructions",
  "fallback_message": "Fallback Message",
  "mirostat": "Mirostat",
  "mirostat_eta": "Mirostat Eta",
  "mirostat_tau": "Mirostat Tau",
  "num_ctx": "Context Window Size",
  "repeat_last_n": "Repeat Last N",
  "repeat_penalty": "Repeat Penalty",
  "temperature": "Temperature",
  "seed": "Seed",
  "stop": "Stop",
  "tfs_z": "TFS Z",
  "num_predict": "Maximum number of tokens",
  "top_k": "Top K",
  "top_p": "Top P",
  "min_p": "Min P"
}
api/src/extensions/helpers/ollama/i18n/en/title.json (new file): 3 additions

@@ -0,0 +1,3 @@
{
  "ollama": "Ollama"
}
api/src/extensions/helpers/ollama/i18n/fr/help.json (new file): 23 additions

@@ -0,0 +1,23 @@
{
  "api_url": "Adresse URL du serveur Ollama.",
  "model": "Détermine le modèle à utiliser. Assurez-vous de charger le modèle sur Ollama pour pouvoir l'utiliser.",
  "keep_alive": "Temps pendant lequel le modèle reste en mémoire.",
  "max_messages_ctx": "Nombre maximum de messages à inclure dans le contexte.",
  "context": "Fournit un contexte à l'assistant (par exemple : Vous êtes un assistant IA).",
  "instructions": "Instructions à donner à l'assistant.",
  "fallback_message": "Message à retourner en cas d'erreur API.",
  "mirostat": "Active le prélèvement de Mirostat pour contrôler la perplexité. (par défaut : 0, 0 = désactivé, 1 = Mirostat, 2 = Mirostat 2.0)",
  "mirostat_eta": "Influence la rapidité de réaction de l'algorithme aux retours du texte généré. Un taux d'apprentissage plus bas entraînera des ajustements plus lents, tandis qu'un taux plus élevé rendra l'algorithme plus réactif. (Par défaut : 0.1)",
  "mirostat_tau": "Contrôle l'équilibre entre la cohérence et la diversité de la sortie. Une valeur plus basse résulte en un texte plus concentré et cohérent. (Par défaut : 5.0)",
  "num_ctx": "Définit la taille de la fenêtre de contexte utilisée pour générer le prochain jeton. (Par défaut : 2048)",
  "repeat_last_n": "Définit jusqu'où le modèle doit regarder en arrière pour éviter la répétition. (Par défaut : 64, 0 = désactivé, -1 = num_ctx)",
  "repeat_penalty": "Définit la force de la pénalité pour les répétitions. Une valeur plus élevée (par exemple, 1.5) pénalisera plus fortement les répétitions, tandis qu'une valeur plus basse (par exemple, 0.9) sera plus clémente. (Par défaut : 1.1)",
  "temperature": "La température du modèle. Augmenter la température rendra le modèle plus créatif. (Par défaut : 0.8)",
  "seed": "Définit la graine de nombre aléatoire à utiliser pour la génération. Fixer ce numéro permettra au modèle de générer le même texte pour la même invite. (Par défaut : 0)",
  "stop": "Définit les séquences d'arrêt à utiliser. Lorsque ce motif est rencontré, le modèle cessera de générer du texte et retournera. Plusieurs motifs d'arrêt peuvent être définis en spécifiant plusieurs paramètres `stop` séparés dans un fichier de modèle.",
  "tfs_z": "L'échantillonnage sans queue est utilisé pour réduire l'impact des jetons moins probables dans la sortie. Une valeur plus élevée (par exemple, 2.0) réduira davantage l'impact, tandis qu'une valeur de 1.0 désactive ce paramètre. (par défaut : 1)",
  "num_predict": "Nombre maximum de jetons à prédire lors de la génération de texte. (Par défaut : 128, -1 = génération infinie, -2 = remplir le contexte)",
  "top_k": "Réduit la probabilité de générer des non-sens. Une valeur plus élevée (par exemple, 100) donnera des réponses plus diverses, tandis qu'une valeur plus basse (par exemple, 10) sera plus conservatrice. (Par défaut : 40)",
  "top_p": "Fonctionne conjointement avec top-k. Une valeur plus élevée (par exemple, 0.95) conduira à un texte plus diversifié, tandis qu'une valeur plus basse (par exemple, 0.5) générera un texte plus concentré et conservateur. (Par défaut : 0.9)",
  "min_p": "Alternative au top_p, et vise à assurer un équilibre entre la qualité et la variété. Le paramètre *p* représente la probabilité minimum pour qu'un jeton soit considéré, par rapport à la probabilité du jeton le plus probable. Par exemple, avec *p* = 0.05 et le jeton le plus probable ayant une probabilité de 0.9, les logits d'une valeur inférieure à 0.045 sont filtrés. (Par défaut : 0.0)"
}
api/src/extensions/helpers/ollama/i18n/fr/label.json (new file): 23 additions

@@ -0,0 +1,23 @@
{
  "api_url": "URL de l'API",
  "model": "Modèle",
  "keep_alive": "Maintien en Vie",
  "max_messages_ctx": "Nombre Maximum de Messages",
  "context": "Contexte",
  "instructions": "Instructions",
  "fallback_message": "Message de Secours",
  "mirostat": "Mirostat",
  "mirostat_eta": "Mirostat Eta",
  "mirostat_tau": "Mirostat Tau",
  "num_ctx": "Num Ctx",
  "repeat_last_n": "Répéter Dernier N",
  "repeat_penalty": "Pénalité de Répétition",
  "temperature": "Température",
  "seed": "Graine",
  "stop": "Arrêt",
  "tfs_z": "TFS Z",
  "num_predict": "Nombre de Tokens",
  "top_k": "Top K",
  "top_p": "Top P",
  "min_p": "Min P"
}
api/src/extensions/helpers/ollama/i18n/fr/title.json (new file): 3 additions

@@ -0,0 +1,3 @@
{
  "ollama": "Ollama"
}
api/src/extensions/helpers/ollama/index.d.ts (vendored, new file): 14 additions

@@ -0,0 +1,14 @@
import { OLLAMA_HELPER_GROUP, OLLAMA_HELPER_SETTINGS } from './settings';

declare global {
  interface Settings extends SettingTree<typeof OLLAMA_HELPER_SETTINGS> {}
}

declare module '@nestjs/event-emitter' {
  interface IHookExtensionsOperationMap {
    [OLLAMA_HELPER_GROUP]: TDefinition<
      object,
      SettingMapByType<typeof OLLAMA_HELPER_SETTINGS>
    >;
  }
}
api/src/extensions/helpers/ollama/index.helper.ts (new file): 127 additions

@@ -0,0 +1,127 @@
/*
 * Copyright © 2024 Hexastack. All rights reserved.
 *
 * Licensed under the GNU Affero General Public License v3.0 (AGPLv3) with the following additional terms:
 * 1. The name "Hexabot" is a trademark of Hexastack. You may not use this name in derivative works without express written permission.
 * 2. All derivative works must include clear attribution to the original creator and software, Hexastack and Hexabot, in a prominent location (e.g., in the software's "About" section, documentation, and README file).
 */

import { Injectable, OnApplicationBootstrap } from '@nestjs/common';
import { OnEvent } from '@nestjs/event-emitter';
import { Ollama } from 'ollama';

import { AnyMessage } from '@/chat/schemas/types/message';
import { HelperService } from '@/helper/helper.service';
import BaseLlmHelper from '@/helper/lib/base-llm-helper';
import { LoggerService } from '@/logger/logger.service';
import { Setting } from '@/setting/schemas/setting.schema';
import { SettingService } from '@/setting/services/setting.service';

import { OLLAMA_HELPER_NAME, OLLAMA_HELPER_SETTINGS } from './settings';

@Injectable()
export default class OllamaLlmHelper
  extends BaseLlmHelper<typeof OLLAMA_HELPER_NAME>
  implements OnApplicationBootstrap
{
  private client: Ollama;

  /**
   * Instantiate the LLM helper
   *
   * @param logger - Logger service
   */
  constructor(
    settingService: SettingService,
    helperService: HelperService,
    protected readonly logger: LoggerService,
  ) {
    super(
      OLLAMA_HELPER_NAME,
      OLLAMA_HELPER_SETTINGS,
      settingService,
      helperService,
      logger,
    );
  }

  async onApplicationBootstrap() {
    const settings = await this.getSettings();

    this.client = new Ollama({ host: settings.api_url });
  }

  @OnEvent('hook:ollama:api_url')
  handleApiUrlChange(setting: Setting) {
    this.client = new Ollama({ host: setting.value });
  }

  /**
   * Generates a response using LLM
   *
   * @param prompt - The input text from the user
   * @param model - The model to be used
   * @returns {Promise<string>} - The generated response from the LLM
   */
  async generateResponse(prompt: string, model: string): Promise<string> {
    const response = await this.client.generate({
      model,
      prompt,
    });

    return response.response ? response.response : '';
  }

  /**
   * Formats messages to the Ollama required data structure
   *
   * @param messages - Message history to include
   *
   * @returns Ollama message array
   */
  private formatMessages(messages: AnyMessage[]) {
    return messages.map((m) => {
      return {
        role: 'sender' in m && m.sender ? 'user' : 'assistant',
        content: 'text' in m.message && m.message.text ? m.message.text : '',
      };
    });
  }

  /**
   * Send a chat completion request with the conversation history.
   * You can use this same approach to start the conversation
   * using multi-shot or chain-of-thought prompting.
   *
   * @param prompt - The input text from the user
   * @param model - The model to be used
   * @param history - Array of messages
   * @returns {Promise<string>} - The generated response from the LLM
   */
  public async generateChatCompletion(
    prompt: string,
    model: string,
    systemPrompt: string,
    history: AnyMessage[] = [],
    { keepAlive = '5m', options = {} },
  ) {
    const response = await this.client.chat({
      model,
      messages: [
        {
          role: 'system',
          content: systemPrompt,
        },
        ...this.formatMessages(history),
        {
          role: 'user',
          content: prompt,
        },
      ],
      keep_alive: keepAlive,
      options,
    });

    return response.message.content ? response.message.content : '';
  }
}
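One detail worth calling out in formatMessages() above: a history entry is mapped to the 'user' role when it carries a sender, and to 'assistant' otherwise. The following standalone sketch illustrates that mapping; it is hedged, with simplified stand-in types rather than Hexabot's real AnyMessage schema.

```ts
// Hedged illustration of the role mapping done by formatMessages() above.
// SimplifiedMessage is a stand-in type, not Hexabot's AnyMessage schema.
type SimplifiedMessage = {
  sender?: string; // present on end-user (subscriber) messages
  message: { text?: string };
};

function toOllamaMessages(history: SimplifiedMessage[]) {
  return history.map((m) => ({
    role: m.sender ? 'user' : 'assistant',
    content: m.message.text ?? '',
  }));
}

// A two-turn exchange becomes alternating user/assistant entries.
console.log(
  toOllamaMessages([
    { sender: 'subscriber-1', message: { text: 'Hi!' } },
    { message: { text: 'Hello, how can I help you today?' } },
  ]),
);
```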
api/src/extensions/helpers/ollama/package.json (new file): 10 additions

@@ -0,0 +1,10 @@
{
  "name": "hexabot-ollama",
  "version": "2.0.0",
  "description": "The Ollama Helper Extension for Hexabot Chatbot / Agent Builder to enable the LLM Capability",
  "dependencies": {
    "ollama": "^0.5.9"
  },
  "author": "Hexastack",
  "license": "AGPL-3.0-only"
}
api/src/extensions/helpers/ollama/settings.ts (new file): 125 additions

@@ -0,0 +1,125 @@
import { HelperSetting } from '@/helper/types';
import { SettingType } from '@/setting/schemas/types';

export const OLLAMA_HELPER_NAME = 'ollama';

export const OLLAMA_HELPER_GROUP = 'ollama';

export const OLLAMA_HELPER_SETTINGS = [
  {
    label: 'api_url',
    group: OLLAMA_HELPER_GROUP,
    type: SettingType.text,
    value: 'http://ollama:11434', // Default value
  },
  {
    label: 'model',
    group: OLLAMA_HELPER_GROUP,
    type: SettingType.text,
    value: 'tinyllama', // Default model
  },
  {
    label: 'keep_alive',
    group: OLLAMA_HELPER_GROUP,
    type: SettingType.text,
    value: '5m', // Default value for keeping the model in memory
  },
  {
    label: 'mirostat',
    group: OLLAMA_HELPER_GROUP,
    subgroup: 'options',
    type: SettingType.number,
    value: 0, // Default: disabled
  },
  {
    label: 'mirostat_eta',
    group: OLLAMA_HELPER_GROUP,
    subgroup: 'options',
    type: SettingType.number,
    value: 0.1, // Default value
  },
  {
    label: 'mirostat_tau',
    group: OLLAMA_HELPER_GROUP,
    subgroup: 'options',
    type: SettingType.number,
    value: 5.0, // Default value
  },
  {
    label: 'num_ctx',
    group: OLLAMA_HELPER_GROUP,
    subgroup: 'options',
    type: SettingType.number,
    value: 2048, // Default value
  },
  {
    label: 'repeat_last_n',
    group: OLLAMA_HELPER_GROUP,
    subgroup: 'options',
    type: SettingType.number,
    value: 64, // Default value
  },
  {
    label: 'repeat_penalty',
    group: OLLAMA_HELPER_GROUP,
    subgroup: 'options',
    type: SettingType.number,
    value: 1.1, // Default value
  },
  {
    label: 'temperature',
    group: OLLAMA_HELPER_GROUP,
    subgroup: 'options',
    type: SettingType.number,
    value: 0.8, // Default value
  },
  {
    label: 'seed',
    group: OLLAMA_HELPER_GROUP,
    subgroup: 'options',
    type: SettingType.number,
    value: 0, // Default value
  },
  {
    label: 'stop',
    group: OLLAMA_HELPER_GROUP,
    subgroup: 'options',
    type: SettingType.text,
    value: 'AI assistant:', // Default stop sequence
  },
  {
    label: 'tfs_z',
    group: OLLAMA_HELPER_GROUP,
    subgroup: 'options',
    type: SettingType.number,
    value: 1, // Default value, 1.0 means disabled
  },
  {
    label: 'num_predict',
    group: OLLAMA_HELPER_GROUP,
    subgroup: 'options',
    type: SettingType.number,
    value: 20, // Default value
  },
  {
    label: 'top_k',
    group: OLLAMA_HELPER_GROUP,
    subgroup: 'options',
    type: SettingType.number,
    value: 40, // Default value
  },
  {
    label: 'top_p',
    group: OLLAMA_HELPER_GROUP,
    subgroup: 'options',
    type: SettingType.number,
    value: 0.9, // Default value
  },
  {
    label: 'min_p',
    group: OLLAMA_HELPER_GROUP,
    subgroup: 'options',
    type: SettingType.number,
    value: 0.0, // Default value
  },
] as const satisfies HelperSetting<typeof OLLAMA_HELPER_NAME>[];
api/src/helper/helper.service.ts

@@ -86,4 +86,24 @@ export class HelperService {
 
     return defaultHelper;
   }
+
+  /**
+   * Get default LLM helper.
+   *
+   * @returns - The helper
+   */
+  async getDefaultLlmHelper() {
+    const settings = await this.settingService.getSettings();
+
+    const defaultHelper = this.get(
+      HelperType.LLM,
+      settings.chatbot_settings.default_llm_helper,
+    );
+
+    if (!defaultHelper) {
+      throw new Error(`Unable to find default LLM helper`);
+    }
+
+    return defaultHelper;
+  }
 }
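A hedged usage sketch, not part of this commit: a hypothetical consumer service resolves the default LLM helper through the new getDefaultLlmHelper() and asks it for a chat completion. Only getDefaultLlmHelper() and generateChatCompletion() come from this diff; the service name, model, system prompt, and options are illustrative.

```ts
// Hypothetical consumer, for illustration only.
import { Injectable } from '@nestjs/common';

import { HelperService } from '@/helper/helper.service';

@Injectable()
export class SmartReplyService {
  constructor(private readonly helperService: HelperService) {}

  async reply(prompt: string): Promise<string> {
    // Typed as BaseLlmHelper<string> thanks to the TypeOfHelper change later
    // in this diff.
    const llm = await this.helperService.getDefaultLlmHelper();

    // Model, system prompt, and options are illustrative defaults.
    return llm.generateChatCompletion(
      prompt,
      'tinyllama',
      'You are an AI assistant.',
      [],
      { keepAlive: '5m', options: { temperature: 0.8 } },
    );
  }
}
```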
api/src/helper/lib/base-llm-helper.ts (new file): 59 additions

@@ -0,0 +1,59 @@
/*
 * Copyright © 2024 Hexastack. All rights reserved.
 *
 * Licensed under the GNU Affero General Public License v3.0 (AGPLv3) with the following additional terms:
 * 1. The name "Hexabot" is a trademark of Hexastack. You may not use this name in derivative works without express written permission.
 * 2. All derivative works must include clear attribution to the original creator and software, Hexastack and Hexabot, in a prominent location (e.g., in the software's "About" section, documentation, and README file).
 */

import { AnyMessage } from '@/chat/schemas/types/message';
import { LoggerService } from '@/logger/logger.service';
import { SettingService } from '@/setting/services/setting.service';

import { HelperService } from '../helper.service';
import { HelperSetting, HelperType } from '../types';

import BaseHelper from './base-helper';

export default abstract class BaseLlmHelper<
  N extends string,
> extends BaseHelper<N> {
  protected readonly type: HelperType = HelperType.LLM;

  constructor(
    name: N,
    settings: HelperSetting<N>[],
    settingService: SettingService,
    helperService: HelperService,
    logger: LoggerService,
  ) {
    super(name, settings, settingService, helperService, logger);
  }

  /**
   * Generates a response using LLM
   *
   * @param prompt - The input text from the user
   * @param model - The model to be used
   * @returns {Promise<string>} - The generated response from the LLM
   */
  abstract generateResponse(prompt: string, model: string): Promise<string>;

  /**
   * Send a chat completion request with the conversation history.
   * You can use this same approach to start the conversation
   * using multi-shot or chain-of-thought prompting.
   *
   * @param prompt - The input text from the user
   * @param model - The model to be used
   * @param history - Array of messages
   * @returns {Promise<string>} - The generated response from the LLM
   */
  abstract generateChatCompletion(
    prompt: string,
    model: string,
    systemPrompt?: string,
    history?: AnyMessage[],
    extra?: any,
  ): Promise<string>;
}
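For contrast with the Ollama implementation earlier in this diff, here is a minimal, hedged sketch of what another backend would have to provide against this abstract class. The 'echo-llm' name, the empty settings array, and the trivial bodies are placeholders, not code from the repository.

```ts
// Hypothetical helper, illustrating the BaseLlmHelper contract only.
import { Injectable } from '@nestjs/common';

import { AnyMessage } from '@/chat/schemas/types/message';
import { HelperService } from '@/helper/helper.service';
import BaseLlmHelper from '@/helper/lib/base-llm-helper';
import { LoggerService } from '@/logger/logger.service';
import { SettingService } from '@/setting/services/setting.service';

@Injectable()
export class EchoLlmHelper extends BaseLlmHelper<'echo-llm'> {
  constructor(
    settingService: SettingService,
    helperService: HelperService,
    logger: LoggerService,
  ) {
    // A real helper would declare settings the way OLLAMA_HELPER_SETTINGS does.
    super('echo-llm', [], settingService, helperService, logger);
  }

  async generateResponse(prompt: string, _model: string): Promise<string> {
    return `echo: ${prompt}`;
  }

  async generateChatCompletion(
    prompt: string,
    _model: string,
    _systemPrompt?: string,
    _history: AnyMessage[] = [],
  ): Promise<string> {
    return `echo: ${prompt}`;
  }
}
```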
api/src/helper/types.ts

@@ -1,6 +1,7 @@
 import { SettingCreateDto } from '@/setting/dto/setting.dto';
 
 import BaseHelper from './lib/base-helper';
+import BaseLlmHelper from './lib/base-llm-helper';
 import BaseNlpHelper from './lib/base-nlp-helper';
 
 export namespace Nlp {
@@ -24,12 +25,15 @@ export namespace Nlp {
 
 export enum HelperType {
   NLU = 'nlu',
+  LLM = 'llm',
   UTIL = 'util',
 }
 
-export type TypeOfHelper<T extends HelperType> = T extends HelperType.NLU
-  ? BaseNlpHelper<string>
-  : BaseHelper;
+export type TypeOfHelper<T extends HelperType> = T extends HelperType.LLM
+  ? BaseLlmHelper<string>
+  : T extends HelperType.NLU
+    ? BaseNlpHelper<string>
+    : BaseHelper;
 
 export type HelperRegistry<H extends BaseHelper = BaseHelper> = Map<
   HelperType,
@@ -24,6 +24,20 @@ export const DEFAULT_SETTINGS = [
     },
     weight: 1,
   },
+  {
+    group: 'chatbot_settings',
+    label: 'default_llm_helper',
+    value: 'ollama',
+    type: SettingType.select,
+    config: {
+      multiple: false,
+      allowCreate: false,
+      entity: 'Helper',
+      idKey: 'name',
+      labelKey: 'name',
+    },
+    weight: 2,
+  },
   {
     group: 'chatbot_settings',
     label: 'global_fallback',
@@ -5,11 +5,13 @@
   "label": {
     "global_fallback": "Enable Global Fallback?",
     "fallback_message": "Fallback Message",
-    "default_nlu_helper": "Default NLU Helper"
+    "default_nlu_helper": "Default NLU Helper",
+    "default_llm_helper": "Default LLM Helper"
   },
   "help": {
     "global_fallback": "Global fallback allows you to send custom messages when user entry does not match any of the block messages.",
     "fallback_message": "If no fallback block is selected, then one of these messages will be sent.",
-    "default_nlu_helper": "The NLU helper is responsible for processing and understanding user inputs, including tasks like intent prediction, language detection, and entity recognition."
+    "default_nlu_helper": "The NLU helper is responsible for processing and understanding user inputs, including tasks like intent prediction, language detection, and entity recognition.",
+    "default_llm_helper": "The LLM helper leverages advanced generative AI to perform tasks such as text generation, chat completion, and complex query responses."
   }
 }
@@ -5,11 +5,13 @@
   "label": {
     "global_fallback": "Activer la réponse de secours globale ?",
     "fallback_message": "Message de secours",
-    "default_nlu_helper": "Utilitaire NLU par défaut"
+    "default_nlu_helper": "Utilitaire NLU par défaut",
+    "default_llm_helper": "Utilitaire LLM par défaut"
   },
   "help": {
     "global_fallback": "La réponse de secours globale vous permet d'envoyer des messages personnalisés lorsque l'entrée de l'utilisateur ne correspond à aucun des messages des blocs.",
     "fallback_message": "Si aucun bloc de secours n'est sélectionné, l'un de ces messages sera envoyé.",
-    "default_nlu_helper": "Utilitaire du traitement et de la compréhension des entrées des utilisateurs, incluant des tâches telles que la prédiction d'intention, la détection de langue et la reconnaissance d'entités."
+    "default_nlu_helper": "Utilitaire du traitement et de la compréhension des entrées des utilisateurs, incluant des tâches telles que la prédiction d'intention, la détection de langue et la reconnaissance d'entités.",
+    "default_llm_helper": "Utilitaire responsable de l'intelligence artificielle générative avancée pour effectuer des tâches telles que la génération de texte, la complétion de chat et les réponses à des requêtes complexes."
   }
 }
@@ -139,6 +139,23 @@ const SettingInput: React.FC<RenderSettingInputProps> = ({
         {...rest}
       />
     );
+  } else if (setting.label === "default_llm_helper") {
+    const { onChange, ...rest } = field;
+
+    return (
+      <AutoCompleteEntitySelect<IHelper, "name", false>
+        searchFields={["name"]}
+        entity={EntityType.LLM_HELPER}
+        format={Format.BASIC}
+        labelKey="name"
+        idKey="name"
+        label={t("label.default_llm_helper")}
+        helperText={t("help.default_llm_helper")}
+        multiple={false}
+        onChange={(_e, selected, ..._) => onChange(selected?.name)}
+        {...rest}
+      />
+    );
   }
 
   return (
@@ -66,6 +66,7 @@ export const ROUTES = {
   [EntityType.CHANNEL]: "/channel",
   [EntityType.HELPER]: "/helper",
   [EntityType.NLU_HELPER]: "/helper/nlu",
+  [EntityType.LLM_HELPER]: "/helper/llm",
 } as const;
 
 export class ApiClient {
@@ -296,6 +296,13 @@ export const NluHelperEntity = new schema.Entity(
   },
 );
 
+export const LlmHelperEntity = new schema.Entity(
+  EntityType.LLM_HELPER,
+  undefined,
+  {
+    idAttribute: ({ name }) => name,
+  },
+);
 
 export const ENTITY_MAP = {
   [EntityType.SUBSCRIBER]: SubscriberEntity,
@@ -325,4 +332,5 @@ export const ENTITY_MAP = {
   [EntityType.CHANNEL]: ChannelEntity,
   [EntityType.HELPER]: HelperEntity,
   [EntityType.NLU_HELPER]: NluHelperEntity,
+  [EntityType.LLM_HELPER]: LlmHelperEntity,
 } as const;
@@ -37,6 +37,7 @@ export enum EntityType {
   CHANNEL = "Channel",
   HELPER = "Helper",
   NLU_HELPER = "NluHelper",
+  LLM_HELPER = "LlmHelper",
 }
 
 export type NormalizedEntities = Record<string, Record<string, any>>;
@@ -115,6 +115,7 @@ export const POPULATE_BY_TYPE = {
   [EntityType.CHANNEL]: [],
   [EntityType.HELPER]: [],
   [EntityType.NLU_HELPER]: [],
+  [EntityType.LLM_HELPER]: [],
 } as const;
 
 export type Populate<C extends EntityType> =
@@ -205,6 +206,7 @@ export interface IEntityMapTypes {
   [EntityType.CHANNEL]: IEntityTypes<IChannelAttributes, IChannel>;
   [EntityType.HELPER]: IEntityTypes<IHelperAttributes, IHelper>;
   [EntityType.NLU_HELPER]: IEntityTypes<IHelperAttributes, IHelper>;
+  [EntityType.LLM_HELPER]: IEntityTypes<IHelperAttributes, IHelper>;
 }
 
 export type TType<TParam extends keyof IEntityMapTypes> =