diff --git a/package.json b/package.json
index 01a05ce..a105ea5 100644
--- a/package.json
+++ b/package.json
@@ -11,6 +11,7 @@
     "lint": "next lint"
   },
   "dependencies": {
+    "@google/generative-ai": "^0.21.0",
     "axios": "^1.7.9",
     "next": "15.1.0",
     "react": "^19.0.0",
diff --git a/src/llm/llm-config.js b/src/llm/llm-config.js
new file mode 100644
index 0000000..cfecd57
--- /dev/null
+++ b/src/llm/llm-config.js
@@ -0,0 +1,50 @@
+/**
+ * Configuration class for LLM generation parameters
+ * @class LLMConfig
+ * @property {number} temperature - Controls randomness in the output (0.0 to 2.0)
+ * @property {number} topP - Controls diversity via nucleus sampling (0.0 to 1.0)
+ * @property {number} topK - Controls diversity by limiting sampling to the K most likely tokens
+ * @property {number} maxToken - Maximum number of output tokens
+ */
+export default class LLMConfig {
+  constructor(temperature, topP, topK, maxToken) {
+    if (temperature > 2 || temperature < 0) throw new Error('Temperature must be between 0.0 and 2.0');
+    if (topP > 1 || topP < 0) throw new Error('Top-p must be between 0.0 and 1.0');
+    this._temperature = temperature;
+    this._topP = topP;
+    this._topK = topK;
+    this._maxToken = maxToken;
+  }
+
+  /**
+   * Gets the temperature parameter
+   * @returns {number} The temperature value (0.0 to 2.0)
+   */
+  get temperature() {
+    return this._temperature;
+  }
+
+  /**
+   * Gets the top-p parameter
+   * @returns {number} The top-p value (0.0 to 1.0)
+   */
+  get topP() {
+    return this._topP;
+  }
+
+  /**
+   * Gets the top-k parameter
+   * @returns {number} The top-k value
+   */
+  get topK() {
+    return this._topK;
+  }
+
+  /**
+   * Gets the max output token parameter
+   * @returns {number} The maximum number of output tokens
+   */
+  get maxToken() {
+    return this._maxToken;
+  }
+}
\ No newline at end of file
diff --git a/src/llm/llm-provider.js b/src/llm/llm-provider.js
new file mode 100644
index 0000000..46d6ad8
--- /dev/null
+++ b/src/llm/llm-provider.js
@@ -0,0 +1,34 @@
+import LLMConfig from '@/llm/llm-config.js';
+
+/**
+ * Abstract class for LLM providers (e.g. OpenAI, Gemini, etc.)
+ * @abstract
+ * @class LLMProvider
+ */
+export default class LLMProvider {
+  /**
+   * Constructor for LLMProvider
+   * @param {string} apikey - API key for the LLM service (your secret key)
+   * @param {string} model - Model identifier (e.g. gemini-1.5-pro)
+   * @param {LLMConfig} llmconfig - Configuration for the LLM
+   * @param {string} systemPrompt - System prompt for the LLM
+   */
+  constructor(apikey, model, llmconfig, systemPrompt) {
+    this.apikey = apikey;
+    this.model = model;
+    this.llmconfig = llmconfig;
+    this.systemPrompt = systemPrompt;
+  }
+
+  /**
+   * Executes the LLM with the given prompt
+   * @abstract
+   * @param {string} userPrompt - User input prompt (the code goes in here)
+   * @param {Array<Object>} history - The history of the conversation
+   * @throws {Error} When not implemented by child class
+   * @returns {Promise} The LLM response
+   */
+  async run(userPrompt, history) {
+    throw new Error('The LLM chat method run() must be implemented');
+  }
+}
\ No newline at end of file
diff --git a/src/llm/provider/google.js b/src/llm/provider/google.js
new file mode 100644
index 0000000..e5142c1
--- /dev/null
+++ b/src/llm/provider/google.js
@@ -0,0 +1,43 @@
+import LLMProvider from '@/llm/llm-provider.js';
+import LLMConfig from '@/llm/llm-config.js';
+import { GoogleGenerativeAI } from '@google/generative-ai';
+
+/**
+ * Class for the Google Gemini LLM
+ * @class GoogleProvider
+ */
+export default class GoogleProvider extends LLMProvider {
+  /**
+   * Constructor for the Google Gemini LLM
+   * @param {string} apiKey - API key for Google AI Studio (https://aistudio.google.com)
+   * @param {string} modelName - A model available in Google AI Studio (e.g. gemini-1.5-pro)
+   * @param {LLMConfig} llmconfig - Configuration for the LLM
+   * @param {string} systemPrompt - System prompt for the LLM
+   */
+  constructor(apiKey, modelName, llmconfig, systemPrompt) {
+    super(apiKey, modelName, llmconfig, systemPrompt);
+    this.llm = new GoogleGenerativeAI(apiKey).getGenerativeModel({ model: modelName, systemInstruction: systemPrompt });
+
+    this.config = {
+      temperature: llmconfig.temperature,
+      topP: llmconfig.topP,
+      topK: llmconfig.topK,
+      maxOutputTokens: llmconfig.maxToken,
+      responseMimeType: "text/plain",
+    };
+  }
+
+  /**
+   * Executes the LLM with the given prompt
+   * @param {string} userPrompt - User input prompt (the code goes in here)
+   * @param {Array<Object>} history - The history of the conversation
+   * @returns {Promise} The LLM response
+   */
+  async run(userPrompt, history) {
+    const chatSession = this.llm.startChat({
+      generationConfig: this.config,
+      history: history
+    });
+    return await chatSession.sendMessage(userPrompt);
+  }
+}
diff --git a/src/test/llm/llm-config.test.js b/src/test/llm/llm-config.test.js
new file mode 100644
index 0000000..463034f
--- /dev/null
+++ b/src/test/llm/llm-config.test.js
@@ -0,0 +1,21 @@
+import LLMConfig from '@/llm/llm-config.js';
+
+describe('LLMConfig', () => {
+  describe('constructor validation', () => {
+    it('should create instance with valid parameters', () => {
+      const config = new LLMConfig(0.7, 0.9, 50, 1000);
+      expect(config).toBeInstanceOf(LLMConfig);
+    });
+
+    it('should throw error for temperature < 0', () => {
+      expect(() => new LLMConfig(-0.1, 0.9, 50, 1000))
+        .toThrow('Temperature must be between 0.0 and 2.0');
+    });
+
+    it('should throw error for temperature > 2', () => {
+      expect(() => new LLMConfig(2.1, 0.9, 50, 1000))
+        .toThrow('Temperature must be between 0.0 and 2.0');
+    });
+
+  });
+});
\ No newline at end of file
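
Usage sketch (not part of the diff): a minimal example of how LLMConfig and GoogleProvider are expected to be wired together once this lands. The environment variable name, model name, parameter values, and system prompt below are illustrative assumptions, not values taken from this change.

import LLMConfig from '@/llm/llm-config.js';
import GoogleProvider from '@/llm/provider/google.js';

// All concrete values here are assumptions for illustration only.
const config = new LLMConfig(0.7, 0.9, 40, 1024); // temperature, topP, topK, maxToken
const provider = new GoogleProvider(
  process.env.GEMINI_API_KEY,       // assumed env var name for the Google AI Studio key
  'gemini-1.5-pro',                 // any model available in Google AI Studio
  config,
  'You are a helpful assistant.'    // assumed system prompt
);

// history uses the SDK's content format: [{ role: 'user' | 'model', parts: [{ text: '...' }] }]
const result = await provider.run('Explain what this function does: ...', []);
console.log(result.response.text()); // sendMessage() resolves to a result whose response exposes text()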