Implement LLM Provider with Config (#8) #9

Merged (1 commit) on Dec 17, 2024
1 change: 1 addition & 0 deletions package.json
@@ -11,6 +11,7 @@
"lint": "next lint"
},
"dependencies": {
"@google/generative-ai": "^0.21.0",
"axios": "^1.7.9",
"next": "15.1.0",
"react": "^19.0.0",
50 changes: 50 additions & 0 deletions src/llm/llm-config.js
@@ -0,0 +1,50 @@
/**
* Configuration class for LLM generation parameters
* @class LLMConfig
* @property {number} temperature - Controls randomness in the output (0.0 to 2.0)
* @property {number} topP - Controls diversity via nucleus sampling (0.0 to 1.0)
* @property {number} topK - Controls diversity by limiting to K most likely tokens
* @property {number} maxToken - Maximum number of output tokens
*/
export default class LLMConfig {
constructor(temperature, topP, topK, maxToken) {
if (temperature > 2 || temperature < 0) throw new Error('Temperature must be between 0.0 and 2.0');
if (topP > 1 || topP < 0) throw new Error('Top-p must be between 0.0 and 1.0');
this._temperature = temperature;
this._topP = topP;
this._topK = topK;
this._maxToken = maxToken;
}

/**
* Gets the temperature parameter
* @returns {number} The temperature value (0.0 to 2.0)
*/
get temperature() {
return this._temperature;
}

/**
* Gets the top-p parameter
* @returns {number} The top-p value (0.0 to 1.0)
*/
get topP() {
return this._topP;
}

/**
* Gets the top-k parameter
* @returns {number} The top-k value
*/
get topK() {
return this._topK;
}

/**
* Gets the maximum output token parameter
* @returns {number} The maximum number of output tokens
*/
get maxToken() {
return this._maxToken;
}
}
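
For reference, a minimal construction sketch of the class above (values and the surrounding script are illustrative, not part of this diff):

import LLMConfig from '@/llm/llm-config.js';

// A moderately creative configuration: temperature 0.7, nucleus sampling at 0.9,
// top-40 token sampling, and at most 2048 output tokens.
const config = new LLMConfig(0.7, 0.9, 40, 2048);
console.log(config.temperature); // 0.7

// Out-of-range values are rejected by the constructor guards.
try {
  new LLMConfig(3.0, 0.9, 40, 2048);
} catch (err) {
  console.log(err.message); // 'Temperature must be between 0.0 and 2.0'
}
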
34 changes: 34 additions & 0 deletions src/llm/llm-provider.js
@@ -0,0 +1,34 @@
import LLMConfig from '@/llm/llm-config.js'; // referenced only by the {LLMConfig} JSDoc type below

/**
* Abstract class for LLM providers (e.g. OpenAI, Gemini, etc.)
* @abstract
* @class LLMProvider
*/
export default class LLMProvider {
/**
* Constructor for LLMProvider
* @param {string} apikey - API key for the LLM service (your secret key)
* @param {string} model - Model identifier (e.g. gemini-1.5-pro)
* @param {LLMConfig} llmconfig - Configuration for LLM
* @param {string} systemPrompt - System prompt for the LLM
*/
constructor(apikey, model, llmconfig, systemPrompt) {
this.apikey = apikey;
this.model = model;
this.llmconfig = llmconfig;
this.systemPrompt = systemPrompt;
}

/**
* Executes the LLM with given prompt
* @abstract
* @param {string} userPrompt - User input prompt (the code to analyze goes here)
* @param {Array<{role: string, parts: Array<{text: string}>}>} history - The conversation history
* @throws {Error} When not implemented by child class
* @returns {Promise<string>} The LLM response
*/
async run(userPrompt, history) {
throw new Error('The LLM chat method run() must be implemented');
}
}
43 changes: 43 additions & 0 deletions src/llm/provider/google.js
@@ -0,0 +1,43 @@
import LLMProvider from '@/llm/llm-provider.js';
import LLMConfig from '@/llm/llm-config.js';
import { GoogleGenerativeAI } from '@google/generative-ai';

/**
* Class for Google Gemini LLM
* @class GoogleProvider
*/
export default class GoogleProvider extends LLMProvider {
/**
* Constructor for Google Gemini LLM
* @param {string} apiKey - API key for Google AI Studio (https://aistudio.google.com)
* @param {string} modelName - Name of a model available in Google AI Studio
* @param {LLMConfig} llmconfig - Configuration for LLM
* @param {string} systemPrompt - System prompt for the LLM
*/
constructor(apiKey, modelName, llmconfig, systemPrompt) {
super(apiKey, modelName, llmconfig, systemPrompt);
// The SDK takes the API key directly; the model name and system prompt are passed to getGenerativeModel().
const genAI = new GoogleGenerativeAI(apiKey);
this.llm = genAI.getGenerativeModel({ model: modelName, systemInstruction: systemPrompt });

this.config = {
temperature: llmconfig.temperature,
topP: llmconfig.topP,
topK: llmconfig.topK,
maxOutputTokens: llmconfig.maxToken,
responseMimeType: "text/plain",
}
}

/**
* Executes the LLM with given prompt
* @param {string} userPrompt - User input prompt (the code to analyze goes here)
* @param {Array<{role: string, parts: Array<{text: string}>}>} history - The conversation history
* @returns {Promise<string>} The LLM response
*/
async run(userPrompt, history) {
const chatSession = this.llm.startChat({
generationConfig: this.config,
history: history
});
// sendMessage() resolves to a result object; return only the generated text.
const result = await chatSession.sendMessage(userPrompt);
return result.response.text();
}
}
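
A rough end-to-end usage sketch of the provider above (the environment variable name, model string, and prompts are assumptions for illustration, not part of this diff):

import GoogleProvider from '@/llm/provider/google.js';
import LLMConfig from '@/llm/llm-config.js';

// Hypothetical wiring: the API key is read from GEMINI_API_KEY and the model
// name follows Google AI Studio naming (both assumed here).
const config = new LLMConfig(0.7, 0.9, 40, 2048);
const provider = new GoogleProvider(
  process.env.GEMINI_API_KEY,
  'gemini-1.5-pro',
  config,
  'You are a helpful assistant that reviews code.'
);

// An empty history array starts a fresh conversation.
const reply = await provider.run('Summarize what this module does.', []);
console.log(reply);
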
21 changes: 21 additions & 0 deletions src/test/llm/llm-config.test.js
@@ -0,0 +1,21 @@
import LLMConfig from '@/llm/llm-config.js';

describe('LLMConfig', () => {
describe('constructor validation', () => {
it('should create instance with valid parameters', () => {
const config = new LLMConfig(0.7, 0.9, 50, 1000);
expect(config).toBeInstanceOf(LLMConfig);
});

it('should throw error for temperature < 0', () => {
expect(() => new LLMConfig(-0.1, 0.9, 50, 1000))
.toThrow('Temperature must be between 0.0 and 2.0');
});

it('should throw error for temperature > 2', () => {
expect(() => new LLMConfig(2.1, 0.9, 50, 1000))
.toThrow('Temperature must be between 0.0 and 2.0');
});

});
});
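
The same pattern could be extended to the top-p guard; a sketch of two additional cases (not part of this diff, intended to sit inside the same describe block):

it('should throw error for topP < 0', () => {
  expect(() => new LLMConfig(0.7, -0.1, 50, 1000))
    .toThrow('Top-p must be between 0.0 and 1.0');
});

it('should throw error for topP > 1', () => {
  expect(() => new LLMConfig(0.7, 1.1, 50, 1000))
    .toThrow('Top-p must be between 0.0 and 1.0');
});
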