
Commit b7e7184

Merge pull request #9 from daeisbae/8-implement-llm-provider
Implement LLM Provider with Config (#8)
2 parents 1661ddc + 0187b3f commit b7e7184

File tree

5 files changed (+149 -0 lines)

  package.json
  src/llm/llm-config.js
  src/llm/llm-provider.js
  src/llm/provider/google.js
  src/test/llm/llm-config.test.js

package.json

Lines changed: 1 addition & 0 deletions
@@ -11,6 +11,7 @@
     "lint": "next lint"
   },
   "dependencies": {
+    "@google/generative-ai": "^0.21.0",
     "axios": "^1.7.9",
     "next": "15.1.0",
     "react": "^19.0.0",

src/llm/llm-config.js

Lines changed: 50 additions & 0 deletions
@@ -0,0 +1,50 @@
+/**
+ * Configuration class for LLM generation parameters
+ * @class LLMConfig
+ * @property {number} temperature - Controls randomness in the output (0.0 to 2.0)
+ * @property {number} topP - Controls diversity via nucleus sampling (0.0 to 1.0)
+ * @property {number} topK - Controls diversity by limiting sampling to the K most likely tokens
+ * @property {number} maxToken - Controls the maximum number of output tokens
+ */
+export default class LLMConfig {
+  constructor(temperature, topP, topK, maxToken) {
+    if (temperature > 2 || temperature < 0) throw new Error('Temperature must be between 0.0 and 2.0');
+    if (topP > 1 || topP < 0) throw new Error('Top-p must be between 0.0 and 1.0');
+    this._temperature = temperature;
+    this._topP = topP;
+    this._topK = topK;
+    this._maxToken = maxToken;
+  }
+
+  /**
+   * Gets the temperature parameter
+   * @returns {number} The temperature value (0.0 to 2.0)
+   */
+  get temperature() {
+    return this._temperature;
+  }
+
+  /**
+   * Gets the top-p parameter
+   * @returns {number} The top-p value (0.0 to 1.0)
+   */
+  get topP() {
+    return this._topP;
+  }
+
+  /**
+   * Gets the top-k parameter
+   * @returns {number} The top-k value
+   */
+  get topK() {
+    return this._topK;
+  }
+
+  /**
+   * Gets the max output token parameter
+   * @returns {number} The number of tokens
+   */
+  get maxToken() {
+    return this._maxToken;
+  }
+}
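For reference, a minimal usage sketch of LLMConfig (the parameter values here are illustrative, not taken from the commit):

import LLMConfig from '@/llm/llm-config.js';

// Hypothetical values: temperature, topP, topK, maxToken
const config = new LLMConfig(0.7, 0.9, 40, 2048);
console.log(config.temperature); // 0.7
// new LLMConfig(2.5, 0.9, 40, 2048) would throw: 'Temperature must be between 0.0 and 2.0'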

src/llm/llm-provider.js

Lines changed: 34 additions & 0 deletions
@@ -0,0 +1,34 @@
+import LLMConfig from '@/llm/llm-config.js';
+
+/**
+ * Abstract class for LLM providers (e.g. OpenAI, Gemini, etc.)
+ * @abstract
+ * @class LLMProvider
+ */
+export default class LLMProvider {
+  /**
+   * Constructor for LLMProvider
+   * @param {string} apikey - API key for the LLM service (your secret key)
+   * @param {string} model - Model identifier (e.g. gemini-1.5-pro)
+   * @param {LLMConfig} llmconfig - Configuration for the LLM
+   * @param {string} systemPrompt - System prompt for the LLM
+   */
+  constructor(apikey, model, llmconfig, systemPrompt) {
+    this.apikey = apikey;
+    this.model = model;
+    this.llmconfig = llmconfig;
+    this.systemPrompt = systemPrompt;
+  }
+
+  /**
+   * Executes the LLM with the given prompt
+   * @abstract
+   * @param {string} userPrompt - User input prompt (the code will go here)
+   * @param {Array<{role: string, parts: Array<{text: string}>}>} history - The history of the conversation
+   * @throws {Error} When not implemented by a child class
+   * @returns {Promise<string>} The LLM response
+   */
+  async run(userPrompt, history) {
+    throw new Error('The LLM chat method run() must be implemented');
+  }
+}
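Concrete providers are expected to extend this class and override run(). A minimal hypothetical subclass (not part of this commit, shown only to illustrate the contract):

import LLMProvider from '@/llm/llm-provider.js';

// Hypothetical stub provider; a real provider would call its SDK here.
class EchoProvider extends LLMProvider {
  async run(userPrompt, history) {
    return `echo: ${userPrompt}`;
  }
}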

src/llm/provider/google.js

Lines changed: 43 additions & 0 deletions
@@ -0,0 +1,43 @@
+import LLMProvider from '@/llm/llm-provider.js';
+import LLMConfig from '@/llm/llm-config.js';
+import { GoogleGenerativeAI } from '@google/generative-ai';
+
+/**
+ * Class for Google Gemini LLM
+ * @class GoogleProvider
+ */
+export default class GoogleProvider extends LLMProvider {
+  /**
+   * Constructor for Google Gemini LLM
+   * @param {string} apiKey - API key for Google AI Studio (https://aistudio.google.com)
+   * @param {string} modelName - A model available in Google AI Studio
+   * @param {LLMConfig} llmconfig - Configuration for the LLM
+   * @param {string} systemPrompt - System prompt for the LLM
+   */
+  constructor(apiKey, modelName, llmconfig, systemPrompt) {
+    super(apiKey, modelName, llmconfig, systemPrompt);
+    // The SDK takes the API key directly; model and system prompt are set via getGenerativeModel().
+    const genAI = new GoogleGenerativeAI(apiKey);
+    this.llm = genAI.getGenerativeModel({ model: modelName, systemInstruction: systemPrompt });
+
+    this.config = {
+      temperature: llmconfig.temperature,
+      topP: llmconfig.topP,
+      topK: llmconfig.topK,
+      maxOutputTokens: llmconfig.maxToken,
+      responseMimeType: 'text/plain',
+    };
+  }
+
+  /**
+   * Executes the LLM with the given prompt
+   * @param {string} userPrompt - User input prompt (the code will go here)
+   * @param {Array<{role: string, parts: Array<{text: string}>}>} history - The history of the conversation
+   * @returns {Promise<string>} The LLM response text
+   */
+  async run(userPrompt, history) {
+    const chatSession = this.llm.startChat({ generationConfig: this.config, history: history });
+    const result = await chatSession.sendMessage(userPrompt);
+    return result.response.text();
+  }
+}
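A hypothetical wiring of the pieces in this commit (the environment variable, model name, and prompts are illustrative assumptions, not taken from the repository):

import GoogleProvider from '@/llm/provider/google.js';
import LLMConfig from '@/llm/llm-config.js';

// Illustrative values only; GOOGLE_API_KEY and 'gemini-1.5-flash' are assumptions.
const config = new LLMConfig(0.7, 0.9, 40, 2048);
const provider = new GoogleProvider(process.env.GOOGLE_API_KEY, 'gemini-1.5-flash', config, 'You are a code reviewer.');
const reply = await provider.run('Explain what this function does.', []);
console.log(reply);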

src/test/llm/llm-config.test.js

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+import LLMConfig from '@/llm/llm-config.js';
+
+describe('LLMConfig', () => {
+  describe('constructor validation', () => {
+    it('should create instance with valid parameters', () => {
+      const config = new LLMConfig(0.7, 0.9, 50, 1000);
+      expect(config).toBeInstanceOf(LLMConfig);
+    });
+
+    it('should throw error for temperature < 0', () => {
+      expect(() => new LLMConfig(-0.1, 0.9, 50, 1000))
+        .toThrow('Temperature must be between 0.0 and 2.0');
+    });
+
+    it('should throw error for temperature > 2', () => {
+      expect(() => new LLMConfig(2.1, 0.9, 50, 1000))
+        .toThrow('Temperature must be between 0.0 and 2.0');
+    });
+
+  });
+});
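The suite covers the temperature bounds; an analogous check for the top-p bound (a sketch, not part of this commit) could sit alongside the existing cases:

// Hypothetical additional test case inside the same describe block.
it('should throw error for topP > 1', () => {
  expect(() => new LLMConfig(0.7, 1.1, 50, 1000))
    .toThrow('Top-p must be between 0.0 and 1.0');
});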
