
Commit 504f7f3

Merge pull request #156 from daeisbae/149-support-for-ollama
Ollama Support (#149)
2 parents c161a5e + 0f0f916 commit 504f7f3

3 files changed: +53 -0 lines changed


README.md

Lines changed: 16 additions & 0 deletions
@@ -28,6 +28,22 @@
4. Build the server (`npm run build`)
5. Run (`npm start`)

#### Ollama Configuration Guide

- Recommended if you can run an LLM larger than 14B parameters.
- You do not need to provide an API key.
- Set `LLM_PROVIDER` to `ollama` (it will connect to the default Ollama endpoint).
- Set `LLM_MODELNAME` to a model name listed by the command `ollama ls`.
- If you are using a low-parameter LLM (e.g. 8B or 14B), it is recommended to set `TOKEN_PROCESSING_CHARACTER_LIMIT` between 10000 and 20000 (approximately 300-600 lines of code).

**Example:**

```
LLM_PROVIDER=ollama
LLM_APIKEY=
LLM_MODELNAME=qwen2.5:14b
```
### Additional Information

> [!CAUTION]

src/llm/provider/ollama.ts

Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
import { ChatOllama } from '@langchain/ollama';
import LLMConfig from '../llm-config';
import { HistoryItem, LLMProvider } from '../llm-provider';

export class OllamaProvider extends LLMProvider {
  private llm: ChatOllama;

  constructor(modelName: string, llmconfig: LLMConfig) {
    // Ollama runs locally, so no API key is passed to the base provider.
    super('', modelName, llmconfig, '');
    this.llm = new ChatOllama({
      model: modelName,
      temperature: llmconfig.temperature,
      // format: 'json', // Forcing JSON output degraded the quality of the responses
      topP: llmconfig.topP,
      topK: llmconfig.topK,
    });
  }

  async run(userPrompt: string, history: HistoryItem[]): Promise<string> {
    // Note: only the user prompt is sent; the history argument is currently not forwarded to the model.
    const response = await this.llm.invoke(userPrompt);
    console.log(response.content.toString());
    return response.content.toString();
  }
}
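
For orientation, here is a minimal usage sketch of the new provider, assuming the `@/` path aliases used in `llm-factory.ts` and an `LLMConfig` constructor of the form `(temperature, topP, topK)`; that constructor shape is an assumption, since `llm-config` is not part of this diff:

```
// Hypothetical usage sketch (not part of the commit): exercising OllamaProvider directly.
import LLMConfig from '@/llm/llm-config';
import { OllamaProvider } from '@/llm/provider/ollama';

const config = new LLMConfig(0.7, 0.9, 40); // assumed constructor shape (temperature, topP, topK)
const provider = new OllamaProvider('qwen2.5:14b', config);

provider
  .run('Summarize this repository in one sentence.', []) // history is accepted but ignored by run()
  .then((answer) => console.log(answer));
```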

src/service/llm-factory.ts

Lines changed: 11 additions & 0 deletions
@@ -3,6 +3,7 @@ import DeepSeekProvider from '@/llm/provider/deepseek'
import GoogleProvider from '@/llm/provider/google'
import LLMConfig from '@/llm/llm-config'
import dotenv from 'dotenv'
import { OllamaProvider } from '@/llm/provider/ollama'
dotenv.config()

/**
@@ -19,11 +20,21 @@ export default class LLMFactory {
    const apiKey = process.env.LLM_APIKEY
    const modelName = process.env.LLM_MODELNAME

    if (!provider) {
      throw new Error('LLM Provider is not specified. Please set LLM_PROVIDER in the environment\nExample: LLM_PROVIDER=google, LLM_PROVIDER=deepseek, LLM_PROVIDER=ollama')
    }

    if (!modelName) {
      throw new Error('LLM Model name is not specified. Example: LLM_MODELNAME=llama3.3 for llama3.3')
    }

    switch (provider) {
      case 'google':
        return new GoogleProvider(apiKey!, modelName!, llmConfig)
      case 'deepseek':
        return new DeepSeekProvider(apiKey!, modelName!, llmConfig)
      case 'ollama':
        return new OllamaProvider(modelName!, llmConfig)
      default:
        throw new Error(`Unsupported LLM provider: ${provider}`)
    }
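
As a rough illustration of the new code path, the sketch below shows the environment-driven selection the factory performs; `createProvider` is a hypothetical method name, since the method's signature lies outside this hunk:

```
// Hypothetical sketch (not part of the commit): selecting the Ollama provider through the factory.
// The factory method name and its parameters are assumptions; only part of the class appears in this hunk.
import LLMFactory from '@/service/llm-factory'

process.env.LLM_PROVIDER = 'ollama'
process.env.LLM_MODELNAME = 'qwen2.5:14b'
// LLM_APIKEY may stay empty: the 'ollama' case never passes it to the provider.

const provider = LLMFactory.createProvider() // assumed entry point
provider.run('Hello from a local model', []).then(console.log)
```

Note that, unlike the `google` and `deepseek` cases, the `ollama` case does not forward `apiKey`, which is why the README marks the API key as unnecessary.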

0 commit comments