
Commit 4ec4084
Switch from GPT-3.5 to GPT-4o Mini
1 parent b6aca64

4 files changed: 5 additions & 5 deletions


.github/workflows/test-openai.yml

Lines changed: 1 addition & 1 deletion

@@ -21,7 +21,7 @@ jobs:
         env:
           LLM_API_BASE_URL: 'https://api.openai.com/v1'
           LLM_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-          LLM_CHAT_MODEL: 'gpt-3.5-turbo-0125'
+          LLM_CHAT_MODEL: 'gpt-4o-mini'
           LLM_STREAMING: 'no'

       - run: cat output.txt

ask-llm.clj

Lines changed: 1 addition & 1 deletion

@@ -32,7 +32,7 @@

 (defn chat [messages]
   (let [body {:messages messages
-              :model (or LLM-CHAT-MODEL "gpt-3.5-turbo")
+              :model (or LLM-CHAT-MODEL "gpt-4o-mini")
               :stop ["<|im_end|>" "<|end|>" "<|eot_id|>"]
               :max_tokens 200
               :temperature 0}

ask-llm.js

Lines changed: 2 additions & 2 deletions

@@ -35,7 +35,7 @@ const LLM_DEBUG = process.env.LLM_DEBUG;
 const chat = async (messages, handler) => {
     const url = `${LLM_API_BASE_URL}/chat/completions`;
     const auth = LLM_API_KEY ? { 'Authorization': `Bearer ${LLM_API_KEY}` } : {};
-    const model = LLM_CHAT_MODEL || 'gpt-3.5-turbo';
+    const model = LLM_CHAT_MODEL || 'gpt-4o-mini';
     const stop = ['<|im_end|>', '<|end|>', '<|eot_id|>'];
     const max_tokens = 200;
     const temperature = 0;

@@ -149,4 +149,4 @@ const SYSTEM_PROMPT = 'Answer the question politely and concisely.';
     }

     qa();
-})();
+})();

ask-llm.py

Lines changed: 1 addition & 1 deletion

@@ -24,7 +24,7 @@ async def chat(messages, handler=None):
     if auth_header:
         headers["Authorization"] = auth_header

-    model = LLM_CHAT_MODEL or "gpt-3.5-turbo"
+    model = LLM_CHAT_MODEL or "gpt-4o-mini"
     stop = ["<|im_end|>", "<|end|>", "<|eot_id|>"]
     max_tokens = 200
     temperature = 0
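
Across all three scripts the change is the same: LLM_CHAT_MODEL still takes precedence when set, and gpt-4o-mini merely replaces gpt-3.5-turbo as the fallback. For context, the parameters visible in these hunks (model, stop, max_tokens, temperature) feed a standard OpenAI-compatible chat/completions request. Below is a minimal self-contained sketch of that request using only the Python standard library; it illustrates the assumed request shape and is not the repository's actual (async) implementation:

    import json
    import os
    import urllib.request

    LLM_API_BASE_URL = os.environ.get("LLM_API_BASE_URL", "https://api.openai.com/v1")
    LLM_API_KEY = os.environ.get("LLM_API_KEY")
    LLM_CHAT_MODEL = os.environ.get("LLM_CHAT_MODEL")

    def chat(messages):
        # Same fallback as the diff: the env var wins, gpt-4o-mini is the new default.
        body = {
            "messages": messages,
            "model": LLM_CHAT_MODEL or "gpt-4o-mini",
            "stop": ["<|im_end|>", "<|end|>", "<|eot_id|>"],
            "max_tokens": 200,
            "temperature": 0,
        }
        headers = {"Content-Type": "application/json"}
        if LLM_API_KEY:
            headers["Authorization"] = "Bearer " + LLM_API_KEY
        request = urllib.request.Request(
            LLM_API_BASE_URL + "/chat/completions",
            data=json.dumps(body).encode("utf-8"),
            headers=headers,
            method="POST",
        )
        with urllib.request.urlopen(request) as response:
            reply = json.load(response)
        # Standard chat/completions response shape: the first choice carries
        # the assistant message.
        return reply["choices"][0]["message"]["content"]

    if __name__ == "__main__":
        print(chat([{"role": "user", "content": "Hello!"}]))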
