
Commit 044ba2a

examples/gpt-oss: fix examples
1 parent b0f6b99 commit 044ba2a

2 files changed: +56 −13 lines changed


examples/gpt-oss-tools-stream.py

Lines changed: 37 additions & 9 deletions
@@ -1,7 +1,17 @@
+# /// script
+# requires-python = ">=3.11"
+# dependencies = [
+#   "gpt-oss",
+#   "ollama",
+#   "rich",
+# ]
+# ///
 import random
 from typing import Iterator
 
-from ollama import chat
+from rich import print
+
+from ollama import Client
 from ollama._types import ChatResponse
 
 
@@ -40,35 +50,53 @@ def get_weather_conditions(city: str) -> str:
 
 messages = [{'role': 'user', 'content': 'What is the weather like in London? What are the conditions in Toronto?'}]
 
+client = Client(
+  # Ollama Turbo
+  # host="https://ollama.com", headers={'Authorization': (os.getenv('OLLAMA_API_KEY'))}
+)
 
-model = 'gpt-oss:20b'
+model = 'gpt-oss:120b'
 # gpt-oss can call tools while "thinking"
 # a loop is needed to call the tools and get the results
 final = True
 while True:
-  response_stream: Iterator[ChatResponse] = chat(model=model, messages=messages, tools=[get_weather, get_weather_conditions], stream=True)
+  response_stream: Iterator[ChatResponse] = client.chat(model=model, messages=messages, tools=[get_weather, get_weather_conditions], stream=True)
+  tool_calls = []
+  thinking = ''
+  content = ''
 
   for chunk in response_stream:
+    if chunk.message.tool_calls:
+      tool_calls.extend(chunk.message.tool_calls)
+
     if chunk.message.content:
       if not (chunk.message.thinking or chunk.message.thinking == '') and final:
-        print('\nFinal result: ')
+        print('\n\n' + '='*10)
+        print('Final result: ')
         final = False
       print(chunk.message.content, end='', flush=True)
+
     if chunk.message.thinking:
+      # accumulate thinking
+      thinking += chunk.message.thinking
       print(chunk.message.thinking, end='', flush=True)
 
+
+  if thinking != '' or content != '':
+    messages.append({'role': 'assistant', 'thinking': thinking, 'content': content, 'tool_calls': tool_calls})
+
   print()
 
-  if chunk.message.tool_calls:
-    for tool_call in chunk.message.tool_calls:
+  if tool_calls:
+    for tool_call in tool_calls:
      function_to_call = available_tools.get(tool_call.function.name)
      if function_to_call:
-        print('\nCalling tool: ', tool_call.function.name, 'with arguments: ', tool_call.function.arguments)
+        print('\nCalling tool:', tool_call.function.name, 'with arguments: ', tool_call.function.arguments)
        result = function_to_call(**tool_call.function.arguments)
        print('Tool result: ', result + '\n')
 
-        messages.append(chunk.message)
-        messages.append({'role': 'tool', 'content': result, 'tool_name': tool_call.function.name})
+        result_message = {'role': 'tool', 'content': result, 'tool_name': tool_call.function.name}
+        messages.append(result_message)
      else:
        print(f'Tool {tool_call.function.name} not found')
 
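The streaming loop above collects tool calls from the chunks and then dispatches each one through the available_tools mapping defined earlier in the script. Below is a minimal, self-contained sketch of that dispatch pattern; the get_weather body and the hard-coded name and arguments are stand-ins for what the model would supply via tool_call.function.name and tool_call.function.arguments, not part of the commit:

def get_weather(city: str) -> str:
  # stand-in for the example's real tool implementation
  return f'It is sunny in {city}'

available_tools = {'get_weather': get_weather}

# the model would normally supply these via tool_call.function.name / .arguments
tool_name = 'get_weather'
tool_arguments = {'city': 'London'}

function_to_call = available_tools.get(tool_name)
if function_to_call:
  # unpack the model-supplied arguments into the local Python function
  result = function_to_call(**tool_arguments)
  print('Tool result: ', result + '\n')
else:
  print(f'Tool {tool_name} not found')

Each tool result is then appended to messages as a {'role': 'tool', ...} entry, so the next client.chat call can use it to produce the final answer.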

examples/gpt-oss-tools.py

Lines changed: 19 additions & 4 deletions
@@ -1,6 +1,17 @@
+# /// script
+# requires-python = ">=3.11"
+# dependencies = [
+#   "gpt-oss",
+#   "ollama",
+#   "rich",
+# ]
+# ///
+import os
 import random
 
-from ollama import chat
+from rich import print
+
+from ollama import Client
 from ollama._types import ChatResponse
 
 
@@ -40,11 +51,15 @@ def get_weather_conditions(city: str) -> str:
 messages = [{'role': 'user', 'content': 'What is the weather like in London? What are the conditions in Toronto?'}]
 
 
+client = Client(
+  # Ollama Turbo
+  # host="https://ollama.com", headers={'Authorization': (os.getenv('OLLAMA_API_KEY'))}
+)
 model = 'gpt-oss:20b'
 # gpt-oss can call tools while "thinking"
 # a loop is needed to call the tools and get the results
 while True:
-  response: ChatResponse = chat(model=model, messages=messages, tools=[get_weather, get_weather_conditions])
+  response: ChatResponse = client.chat(model=model, messages=messages, tools=[get_weather, get_weather_conditions])
 
   if response.message.content:
     print('Content: ')
@@ -53,14 +68,14 @@ def get_weather_conditions(city: str) -> str:
     print('Thinking: ')
     print(response.message.thinking + '\n')
 
+  messages.append(response.message)
+
   if response.message.tool_calls:
     for tool_call in response.message.tool_calls:
       function_to_call = available_tools.get(tool_call.function.name)
       if function_to_call:
         result = function_to_call(**tool_call.function.arguments)
         print('Result from tool call name: ', tool_call.function.name, 'with arguments: ', tool_call.function.arguments, 'result: ', result + '\n')
-
-        messages.append(response.message)
         messages.append({'role': 'tool', 'content': result, 'tool_name': tool_call.function.name})
       else:
         print(f'Tool {tool_call.function.name} not found')
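Both examples construct the Client with the Ollama Turbo configuration commented out, so they talk to a local Ollama server by default and expect the gpt-oss model named in the script to be available locally. A minimal sketch of enabling the hosted endpoint instead, assuming the API key is exported as OLLAMA_API_KEY (as the commented-out line suggests):

import os

from ollama import Client

# Point the client at the hosted endpoint instead of a local server.
# The Authorization header carries the API key read from the environment,
# mirroring the commented-out line in both examples.
client = Client(
  host='https://ollama.com',
  headers={'Authorization': os.getenv('OLLAMA_API_KEY')},
)

With the default no-argument Client(), requests go to the local Ollama server (localhost:11434 by default), which is why the examples run unchanged against a local installation.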

0 commit comments
