
Commit 1b2c77d

examples: fmt and streaming version

1 parent c4f0f74

3 files changed: +214 −5 lines

examples/README.md

Lines changed: 4 additions & 3 deletions

@@ -28,9 +28,10 @@ See [ollama/docs/api.md](https://github.com/ollama/ollama/blob/main/docs/api.md)
 - [multi-tool.py](multi-tool.py) - Using multiple tools, with thinking enabled
 
 #### gpt-oss
-- [gpt-oss-tools.py](gpt-oss-tools.py) - Using tools with gpt-oss
-- [gpt-oss-tools-stream.py](gpt-oss-tools-stream.py) - Using tools with gpt-oss, with streaming enabled
-- [gpt-oss-tools-browser.py](gpt-oss-tools-browser.py) - Using browser tools with gpt-oss
+- [gpt-oss-tools.py](gpt-oss-tools.py)
+- [gpt-oss-tools-stream.py](gpt-oss-tools-stream.py)
+- [gpt-oss-tools-browser.py](gpt-oss-tools-browser.py) - Using browser research tools with gpt-oss
+- [gpt-oss-tools-browser-stream.py](gpt-oss-tools-browser-stream.py) - Using browser research tools with gpt-oss, with streaming enabled
 
 
 ### Multimodal with Images - Chat with a multimodal (image chat) model
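Before the new file itself, a quick orientation on what the streaming variant changes. The existing gpt-oss-tools-browser.py example makes a single blocking chat() call per turn, while the new *-stream.py file iterates over partial response chunks as they arrive. A minimal sketch of that API difference, using only the same ollama client, model name, and prompt the examples use:

from ollama import Client

client = Client()
messages = [{'role': 'user', 'content': 'What is Ollama?'}]

# Non-streaming: chat() returns one complete ChatResponse
response = client.chat(model='gpt-oss:20b', messages=messages)
print(response.message.content)

# Streaming: with stream=True, chat() returns an iterator of partial chunks,
# each carrying a fragment of content (and, for gpt-oss, possibly thinking)
for chunk in client.chat(model='gpt-oss:20b', messages=messages, stream=True):
  print(chunk.message.content or '', end='', flush=True)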
examples/gpt-oss-tools-browser-stream.py (new file)

Lines changed: 205 additions & 0 deletions

# /// script
# requires-python = ">=3.11"
# dependencies = [
#   "gpt-oss",
#   "ollama",
#   "rich",
# ]
# ///

import asyncio
import json
from typing import Iterator, Optional

from gpt_oss.tools.simple_browser import ExaBackend, SimpleBrowserTool
from openai_harmony import Author, Role, TextContent
from openai_harmony import Message as HarmonyMessage
from rich import print

from ollama import Client
from ollama._types import ChatResponse

_backend = ExaBackend(source='web')
_browser_tool = SimpleBrowserTool(backend=_backend)


def heading(text):
  print(text)
  print('=' * (len(text) + 3))


async def _browser_search_async(query: str, topn: int = 10, source: str | None = None) -> str:
  # map Ollama message to Harmony format
  harmony_message = HarmonyMessage(
    author=Author(role=Role.USER),
    content=[TextContent(text=json.dumps({'query': query, 'topn': topn}))],
    recipient='browser.search',
  )

  result_text: str = ''
  async for response in _browser_tool._process(harmony_message):
    if response.content:
      for content in response.content:
        if isinstance(content, TextContent):
          result_text += content.text
  return result_text or f'No results for query: {query}'


async def _browser_open_async(id: int | str = -1, cursor: int = -1, loc: int = -1, num_lines: int = -1, *, view_source: bool = False, source: str | None = None) -> str:
  payload = {
    'id': id,
    'cursor': cursor,
    'loc': loc,
    'num_lines': num_lines,
    'view_source': view_source,
    'source': source,
  }

  harmony_message = HarmonyMessage(
    author=Author(role=Role.USER),
    content=[TextContent(text=json.dumps(payload))],
    recipient='browser.open',
  )

  result_text: str = ''
  async for response in _browser_tool._process(harmony_message):
    if response.content:
      for content in response.content:
        if isinstance(content, TextContent):
          result_text += content.text
  return result_text or f'Could not open: {id}'


async def _browser_find_async(pattern: str, cursor: int = -1) -> str:
  payload = {'pattern': pattern, 'cursor': cursor}

  harmony_message = HarmonyMessage(
    author=Author(role=Role.USER),
    content=[TextContent(text=json.dumps(payload))],
    recipient='browser.find',
  )

  result_text: str = ''
  async for response in _browser_tool._process(harmony_message):
    if response.content:
      for content in response.content:
        if isinstance(content, TextContent):
          result_text += content.text
  return result_text or f'Pattern not found: {pattern}'


def browser_search(query: str, topn: int = 10, source: Optional[str] = None) -> str:
  return asyncio.run(_browser_search_async(query=query, topn=topn, source=source))


def browser_open(id: int | str = -1, cursor: int = -1, loc: int = -1, num_lines: int = -1, *, view_source: bool = False, source: Optional[str] = None) -> str:
  return asyncio.run(_browser_open_async(id=id, cursor=cursor, loc=loc, num_lines=num_lines, view_source=view_source, source=source))


def browser_find(pattern: str, cursor: int = -1) -> str:
  return asyncio.run(_browser_find_async(pattern=pattern, cursor=cursor))


# Schema definitions for each browser tool
browser_search_schema = {
  'type': 'function',
  'function': {
    'name': 'browser.search',
  },
}

browser_open_schema = {
  'type': 'function',
  'function': {
    'name': 'browser.open',
  },
}

browser_find_schema = {
  'type': 'function',
  'function': {
    'name': 'browser.find',
  },
}

available_tools = {
  'browser.search': browser_search,
  'browser.open': browser_open,
  'browser.find': browser_find,
}


model = 'gpt-oss:20b'
print('Model: ', model, '\n')

prompt = 'What is Ollama?'
print('You: ', prompt, '\n')
messages = [{'role': 'user', 'content': prompt}]

client = Client()

# gpt-oss can call tools while "thinking"
# a loop is needed to call the tools and get the results
final = True
while True:
  response_stream: Iterator[ChatResponse] = client.chat(
    model=model,
    messages=messages,
    tools=[browser_search_schema, browser_open_schema, browser_find_schema],
    options={'num_ctx': 32000},  # 8192 is the recommended lower limit for the context window
    stream=True,
  )

  tool_calls = []
  thinking = ''
  content = ''

  for chunk in response_stream:
    if chunk.message.tool_calls:
      tool_calls.extend(chunk.message.tool_calls)

    if chunk.message.content:
      # thinking is unset once the model moves on to its final answer;
      # print the heading once, on the first such content chunk
      if not (chunk.message.thinking or chunk.message.thinking == '') and final:
        heading('\n\nFinal result: ')
        final = False
      print(chunk.message.content, end='', flush=True)

    if chunk.message.thinking:
      thinking += chunk.message.thinking
      print(chunk.message.thinking, end='', flush=True)

  if thinking != '':
    messages.append({'role': 'assistant', 'content': thinking, 'tool_calls': tool_calls})

  print()

  if tool_calls:
    for tool_call in tool_calls:
      tool_name = tool_call.function.name
      args = tool_call.function.arguments or {}
      function_to_call = available_tools.get(tool_name)

      if function_to_call:
        heading(f'\nCalling tool: {tool_name}')
        if args:
          print(f'Arguments: {args}')

        try:
          result = function_to_call(**args)
          print(f'Tool result: {result[:200]}')
          if len(result) > 200:
            heading('... [truncated]')
          print()

          result_message = {'role': 'tool', 'content': result, 'tool_name': tool_name}
          messages.append(result_message)

        except Exception as e:
          err = f'Error from {tool_name}: {e}'
          print(err)
          messages.append({'role': 'tool', 'content': err, 'tool_name': tool_name})
      else:
        print(f'Tool {tool_name} not found')
  else:
    # no more tool calls, we can stop the loop
    break
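Since the browser tools go through an external Exa search backend, it can help to smoke-test a single tool call before running the full chat loop. Below is a minimal sketch reusing the same gpt-oss/Harmony calls (including the private _process method) exactly as the example above uses them; it assumes your environment carries valid Exa credentials, e.g. an EXA_API_KEY variable — check your gpt-oss version for the exact requirement:

import asyncio
import json

from gpt_oss.tools.simple_browser import ExaBackend, SimpleBrowserTool
from openai_harmony import Author, Role, TextContent
from openai_harmony import Message as HarmonyMessage


async def main() -> None:
  # one browser.search round-trip, outside of any model loop
  browser = SimpleBrowserTool(backend=ExaBackend(source='web'))
  message = HarmonyMessage(
    author=Author(role=Role.USER),
    content=[TextContent(text=json.dumps({'query': 'Ollama', 'topn': 3}))],
    recipient='browser.search',
  )
  async for response in browser._process(message):
    for content in response.content or []:
      if isinstance(content, TextContent):
        print(content.text)


asyncio.run(main())

Note that both browser examples declare their dependencies inline in the # /// script block (PEP 723), so a runner that understands inline script metadata, such as uv run, can execute them without setting up a separate environment.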

examples/gpt-oss-tools-browser.py

Lines changed: 5 additions & 2 deletions

@@ -3,6 +3,7 @@
 # dependencies = [
 #   "gpt-oss",
 #   "ollama",
+#   "rich",
 # ]
 # ///
 
@@ -97,7 +98,6 @@ def browser_find(pattern: str, cursor: int = -1) -> str:
   return asyncio.run(_browser_find_async(pattern=pattern, cursor=cursor))
 
 
-model = 'gpt-oss:20b'
 
 # Schema definitions for each browser tool
 browser_search_schema = {
@@ -128,6 +128,9 @@ def browser_find(pattern: str, cursor: int = -1) -> str:
 }
 
 
+model = 'gpt-oss:20b'
+print('Model: ', model, '\n')
+
 prompt = 'What is Ollama?'
 print('You: ', prompt, '\n')
 messages = [{'role': 'user', 'content': prompt}]
@@ -138,7 +141,7 @@ def browser_find(pattern: str, cursor: int = -1) -> str:
   model=model,
   messages=messages,
   tools=[browser_search_schema, browser_open_schema, browser_find_schema],
-  options={'num_ctx': 8192}  # 8192 is the recommended lower limit for the context window
+  options={'num_ctx': 32000}  # 8192 is the recommended lower limit for the context window
 )
 
 if hasattr(response.message, 'thinking') and response.message.thinking:
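A brief note on the num_ctx change above: browser search and page-open results are folded back into the message history as tool messages, and they can be long, so a 32000-token window leaves much more room for multi-turn research than the 8192 floor the comment cites. That is presumably why both the streaming and non-streaming browser examples now use 32000.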
