import random
from typing import Iterator

from ollama import ChatResponse, Client
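
# This example streams a chat with thinking enabled: the model can call two
# mock weather tools, and the tool outputs are then sent back so the model
# can produce a final answer.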


def get_temperature(city: str) -> str:
  """
  Get the temperature for a city in Celsius

  Args:
    city (str): The name of the city

  Returns:
    str: The current temperature in Celsius
  """
  # This is a mock implementation - would need to use a real weather API
  if city not in ['London', 'Paris', 'New York', 'Tokyo', 'Sydney']:
    return 'Unknown city'

  return str(random.randint(0, 35)) + ' degrees Celsius'


def get_conditions(city: str) -> str:
  """
  Get the weather conditions for a city
  """
  if city not in ['London', 'Paris', 'New York', 'Tokyo', 'Sydney']:
    return 'Unknown city'
  # This is a mock implementation - would need to use a real weather API
  conditions = ['sunny', 'cloudy', 'rainy', 'snowy']
  return random.choice(conditions)
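

# Map each tool name the model may call to the local Python function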
available_functions = {
  'get_temperature': get_temperature,
  'get_conditions': get_conditions,
}


cities = ['London', 'Paris', 'New York', 'Tokyo', 'Sydney']
city = random.choice(cities)
city2 = random.choice(cities)
messages = [{'role': 'user', 'content': f'What is the temperature in {city}? And what are the weather conditions in {city2}?'}]
print('----- Prompt:', messages[0]['content'], '\n')

model = 'qwen3'
client = Client()
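# Plain Python functions passed as tools have their schemas generated from
# their signatures and docstrings; think=True also streams thinking tokens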
response: Iterator[ChatResponse] = client.chat(model, stream=True, messages=messages, tools=[get_temperature, get_conditions], think=True)

for chunk in response:
  if chunk.message.thinking:
    print(chunk.message.thinking, end='', flush=True)
  if chunk.message.content:
    print(chunk.message.content, end='', flush=True)
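  # Run any tool calls the model makes as they arrive in the stream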
  if chunk.message.tool_calls:
    for tool in chunk.message.tool_calls:
      if function_to_call := available_functions.get(tool.function.name):
        print('\nCalling function:', tool.function.name, 'with arguments:', tool.function.arguments)
        output = function_to_call(**tool.function.arguments)
        print('> Function output:', output, '\n')

        # Add the assistant message and tool call result to the messages
        messages.append(chunk.message)
        messages.append({'role': 'tool', 'content': str(output), 'tool_name': tool.function.name})
      else:
        print('Function', tool.function.name, 'not found')

print('----- Sending result back to model\n')
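# Only send the follow-up request if at least one tool result was added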
if any(msg.get('role') == 'tool' for msg in messages):
  res = client.chat(model, stream=True, tools=[get_temperature, get_conditions], messages=messages, think=True)
  done_thinking = False
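  # Tracks when the model switches from thinking to the final answer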
  for chunk in res:
    if chunk.message.thinking:
      print(chunk.message.thinking, end='', flush=True)
    if chunk.message.content:
      if not done_thinking:
        print('\n----- Final result:')
        done_thinking = True
      print(chunk.message.content, end='', flush=True)
    if chunk.message.tool_calls:
      # Model should be explaining the tool calls and the results in this output
      print('Model returned tool calls:')
      print(chunk.message.tool_calls)
else:
  print('No tool calls returned')