|
1 | 1 | import json
|
2 |
| -import ollama |
| 2 | +import ollama |
3 | 3 | import asyncio
|
4 | 4 |
|
| 5 | + |
# Simulates an API call to get flight times.
# In a real application, this would fetch data from a live database or API.
def get_flight_times(departure: str, arrival: str) -> str:
    """Return the mocked schedule for a direct flight as a JSON string.

    Args:
        departure: Departure airport code (case-insensitive).
        arrival: Arrival airport code (case-insensitive).

    Returns:
        JSON with ``departure``/``arrival``/``duration`` for a known route,
        or ``{"error": "Flight not found"}`` for an unknown one.
    """
    schedule = {
        'NYC-LAX': {'departure': '08:00 AM', 'arrival': '11:30 AM', 'duration': '5h 30m'},
        'LAX-NYC': {'departure': '02:00 PM', 'arrival': '10:30 PM', 'duration': '5h 30m'},
        'LHR-JFK': {'departure': '10:00 AM', 'arrival': '01:00 PM', 'duration': '8h 00m'},
        'JFK-LHR': {'departure': '09:00 PM', 'arrival': '09:00 AM', 'duration': '7h 00m'},
        'CDG-DXB': {'departure': '11:00 AM', 'arrival': '08:00 PM', 'duration': '6h 00m'},
        'DXB-CDG': {'departure': '03:00 AM', 'arrival': '07:30 AM', 'duration': '7h 30m'},
    }

    # Route keys are stored uppercase, so normalize the caller's input.
    route = f'{departure}-{arrival}'.upper()
    info = schedule.get(route, {'error': 'Flight not found'})
    return json.dumps(info)
|
async def run(model: str):
    """Drive a two-round tool-calling conversation with an Ollama model.

    Round 1 sends the user question plus the ``get_flight_times`` tool
    schema; if the model emits tool calls, each is executed locally and its
    result appended as a ``tool`` message. Round 2 asks the model for the
    final natural-language answer, which is printed to stdout.

    Args:
        model: Name of the Ollama model to chat with (e.g. ``'mistral'``).
    """
    client = ollama.AsyncClient()

    # Initialize conversation with a user query
    messages = [{'role': 'user', 'content': 'What is the flight time from New York (NYC) to Los Angeles (LAX)?'}]

    # First API call: Send the query and function description to the model
    response = await client.chat(
        model=model,
        messages=messages,
        tools=[
            {
                'type': 'function',
                'function': {
                    'name': 'get_flight_times',
                    'description': 'Get the flight times between two cities',
                    'parameters': {
                        'type': 'object',
                        'properties': {
                            'departure': {
                                'type': 'string',
                                'description': 'The departure city (airport code)',
                            },
                            'arrival': {
                                'type': 'string',
                                'description': 'The arrival city (airport code)',
                            },
                        },
                        'required': ['departure', 'arrival'],
                    },
                },
            },
        ],
    )

    # Add the model's response to the conversation history
    messages.append(response['message'])

    # Check if the model decided to use the provided function
    if not response['message'].get('tool_calls'):
        print("The model didn't use the function. Its response was:")
        print(response['message']['content'])
        return

    # Process function calls made by the model.
    # (No re-check of tool_calls here: the early return above already
    # guarantees they exist — the original nested `if` was dead code.)
    available_functions = {
        'get_flight_times': get_flight_times,
    }
    for tool in response['message']['tool_calls']:
        function_to_call = available_functions[tool['function']['name']]
        # Unpack the model-provided arguments by name so dispatch works for
        # any tool signature, not just (departure, arrival) in that order.
        function_response = function_to_call(**tool['function']['arguments'])
        # Add function response to the conversation
        messages.append(
            {
                'role': 'tool',
                'content': function_response,
            }
        )

    # Second API call: Get final response from the model
    final_response = await client.chat(model=model, messages=messages)
    print(final_response['message']['content'])
|
# Run the async function against this model.
MODEL_NAME = 'mistral'
asyncio.run(run(MODEL_NAME))
0 commit comments