268 | 268 | "name": "stdout",
269 | 269 | "output_type": "stream",
270 | 270 | "text": [
271 | | - "Response(id='resp_68950697c804819f941747ba366bd44d06882e5f785f5932', created_at=1754597015.0, error=None, incomplete_details=None, instructions=None, metadata={}, model='gpt-4.1-mini-2025-04-14', object='response', output=[ResponseOutputMessage(id='msg_6895069889c8819fa0b2707b022db17406882e5f785f5932', content=[ResponseOutputText(annotations=[], text='Hi Jeremy! How can I assist you today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')], parallel_tool_calls=True, temperature=1.0, tool_choice='auto', tools=[], top_p=1.0, background=False, max_output_tokens=100, max_tool_calls=None, previous_response_id=None, prompt=None, prompt_cache_key=None, reasoning=Reasoning(effort=None, generate_summary=None, summary=None), safety_identifier=None, service_tier='default', status='completed', text=ResponseTextConfig(format=ResponseFormatText(type='text'), verbosity='medium'), top_logprobs=0, truncation='disabled', usage=In: 9; Out: 11; Total: 20, user=None, store=True)\n"
| 271 | + "Response(id='resp_689541c42db881a29afcdd68b26f9e7b07eca96f5a70ff55', created_at=1754612164.0, error=None, incomplete_details=None, instructions=None, metadata={}, model='gpt-4.1-mini-2025-04-14', object='response', output=[ResponseOutputMessage(id='msg_689541c51e3c81a2a85b561c6840cab707eca96f5a70ff55', content=[ResponseOutputText(annotations=[], text='Hi Jeremy! How can I assist you today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')], parallel_tool_calls=True, temperature=1.0, tool_choice='auto', tools=[], top_p=1.0, background=False, max_output_tokens=100, max_tool_calls=None, previous_response_id=None, prompt=None, prompt_cache_key=None, reasoning=Reasoning(effort=None, generate_summary=None, summary=None), safety_identifier=None, service_tier='default', status='completed', text=ResponseTextConfig(format=ResponseFormatText(type='text'), verbosity='medium'), top_logprobs=0, truncation='disabled', usage=In: 9; Out: 11; Total: 20, user=None, store=True)\n"
272 | 272 | ]
273 | 273 | }
274 | 274 | ],
297 | 297 | "@patch\n",
298 | 298 | "def _repr_markdown_(self:Response):\n",
299 | 299 | " det = '\\n- '.join(f'{k}: {v}' for k,v in dict(self).items())\n",
300 | | - " res = nested_idx(self, 'output', 0, 'content', 0, 'text')\n",
| 300 | + " res = self.output_text\n",
301 | 301 | " if not res: return f\"- {det}\"\n",
302 | 302 | " return f\"\"\"{res}\n",
303 | 303 | "\n",
321 | 321 | "\n",
322 | 322 | "<details>\n",
323 | 323 | "\n",
324 | | - "- id: resp_68950697c804819f941747ba366bd44d06882e5f785f5932\n",
325 | | - "- created_at: 1754597015.0\n",
| 324 | + "- id: resp_689541c42db881a29afcdd68b26f9e7b07eca96f5a70ff55\n",
| 325 | + "- created_at: 1754612164.0\n",
326 | 326 | "- error: None\n",
327 | 327 | "- incomplete_details: None\n",
328 | 328 | "- instructions: None\n",
329 | 329 | "- metadata: {}\n",
330 | 330 | "- model: gpt-4.1-mini-2025-04-14\n",
331 | 331 | "- object: response\n",
332 | | - "- output: [ResponseOutputMessage(id='msg_6895069889c8819fa0b2707b022db17406882e5f785f5932', content=[ResponseOutputText(annotations=[], text='Hi Jeremy! How can I assist you today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')]\n",
| 332 | + "- output: [ResponseOutputMessage(id='msg_689541c51e3c81a2a85b561c6840cab707eca96f5a70ff55', content=[ResponseOutputText(annotations=[], text='Hi Jeremy! How can I assist you today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')]\n",
333 | 333 | "- parallel_tool_calls: True\n",
334 | 334 | "- temperature: 1.0\n",
335 | 335 | "- tool_choice: auto\n",
355 | 355 | "</details>"
356 | 356 | ],
357 | 357 | "text/plain": [
358 | | - "Response(id='resp_68950697c804819f941747ba366bd44d06882e5f785f5932', created_at=1754597015.0, error=None, incomplete_details=None, instructions=None, metadata={}, model='gpt-4.1-mini-2025-04-14', object='response', output=[ResponseOutputMessage(id='msg_6895069889c8819fa0b2707b022db17406882e5f785f5932', content=[ResponseOutputText(annotations=[], text='Hi Jeremy! How can I assist you today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')], parallel_tool_calls=True, temperature=1.0, tool_choice='auto', tools=[], top_p=1.0, background=False, max_output_tokens=100, max_tool_calls=None, previous_response_id=None, prompt=None, prompt_cache_key=None, reasoning=Reasoning(effort=None, generate_summary=None, summary=None), safety_identifier=None, service_tier='default', status='completed', text=ResponseTextConfig(format=ResponseFormatText(type='text'), verbosity='medium'), top_logprobs=0, truncation='disabled', usage=In: 9; Out: 11; Total: 20, user=None, store=True)"
| 358 | + "Response(id='resp_689541c42db881a29afcdd68b26f9e7b07eca96f5a70ff55', created_at=1754612164.0, error=None, incomplete_details=None, instructions=None, metadata={}, model='gpt-4.1-mini-2025-04-14', object='response', output=[ResponseOutputMessage(id='msg_689541c51e3c81a2a85b561c6840cab707eca96f5a70ff55', content=[ResponseOutputText(annotations=[], text='Hi Jeremy! How can I assist you today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')], parallel_tool_calls=True, temperature=1.0, tool_choice='auto', tools=[], top_p=1.0, background=False, max_output_tokens=100, max_tool_calls=None, previous_response_id=None, prompt=None, prompt_cache_key=None, reasoning=Reasoning(effort=None, generate_summary=None, summary=None), safety_identifier=None, service_tier='default', status='completed', text=ResponseTextConfig(format=ResponseFormatText(type='text'), verbosity='medium'), top_logprobs=0, truncation='disabled', usage=In: 9; Out: 11; Total: 20, user=None, store=True)"
359 | 359 | ]
360 | 360 | },
361 | 361 | "execution_count": null,
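For reference, below is a minimal standalone sketch of the patched cell after this change, reconstructed from the hunks above; the import lines and the tail of the f-string past line 303 are not visible in the diff and are inferred from the rendered output, so treat them as assumptions. The edit swaps fastcore's `nested_idx(self, 'output', 0, 'content', 0, 'text')` for the OpenAI SDK's `Response.output_text` convenience property, which aggregates the text of the response's output messages rather than reaching into the first content part by index.

```python
# Sketch only: reconstructed from the diff above, not copied verbatim from the notebook.
from fastcore.all import patch                  # @patch adds methods to an existing class
from openai.types.responses import Response     # Responses API result type (a pydantic model)

@patch
def _repr_markdown_(self: Response):
    "Notebook repr: show the reply text, with every response field in a <details> block."
    det = '\n- '.join(f'{k}: {v}' for k, v in dict(self).items())
    res = self.output_text  # SDK convenience property, replacing the nested_idx(...) lookup
    if not res: return f"- {det}"
    # The closing of this f-string is inferred from the rendered markdown output shown above.
    return f"""{res}

<details>

- {det}

</details>"""
```

With this in place, evaluating a `client.responses.create(...)` result in a cell renders the reply text first and tucks the raw fields into the collapsible details section, matching the output captured in this commit.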