
Commit c2b737d

fixes #28
1 parent 1c326f0 commit c2b737d

File tree: 00_core.ipynb, cosette/core.py, settings.ini

3 files changed: +8 -8 lines changed

00_core.ipynb

Lines changed: 6 additions & 6 deletions
@@ -268,7 +268,7 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Response(id='resp_68950697c804819f941747ba366bd44d06882e5f785f5932', created_at=1754597015.0, error=None, incomplete_details=None, instructions=None, metadata={}, model='gpt-4.1-mini-2025-04-14', object='response', output=[ResponseOutputMessage(id='msg_6895069889c8819fa0b2707b022db17406882e5f785f5932', content=[ResponseOutputText(annotations=[], text='Hi Jeremy! How can I assist you today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')], parallel_tool_calls=True, temperature=1.0, tool_choice='auto', tools=[], top_p=1.0, background=False, max_output_tokens=100, max_tool_calls=None, previous_response_id=None, prompt=None, prompt_cache_key=None, reasoning=Reasoning(effort=None, generate_summary=None, summary=None), safety_identifier=None, service_tier='default', status='completed', text=ResponseTextConfig(format=ResponseFormatText(type='text'), verbosity='medium'), top_logprobs=0, truncation='disabled', usage=In: 9; Out: 11; Total: 20, user=None, store=True)\n"
+"Response(id='resp_689541c42db881a29afcdd68b26f9e7b07eca96f5a70ff55', created_at=1754612164.0, error=None, incomplete_details=None, instructions=None, metadata={}, model='gpt-4.1-mini-2025-04-14', object='response', output=[ResponseOutputMessage(id='msg_689541c51e3c81a2a85b561c6840cab707eca96f5a70ff55', content=[ResponseOutputText(annotations=[], text='Hi Jeremy! How can I assist you today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')], parallel_tool_calls=True, temperature=1.0, tool_choice='auto', tools=[], top_p=1.0, background=False, max_output_tokens=100, max_tool_calls=None, previous_response_id=None, prompt=None, prompt_cache_key=None, reasoning=Reasoning(effort=None, generate_summary=None, summary=None), safety_identifier=None, service_tier='default', status='completed', text=ResponseTextConfig(format=ResponseFormatText(type='text'), verbosity='medium'), top_logprobs=0, truncation='disabled', usage=In: 9; Out: 11; Total: 20, user=None, store=True)\n"
 ]
 }
 ],
@@ -297,7 +297,7 @@
 "@patch\n",
 "def _repr_markdown_(self:Response):\n",
 "    det = '\\n- '.join(f'{k}: {v}' for k,v in dict(self).items())\n",
-"    res = nested_idx(self, 'output', 0, 'content', 0, 'text')\n",
+"    res = self.output_text\n",
 "    if not res: return f\"- {det}\"\n",
 "    return f\"\"\"{res}\n",
 "\n",
@@ -321,15 +321,15 @@
 "\n",
 "<details>\n",
 "\n",
-"- id: resp_68950697c804819f941747ba366bd44d06882e5f785f5932\n",
-"- created_at: 1754597015.0\n",
+"- id: resp_689541c42db881a29afcdd68b26f9e7b07eca96f5a70ff55\n",
+"- created_at: 1754612164.0\n",
 "- error: None\n",
 "- incomplete_details: None\n",
 "- instructions: None\n",
 "- metadata: {}\n",
 "- model: gpt-4.1-mini-2025-04-14\n",
 "- object: response\n",
-"- output: [ResponseOutputMessage(id='msg_6895069889c8819fa0b2707b022db17406882e5f785f5932', content=[ResponseOutputText(annotations=[], text='Hi Jeremy! How can I assist you today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')]\n",
+"- output: [ResponseOutputMessage(id='msg_689541c51e3c81a2a85b561c6840cab707eca96f5a70ff55', content=[ResponseOutputText(annotations=[], text='Hi Jeremy! How can I assist you today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')]\n",
 "- parallel_tool_calls: True\n",
 "- temperature: 1.0\n",
 "- tool_choice: auto\n",
@@ -355,7 +355,7 @@
 "</details>"
 ],
 "text/plain": [
-"Response(id='resp_68950697c804819f941747ba366bd44d06882e5f785f5932', created_at=1754597015.0, error=None, incomplete_details=None, instructions=None, metadata={}, model='gpt-4.1-mini-2025-04-14', object='response', output=[ResponseOutputMessage(id='msg_6895069889c8819fa0b2707b022db17406882e5f785f5932', content=[ResponseOutputText(annotations=[], text='Hi Jeremy! How can I assist you today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')], parallel_tool_calls=True, temperature=1.0, tool_choice='auto', tools=[], top_p=1.0, background=False, max_output_tokens=100, max_tool_calls=None, previous_response_id=None, prompt=None, prompt_cache_key=None, reasoning=Reasoning(effort=None, generate_summary=None, summary=None), safety_identifier=None, service_tier='default', status='completed', text=ResponseTextConfig(format=ResponseFormatText(type='text'), verbosity='medium'), top_logprobs=0, truncation='disabled', usage=In: 9; Out: 11; Total: 20, user=None, store=True)"
+"Response(id='resp_689541c42db881a29afcdd68b26f9e7b07eca96f5a70ff55', created_at=1754612164.0, error=None, incomplete_details=None, instructions=None, metadata={}, model='gpt-4.1-mini-2025-04-14', object='response', output=[ResponseOutputMessage(id='msg_689541c51e3c81a2a85b561c6840cab707eca96f5a70ff55', content=[ResponseOutputText(annotations=[], text='Hi Jeremy! How can I assist you today?', type='output_text', logprobs=[])], role='assistant', status='completed', type='message')], parallel_tool_calls=True, temperature=1.0, tool_choice='auto', tools=[], top_p=1.0, background=False, max_output_tokens=100, max_tool_calls=None, previous_response_id=None, prompt=None, prompt_cache_key=None, reasoning=Reasoning(effort=None, generate_summary=None, summary=None), safety_identifier=None, service_tier='default', status='completed', text=ResponseTextConfig(format=ResponseFormatText(type='text'), verbosity='medium'), top_logprobs=0, truncation='disabled', usage=In: 9; Out: 11; Total: 20, user=None, store=True)"
 ]
 },
 "execution_count": null,

cosette/core.py

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ def can_set_temp(m): return m in has_temp_models
 @patch
 def _repr_markdown_(self:Response):
     det = '\n- '.join(f'{k}: {v}' for k,v in dict(self).items())
-    res = nested_idx(self, 'output', 0, 'content', 0, 'text')
+    res = self.output_text
     if not res: return f"- {det}"
     return f"""{res}
 
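
The substance of the commit: `_repr_markdown_` used to pull the reply text out of the response by manually indexing `output[0].content[0].text` with fastcore's `nested_idx`; it now relies on the OpenAI SDK's `Response.output_text` convenience property (the openai pin is bumped to >=1.99.3 in settings.ini below). A minimal sketch of the two approaches, assuming a Responses-API `Response` object; the `reply_text_old`/`reply_text_new` helpers are illustrative only, not cosette functions:

from fastcore.utils import nested_idx  # helper the old line relied on

def reply_text_old(resp):
    # Old approach: index output[0].content[0].text by hand; nested_idx
    # returns None if any level is missing (e.g. a tool-call-only response).
    return nested_idx(resp, 'output', 0, 'content', 0, 'text')

def reply_text_new(resp):
    # New approach: Response.output_text concatenates the text of every
    # output message; it is '' when there is no text output, so the
    # `if not res: return f"- {det}"` fallback in _repr_markdown_ still fires.
    return resp.output_text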

settings.ini

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@ version = 0.2.2
 min_python = 3.9
 license = apache2
 black_formatting = False
-requirements = fastcore>=1.8.4 openai>=1.99.2 msglm>=0.0.8 toolslm>=0.3.0
+requirements = fastcore>=1.8.4 openai>=1.99.3 msglm>=0.0.8 toolslm>=0.3.0
 dev_requirements = ipython
 doc_path = _docs
 lib_path = cosette
