from IPython.display import display,Image

The async version
Setup
Async SDK
model = models[1]
cli = AsyncAnthropic()

prompt = "I'm Jeremy"
m = mk_msg(prompt)
r = await cli.messages.create(messages=[m], model=model, max_tokens=100)
rHello Jeremy! Nice to meet you. How can I help you today?
- id:
msg_01WSoScnCMw2HpwQuNEkGpaw - content:
[{'citations': None, 'text': 'Hello Jeremy! Nice to meet you. How can I help you today?', 'type': 'text'}] - model:
claude-sonnet-4-5-20250929 - role:
assistant - stop_reason:
end_turn - stop_sequence:
None - type:
message - usage:
{'cache_creation': {'ephemeral_1h_input_tokens': 0, 'ephemeral_5m_input_tokens': 0}, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 10, 'output_tokens': 18, 'server_tool_use': None, 'service_tier': 'standard'}
msgs = mk_msgs([prompt, r, "I forgot my name. Can you remind me please?"])
msgs

[{'role': 'user', 'content': "I'm Jeremy"},
{'role': 'assistant',
'content': [TextBlock(citations=None, text='Hello Jeremy! Nice to meet you. How can I help you today?', type='text')]},
{'role': 'user', 'content': 'I forgot my name. Can you remind me please?'}]
await cli.messages.create(messages=msgs, model=model, max_tokens=200)Of course! Your name is Jeremy.
- id:
msg_01M6s6n5jPHq8fe4zkopaisc - content:
[{'citations': None, 'text': 'Of course! Your name is Jeremy.', 'type': 'text'}] - model:
claude-sonnet-4-5-20250929 - role:
assistant - stop_reason:
end_turn - stop_sequence:
None - type:
message - usage:
{'cache_creation': {'ephemeral_1h_input_tokens': 0, 'ephemeral_5m_input_tokens': 0}, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 42, 'output_tokens': 11, 'server_tool_use': None, 'service_tier': 'standard'}
AsyncClient
AsyncClient (model, cli=None, log=False, cache=False)
Async Anthropic messages client.
Exported source
class AsyncClient(Client):
    "Asynchronous variant of `Client`, using `AsyncAnthropic` under the hood."
    def __init__(self, model, cli=None, log=False, cache=False):
        "Async Anthropic messages client."
        super().__init__(model, cli, log, cache)
        if not cli:
            # No client supplied: build a default async client with the
            # prompt-caching beta header enabled.
            self.c = AsyncAnthropic(default_headers={'anthropic-beta': 'prompt-caching-2024-07-31'})

c = AsyncClient(model)
c._r(r)
c.useIn: 10; Out: 18; Cache create: 0; Cache read: 0; Total Tokens: 28; Search: 0
AsyncClient.__call__
AsyncClient.__call__ (msgs:list, sp='', temp=0, maxtok=4096, maxthinktok=0, prefill='', stream:bool=False, stop=None, tools:Optional[list]=None, tool_choice:Optional[dict]=None, cb=None, cli=None, log=False, cache=False)
Make an async call to Claude.
| Type | Default | Details | |
|---|---|---|---|
| msgs | list | List of messages in the dialog | |
| sp | str | The system prompt | |
| temp | int | 0 | Temperature |
| maxtok | int | 4096 | Maximum tokens |
| maxthinktok | int | 0 | Maximum thinking tokens |
| prefill | str | Optional prefill to pass to Claude as start of its response | |
| stream | bool | False | Stream response? |
| stop | NoneType | None | Stop sequence |
| tools | Optional | None | List of tools to make available to Claude |
| tool_choice | Optional | None | Optionally force use of some tool |
| cb | NoneType | None | Callback to pass result to when complete |
| cli | NoneType | None | |
| log | bool | False | |
| cache | bool | False |
Exported source
@asave_iter
async def _astream(o, cm, prefill, cb):
    "Yield `prefill` then each streamed text chunk from `cm`; store the final message on `o.value` and invoke `cb` with it."
    async with cm as s:
        yield prefill
        async for chunk in s.text_stream:
            yield chunk
        # Stream exhausted: capture the complete response, then notify the callback
        o.value = await s.get_final_message()
        await cb(o.value)
@patch
@delegates(Client)
async def __call__(self:AsyncClient,
             msgs:list, # List of messages in the dialog
             sp='', # The system prompt
             temp=0, # Temperature
             maxtok=4096, # Maximum tokens
             maxthinktok=0, # Maximum thinking tokens
             prefill='', # Optional prefill to pass to Claude as start of its response
             stream:bool=False, # Stream response?
             stop=None, # Stop sequence
             tools:Optional[list]=None, # List of tools to make available to Claude
             tool_choice:Optional[dict]=None, # Optionally force use of some tool
             cb=None, # Callback to pass result to when complete
             **kwargs):
    "Make an async call to Claude."
    # Normalize messages and merge all call options into `kwargs`
    msgs,kwargs = self._precall(msgs, prefill, sp, temp, maxtok, maxthinktok, stream,
                                stop, tools, tool_choice, kwargs)
    async def _cb(v):
        # Record usage/result, then hand the response to the caller's callback (if any)
        self._log(v, prefill=prefill, msgs=msgs, **kwargs)
        if cb: await cb(v)
    api = self.c.messages
    req = (api.stream if stream else api.create)(model=self.model, messages=msgs, **kwargs)
    # Streaming: return an async iterator that runs `_cb` once the stream completes
    if stream: return _astream(req, prefill, _cb)
    res = await req
    # `finally` ensures logging/callback happen even though we return first
    try: return res
    finally: await _cb(res)

c = AsyncClient(model, log=True)
c.useIn: 0; Out: 0; Cache create: 0; Cache read: 0; Total Tokens: 0; Search: 0
c.model = models[1]
await c('Hi')Hello! How can I help you today?
- id:
msg_01QrsbLFyNfSdHD5u3m8T8qE - content:
[{'citations': None, 'text': 'Hello! How can I help you today?', 'type': 'text'}] - model:
claude-sonnet-4-5-20250929 - role:
assistant - stop_reason:
end_turn - stop_sequence:
None - type:
message - usage:
{'cache_creation': {'ephemeral_1h_input_tokens': 0, 'ephemeral_5m_input_tokens': 0}, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 8, 'output_tokens': 12, 'server_tool_use': None, 'service_tier': 'standard'}
c.useIn: 8; Out: 12; Cache create: 0; Cache read: 0; Total Tokens: 20; Search: 0
q = "Very concisely, what is the meaning of life?"
pref = 'According to Douglas Adams,'
await c(q, prefill=pref)According to Douglas Adams, it’s 42.
More seriously: Create meaning through connections, growth, and contribution to something beyond yourself.
- id:
msg_01XaJGwfM8wwKn3GvByAMa6Y - content:
[{'citations': None, 'text': "According to Douglas Adams, it's 42.\n\nMore seriously: Create meaning through connections, growth, and contribution to something beyond yourself.", 'type': 'text'}] - model:
claude-sonnet-4-5-20250929 - role:
assistant - stop_reason:
end_turn - stop_sequence:
None - type:
message - usage:
{'cache_creation': {'ephemeral_1h_input_tokens': 0, 'ephemeral_5m_input_tokens': 0}, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 24, 'output_tokens': 27, 'server_tool_use': None, 'service_tier': 'standard'}
c.useIn: 32; Out: 39; Cache create: 0; Cache read: 0; Total Tokens: 71; Search: 0
r = await c(q, prefill=pref, stream=True)
async for o in r: print(o, end='')
r.valueAccording to Douglas Adams, it's 42.
More seriously: Create meaning through connections, growth, and contribution to something beyond yourself.
According to Douglas Adams, it’s 42.
More seriously: Create meaning through connections, growth, and contribution to something beyond yourself.
- id:
msg_01Svm4SGJ9dUBVFxHuKrTzev - content:
[{'citations': None, 'text': "According to Douglas Adams, it's 42.\n\nMore seriously: Create meaning through connections, growth, and contribution to something beyond yourself.", 'type': 'text'}] - model:
claude-sonnet-4-5-20250929 - role:
assistant - stop_reason:
end_turn - stop_sequence:
None - type:
message - usage:
{'cache_creation': {'ephemeral_1h_input_tokens': 0, 'ephemeral_5m_input_tokens': 0}, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 24, 'output_tokens': 27, 'server_tool_use': None, 'service_tier': 'standard'}
c.useIn: 56; Out: 66; Cache create: 0; Cache read: 0; Total Tokens: 122; Search: 0
def sums(
    a:int, # First thing to sum
    b:int=1 # Second thing to sum
) -> int: # The sum of the inputs
    "Adds a + b."
    # Side-effect print makes tool invocations visible in the transcript
    print(f"Finding the sum of {a} and {b}")
    total = a + b
    return total

a,b = 604542,6458932
pr = f"What is {a}+{b}?"
sp = "You are a summing expert."tools=[sums]
choice = mk_tool_choice('sums')
choice{'type': 'tool', 'name': 'sums'}
msgs = mk_msgs(pr)
r = await c(msgs, sp=sp, tools=tools, tool_choice=choice)
rToolUseBlock(id=‘toolu_01TKnF1t9kRoGDwTPUYUnwpS’, input={‘a’: 604542, ‘b’: 6458932}, name=‘sums’, type=‘tool_use’)
- id:
msg_01UtME5uDv4RamrTZr8HB7Vv - content:
[{'id': 'toolu_01TKnF1t9kRoGDwTPUYUnwpS', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}] - model:
claude-sonnet-4-5-20250929 - role:
assistant - stop_reason:
tool_use - stop_sequence:
None - type:
message - usage:
{'cache_creation': {'ephemeral_1h_input_tokens': 0, 'ephemeral_5m_input_tokens': 0}, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 712, 'output_tokens': 57, 'server_tool_use': None, 'service_tier': 'standard'}
mk_funcres_async
mk_funcres_async (fc, ns)
Given tool use block fc, get tool result, and create a tool_result response.
mk_toolres_async
mk_toolres_async (r:collections.abc.Mapping, ns:Optional[collections.abc.Mapping]=None)
Create a tool_result message from response r.
| Type | Default | Details | |
|---|---|---|---|
| r | Mapping | Tool use request response from Claude | |
| ns | Optional | None | Namespace to search for tools |
tr = await mk_toolres_async(r, ns=globals())
trFinding the sum of 604542 and 6458932
[{'role': 'assistant',
'content': [{'id': 'toolu_01TKnF1t9kRoGDwTPUYUnwpS',
'input': {'a': 604542, 'b': 6458932},
'name': 'sums',
'type': 'tool_use'}]},
{'role': 'user',
'content': [{'type': 'tool_result',
'tool_use_id': 'toolu_01TKnF1t9kRoGDwTPUYUnwpS',
'content': '7063474'}]}]
msgs += tr
r = contents(await c(msgs, sp=sp, tools=sums))
r'The sum of 604542 + 6458932 = **7,063,474**'
Structured Output
AsyncClient.structured
AsyncClient.structured (msgs:list, tools:Optional[list]=None, ns:Optional[collections.abc.Mapping]=None, sp='', temp=0, maxtok=4096, maxthinktok=0, prefill='', stream:bool=False, stop=None, tool_choice:Optional[dict]=None, cb=None, metadata:MetadataParam|Omit=<anthropic.Omit object at 0x7f2a47114b50>, service_tier:"Literal[ 'auto','standard_only']|Omit"=<anthropic.Omit object at 0x7f2a47114b50>, stop_sequences:Sequenc eNotStr[str]|Omit=<anthropic.Omit object at 0x7f2a47114b50>, system:Union[str,Iterable[TextBl ockParam]]|Omit=<anthropic.Omit object at 0x7f2a47114b50>, temperature:float|Omit=<anthropic.Omit object at 0x7f2a47114b50>, thinking:ThinkingConfigParam|Omit=<anthropic.Omit object at 0x7f2a47114b50>, top_k:int|Omit=<anthropic.Omit object at 0x7f2a47114b50>, top_p:float|Omit=<anthropic.Omit object at 0x7f2a47114b50>, extra_headers:Headers|None=None, extra_query:Query|None=None, extra_body:Body|None=None, timeout:float|httpx.Ti meout|None|NotGiven=NOT_GIVEN)
Return the value of all tool calls (generally used for structured outputs)
| Type | Default | Details | |
|---|---|---|---|
| msgs | list | List of messages in the dialog | |
| tools | Optional | None | List of tools to make available to Claude |
| ns | Optional | None | Namespace to search for tools |
| sp | str | The system prompt | |
| temp | int | 0 | Temperature |
| maxtok | int | 4096 | Maximum tokens |
| maxthinktok | int | 0 | Maximum thinking tokens |
| prefill | str | Optional prefill to pass to Claude as start of its response | |
| stream | bool | False | Stream response? |
| stop | NoneType | None | Stop sequence |
| tool_choice | Optional | None | Optionally force use of some tool |
| cb | NoneType | None | Callback to pass result to when complete |
| metadata | MetadataParam | Omit | <anthropic.Omit object at 0x7f2a47114b50> | |
| service_tier | Literal[‘auto’, ‘standard_only’] | Omit | <anthropic.Omit object at 0x7f2a47114b50> | |
| stop_sequences | SequenceNotStr[str] | Omit | <anthropic.Omit object at 0x7f2a47114b50> | |
| system | Union[str, Iterable[TextBlockParam]] | Omit | <anthropic.Omit object at 0x7f2a47114b50> | |
| temperature | float | Omit | <anthropic.Omit object at 0x7f2a47114b50> | |
| thinking | ThinkingConfigParam | Omit | <anthropic.Omit object at 0x7f2a47114b50> | |
| top_k | int | Omit | <anthropic.Omit object at 0x7f2a47114b50> | |
| top_p | float | Omit | <anthropic.Omit object at 0x7f2a47114b50> | |
| extra_headers | Optional | None | Use the following arguments if you need to pass additional parameters to the API that aren’t available via kwargs. The extra values given here take precedence over values defined on the client or passed to this method. |
| extra_query | Query | None | None | |
| extra_body | Body | None | None | |
| timeout | float | httpx.Timeout | None | NotGiven | NOT_GIVEN |
await c.structured(pr, sums)Finding the sum of 604542 and 6458932
[7063474]
cToolUseBlock(id=‘toolu_01HfWqUFm529AfFNnNx3U7FD’, input={‘a’: 604542, ‘b’: 6458932}, name=‘sums’, type=‘tool_use’)
| Metric | Count | Cost (USD) |
|---|---|---|
| Input tokens | 2,180 | 0.006540 |
| Output tokens | 204 | 0.003060 |
| Cache tokens | 0 | 0.000000 |
| Server tool use | 0 | 0.000000 |
| Total | 2,384 | $0.009600 |
AsyncChat
AsyncChat
AsyncChat (model:Optional[str]=None, cli:Optional[claudette.core.Client]=None, sp='', tools:Optional[list]=None, temp=0, cont_pr:Optional[str]=None, cache:bool=False, hist:list=None, ns:Optional[collections.abc.Mapping]=None)
Anthropic async chat client.
| Type | Default | Details | |
|---|---|---|---|
| model | Optional | None | Model to use (leave empty if passing cli) |
| cli | Optional | None | Client to use (leave empty if passing model) |
| sp | str | ||
| tools | Optional | None | |
| temp | int | 0 | |
| cont_pr | Optional | None | |
| cache | bool | False | |
| hist | list | None | |
| ns | Optional | None |
Exported source
@delegates()
class AsyncChat(Chat):
    "Asynchronous chat client; identical to `Chat` but backed by `AsyncClient`."
    def __init__(self,
                 model:Optional[str]=None, # Model to use (leave empty if passing `cli`)
                 cli:Optional[Client]=None, # Client to use (leave empty if passing `model`)
                 **kwargs):
        "Anthropic async chat client."
        super().__init__(model, cli, **kwargs)
        # Swap in an async client when the caller didn't provide one
        if not cli:
            self.c = AsyncClient(model)

sp = "Always use tools if available, and calculations are requested."
chat = AsyncChat(model, sp=sp)
chat.c.use, chat.h(In: 0; Out: 0; Cache create: 0; Cache read: 0; Total Tokens: 0; Search: 0, [])
AsyncChat.__call__
AsyncChat.__call__ (pr=None, temp=None, maxtok=4096, maxthinktok=0, stream=False, prefill='', tool_choice:Union[str,bool,dict,NoneType]=None, **kw)
Call self as a function.
| Type | Default | Details | |
|---|---|---|---|
| pr | NoneType | None | Prompt / message |
| temp | NoneType | None | Temperature |
| maxtok | int | 4096 | Maximum tokens |
| maxthinktok | int | 0 | Maximum thinking tokens |
| stream | bool | False | Stream response? |
| prefill | str | Optional prefill to pass to Claude as start of its response | |
| tool_choice | Union | None | Optionally force use of some tool |
| kw | VAR_KEYWORD |
Exported source
@patch
async def _append_pr(self:AsyncChat, pr=None):
prev_role = nested_idx(self.h, -1, 'role') if self.h else 'assistant' # First message should be 'user' if no history
if pr and prev_role == 'user': await self()
self._post_pr(pr, prev_role)Exported source
@patch
async def __call__(self:AsyncChat,
pr=None, # Prompt / message
temp=None, # Temperature
maxtok=4096, # Maximum tokens
maxthinktok=0, # Maximum thinking tokens
stream=False, # Stream response?
prefill='', # Optional prefill to pass to Claude as start of its response
tool_choice:Optional[Union[str,bool,dict]]=None, # Optionally force use of some tool
**kw):
if temp is None: temp=self.temp
await self._append_pr(pr)
async def _cb(v):
self.last = await mk_toolres_async(v, ns=self.ns)
self.h += self.last
return await self.c(self.h, stream=stream, prefill=prefill, sp=self.sp, temp=temp, maxtok=maxtok, maxthinktok=maxthinktok, tools=self.tools, tool_choice=tool_choice, cb=_cb, **kw)await chat("I'm Jeremy")
await chat("What's my name?")Your name is Jeremy! You told me that at the start of our conversation.
- id:
msg_01V997mDpucmU87zhnF5dydS - content:
[{'citations': None, 'text': 'Your name is Jeremy! You told me that at the start of our conversation.', 'type': 'text'}] - model:
claude-sonnet-4-5-20250929 - role:
assistant - stop_reason:
end_turn - stop_sequence:
None - type:
message - usage:
{'cache_creation': {'ephemeral_1h_input_tokens': 0, 'ephemeral_5m_input_tokens': 0}, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 47, 'output_tokens': 19, 'server_tool_use': None, 'service_tier': 'standard'}
q = "Very concisely, what is the meaning of life?"
pref = 'According to Douglas Adams,'
await chat(q, prefill=pref)According to Douglas Adams, it’s 42.
More seriously: to find purpose through connection, growth, and contributing something meaningful to others.
- id:
msg_01HLoW2YqZkemPeqQGgrTqxN - content:
[{'citations': None, 'text': "According to Douglas Adams, it's 42. \n\nMore seriously: to find purpose through connection, growth, and contributing something meaningful to others.", 'type': 'text'}] - model:
claude-sonnet-4-5-20250929 - role:
assistant - stop_reason:
end_turn - stop_sequence:
None - type:
message - usage:
{'cache_creation': {'ephemeral_1h_input_tokens': 0, 'ephemeral_5m_input_tokens': 0}, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 86, 'output_tokens': 29, 'server_tool_use': None, 'service_tier': 'standard'}
chat = AsyncChat(model, sp=sp)
r = await chat("I'm Jeremy", stream=True)
async for o in r: print(o, end='')
r.valueHello Jeremy! Nice to meet you. How can I help you today?
Hello Jeremy! Nice to meet you. How can I help you today?
- id:
msg_01AggZKp4AKoGg7dzBE2qREi - content:
[{'citations': None, 'text': 'Hello Jeremy! Nice to meet you. How can I help you today?', 'type': 'text'}] - model:
claude-sonnet-4-5-20250929 - role:
assistant - stop_reason:
end_turn - stop_sequence:
None - type:
message - usage:
{'cache_creation': {'ephemeral_1h_input_tokens': 0, 'ephemeral_5m_input_tokens': 0}, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 21, 'output_tokens': 18, 'server_tool_use': None, 'service_tier': 'standard'}
pr = f"What is {a}+{b}?"
chat = AsyncChat(model, sp=sp, tools=[sums])
r = await chat(pr)
rFinding the sum of 604542 and 6458932
ToolUseBlock(id=‘toolu_01JqWJBzKkm78XjarRXvCy9W’, input={‘a’: 604542, ‘b’: 6458932}, name=‘sums’, type=‘tool_use’)
- id:
msg_01MokGeSj7HcFxZMsmDPk674 - content:
[{'id': 'toolu_01JqWJBzKkm78XjarRXvCy9W', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}] - model:
claude-sonnet-4-5-20250929 - role:
assistant - stop_reason:
tool_use - stop_sequence:
None - type:
message - usage:
{'cache_creation': {'ephemeral_1h_input_tokens': 0, 'ephemeral_5m_input_tokens': 0}, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 624, 'output_tokens': 72, 'server_tool_use': None, 'service_tier': 'standard'}
await chat()The sum of 604542 + 6458932 = 7,063,474
- id:
msg_015ZSkMnf9gBRUf2u6mQz1VZ - content:
[{'citations': None, 'text': 'The sum of 604542 + 6458932 = **7,063,474**', 'type': 'text'}] - model:
claude-sonnet-4-5-20250929 - role:
assistant - stop_reason:
end_turn - stop_sequence:
None - type:
message - usage:
{'cache_creation': {'ephemeral_1h_input_tokens': 0, 'ephemeral_5m_input_tokens': 0}, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 712, 'output_tokens': 24, 'server_tool_use': None, 'service_tier': 'standard'}
fn = Path('samples/puppy.jpg')
img = fn.read_bytes()
Image(img)
q = "In brief, what color flowers are in this image?"
msg = mk_msg([img, q])
await c([msg])The flowers in this image are purple.
- id:
msg_01JN8RWq8Mhwkukqg9pwFxpa - content:
[{'citations': None, 'text': 'The flowers in this image are **purple**.', 'type': 'text'}] - model:
claude-sonnet-4-5-20250929 - role:
assistant - stop_reason:
end_turn - stop_sequence:
None - type:
message - usage:
{'cache_creation': {'ephemeral_1h_input_tokens': 0, 'ephemeral_5m_input_tokens': 0}, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 110, 'output_tokens': 12, 'server_tool_use': None, 'service_tier': 'standard'}
Add cache=True to automatically add to Claude’s KV cache.
chat = AsyncChat(model, sp=sp, cache=True)
await chat("Lorem ipsum dolor sit amet" * 150)I notice you’ve sent me the phrase “Lorem ipsum dolor sit amet” repeated many times. “Lorem ipsum” is placeholder text commonly used in design and publishing to demonstrate visual form without meaningful content.
Is there something specific I can help you with? For example: - Do you have a question or task you’d like assistance with? - Were you testing something? - Did you mean to send different content?
I’m here to help with a wide range of tasks including answering questions, analysis, writing, problem-solving, and more. Please let me know what you need!
- id:
msg_01NyGkjEfKDhBEjDVjYALzXX - content:
[{'citations': None, 'text': 'I notice you\'ve sent me the phrase "Lorem ipsum dolor sit amet" repeated many times. "Lorem ipsum" is placeholder text commonly used in design and publishing to demonstrate visual form without meaningful content.\n\nIs there something specific I can help you with? For example:\n- Do you have a question or task you\'d like assistance with?\n- Were you testing something?\n- Did you mean to send different content?\n\nI\'m here to help with a wide range of tasks including answering questions, analysis, writing, problem-solving, and more. Please let me know what you need!', 'type': 'text'}] - model:
claude-sonnet-4-5-20250929 - role:
assistant - stop_reason:
end_turn - stop_sequence:
None - type:
message - usage:
{'cache_creation': {'ephemeral_1h_input_tokens': 0, 'ephemeral_5m_input_tokens': 1063}, 'cache_creation_input_tokens': 1063, 'cache_read_input_tokens': 0, 'input_tokens': 3, 'output_tokens': 125, 'server_tool_use': None, 'service_tier': 'standard'}
chat.useIn: 3; Out: 125; Cache create: 1063; Cache read: 0; Total Tokens: 1191; Search: 0
In this followup call, nearly all the tokens are cached, so only the new additional tokens are charged at the full rate.
await chat("Whoops, sorry about that!")No problem at all! These things happen. 😊
How can I help you today? Feel free to ask me anything or let me know what you’re working on!
- id:
msg_01HQnPcituHpsSC5fsN6Qugq - content:
[{'citations': None, 'text': "No problem at all! These things happen. 😊\n\nHow can I help you today? Feel free to ask me anything or let me know what you're working on!", 'type': 'text'}] - model:
claude-sonnet-4-5-20250929 - role:
assistant - stop_reason:
end_turn - stop_sequence:
None - type:
message - usage:
{'cache_creation': {'ephemeral_1h_input_tokens': 0, 'ephemeral_5m_input_tokens': 136}, 'cache_creation_input_tokens': 136, 'cache_read_input_tokens': 1063, 'input_tokens': 3, 'output_tokens': 39, 'server_tool_use': None, 'service_tier': 'standard'}
chat.useIn: 6; Out: 164; Cache create: 1199; Cache read: 1063; Total Tokens: 2432; Search: 0
Extended Thinking
Let’s call the model without extended thinking enabled.
chat = AsyncChat(model)
await chat("Write a sentence about Python!")Python is a versatile, high-level programming language known for its clean syntax and readability, making it popular for everything from web development to data science and machine learning.
- id:
msg_01VrHou5ydkh9c3ypukvNoCA - content:
[{'citations': None, 'text': 'Python is a versatile, high-level programming language known for its clean syntax and readability, making it popular for everything from web development to data science and machine learning.', 'type': 'text'}] - model:
claude-sonnet-4-5-20250929 - role:
assistant - stop_reason:
end_turn - stop_sequence:
None - type:
message - usage:
{'cache_creation': {'ephemeral_1h_input_tokens': 0, 'ephemeral_5m_input_tokens': 0}, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 13, 'output_tokens': 38, 'server_tool_use': None, 'service_tier': 'standard'}
Now, let’s call the model with extended thinking enabled.
r = await chat("Write a sentence about Python!", maxthinktok=1024)
rPython’s extensive standard library and vast ecosystem of third-party packages make it an excellent choice for beginners and experienced developers alike.
Thinking
The user is asking me to write another sentence about Python. I’ll provide a different sentence this time to give them varied information.- id:
msg_01BzPZRU8hp3x4Rrv5gQiRVQ - content:
[{'signature': 'ErMCCkYICRgCKkDkES/w96cekkCn0ELoX3z+fFhvFUpSKOz4rtski52b6YaZbiUX6VXwW10/l/ZWtAWBXX6fNJ1sS2fCG1iP4KKiEgy0cpkiDex1h/6N/cAaDLtJ/s8siMsHfZ+2kiIwF6B33yCXMmE9Io1fGIpq3GKUQ/7wJuL/6OWumsarW6Fk50cjQRiKi9b4+bkdYNf9KpoBlHSDyEd4IEFTqvz+3k+YmmZFD0cz/7CnZQvYC4FuLzLflDa+afAWOLQTxOTJEiCNXEthPEwBBkkFm/wjeITw5V4QUVbH8fAVOguFQlLh03vEUFeTcsJ5ZEhRBR11tcBXuRaKB55PEawKIMoS0tYRkIt0jcKOkwIgDXfFUl+w/K9PipK+m/uxTn6w91NWXSvvinycMiHN0KREFhgB', 'thinking': "The user is asking me to write another sentence about Python. I'll provide a different sentence this time to give them varied information.", 'type': 'thinking'}, {'citations': None, 'text': "Python's extensive standard library and vast ecosystem of third-party packages make it an excellent choice for beginners and experienced developers alike.", 'type': 'text'}] - model:
claude-sonnet-4-5-20250929 - role:
assistant - stop_reason:
end_turn - stop_sequence:
None - type:
message - usage:
{'cache_creation': {'ephemeral_1h_input_tokens': 0, 'ephemeral_5m_input_tokens': 0}, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 88, 'output_tokens': 64, 'server_tool_use': None, 'service_tier': 'standard'}
r.content[ThinkingBlock(signature='ErMCCkYICRgCKkDkES/w96cekkCn0ELoX3z+fFhvFUpSKOz4rtski52b6YaZbiUX6VXwW10/l/ZWtAWBXX6fNJ1sS2fCG1iP4KKiEgy0cpkiDex1h/6N/cAaDLtJ/s8siMsHfZ+2kiIwF6B33yCXMmE9Io1fGIpq3GKUQ/7wJuL/6OWumsarW6Fk50cjQRiKi9b4+bkdYNf9KpoBlHSDyEd4IEFTqvz+3k+YmmZFD0cz/7CnZQvYC4FuLzLflDa+afAWOLQTxOTJEiCNXEthPEwBBkkFm/wjeITw5V4QUVbH8fAVOguFQlLh03vEUFeTcsJ5ZEhRBR11tcBXuRaKB55PEawKIMoS0tYRkIt0jcKOkwIgDXfFUl+w/K9PipK+m/uxTn6w91NWXSvvinycMiHN0KREFhgB', thinking="The user is asking me to write another sentence about Python. I'll provide a different sentence this time to give them varied information.", type='thinking'),
TextBlock(citations=None, text="Python's extensive standard library and vast ecosystem of third-party packages make it an excellent choice for beginners and experienced developers alike.", type='text')]