from IPython.display import display,Image
The async version
Setup
Async SDK
model = models[1]
cli = AsyncAnthropic()
prompt = "I'm Jeremy"
m = mk_msg(prompt)
r = await cli.messages.create(messages=[m], model=model, max_tokens=100)
r
Nice to meet you, Jeremy! How are you doing today? Is there anything I can help you with?
- id:
msg_01T8U2hnk8veDSzKVouiYo3r
- content:
[{'citations': None, 'text': 'Nice to meet you, Jeremy! How are you doing today? Is there anything I can help you with?', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 10, 'output_tokens': 25, 'server_tool_use': None, 'service_tier': 'standard'}
msgs = mk_msgs([prompt, r, "I forgot my name. Can you remind me please?"])
msgs
[{'role': 'user', 'content': "I'm Jeremy"},
{'role': 'assistant',
'content': [TextBlock(citations=None, text='Nice to meet you, Jeremy! How are you doing today? Is there anything I can help you with?', type='text')]},
{'role': 'user', 'content': 'I forgot my name. Can you remind me please?'}]
await cli.messages.create(messages=msgs, model=model, max_tokens=200)
Your name is Jeremy - you introduced yourself to me just a moment ago at the start of our conversation.
- id:
msg_01U86Rv9E4EqNs3DPKfsqUmC
- content:
[{'citations': None, 'text': 'Your name is Jeremy - you introduced yourself to me just a moment ago at the start of our conversation.', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 49, 'output_tokens': 24, 'server_tool_use': None, 'service_tier': 'standard'}
AsyncClient
AsyncClient (model, cli=None, log=False, cache=False)
Async Anthropic messages client.
Exported source
class AsyncClient(Client):
    def __init__(self, model, cli=None, log=False, cache=False):
        "Async Anthropic messages client."
        super().__init__(model, cli, log, cache)
        # When no client is supplied, build an async one with prompt-caching enabled.
        if not cli:
            hdrs = {'anthropic-beta': 'prompt-caching-2024-07-31'}
            self.c = AsyncAnthropic(default_headers=hdrs)
c = AsyncClient(model)
c._r(r)
c.use
In: 10; Out: 25; Cache create: 0; Cache read: 0; Total Tokens: 35; Search: 0
AsyncClient.__call__
AsyncClient.__call__ (msgs:list, sp='', temp=0, maxtok=4096, maxthinktok=0, prefill='', stream:bool=False, stop=None, tools:Optional[list]=None, tool_choice:Optional[dict]=None, cb=None, cli=None, log=False, cache=False)
Make an async call to Claude.
Type | Default | Details | |
---|---|---|---|
msgs | list | List of messages in the dialog | |
sp | str | The system prompt | |
temp | int | 0 | Temperature |
maxtok | int | 4096 | Maximum tokens |
maxthinktok | int | 0 | Maximum thinking tokens |
prefill | str | Optional prefill to pass to Claude as start of its response | |
stream | bool | False | Stream response? |
stop | NoneType | None | Stop sequence |
tools | Optional | None | List of tools to make available to Claude |
tool_choice | Optional | None | Optionally force use of some tool |
cb | NoneType | None | Callback to pass result to when complete |
cli | NoneType | None | |
log | bool | False | |
cache | bool | False |
Exported source
@asave_iter
async def _astream(o, cm, prefill, cb):
    "Stream text from async context manager `cm`, yielding `prefill` first; stash the final message on `o.value` and pass it to `cb`."
    async with cm as s:
        yield prefill
        async for x in s.text_stream: yield x
        # After the text stream is exhausted, capture the complete message
        # so callers can inspect usage/stop_reason via `o.value`.
        o.value = await s.get_final_message()
        cb(o.value)
Exported source
@patch
@delegates(Client)
async def __call__(self:AsyncClient,
             msgs:list, # List of messages in the dialog
             sp='', # The system prompt
             temp=0, # Temperature
             maxtok=4096, # Maximum tokens
             maxthinktok=0, # Maximum thinking tokens
             prefill='', # Optional prefill to pass to Claude as start of its response
             stream:bool=False, # Stream response?
             stop=None, # Stop sequence
             tools:Optional[list]=None, # List of tools to make available to Claude
             tool_choice:Optional[dict]=None, # Optionally force use of some tool
             cb=None, # Callback to pass result to when complete
             **kwargs):
    "Make an async call to Claude."
    msgs,kwargs = self._precall(msgs, prefill, sp, temp, maxtok, maxthinktok, stream,
                                stop, tools, tool_choice, kwargs)
    m = self.c.messages
    f = m.stream if stream else m.create
    res = f(model=self.model, messages=msgs, **kwargs)
    def _cb(v):
        # Log usage/result first, then chain to any user-supplied callback.
        self._log(v, prefill=prefill, msgs=msgs, **kwargs)
        if cb: cb(v)
    # Streaming returns an async iterator; the callback fires when it completes.
    if stream: return _astream(res, prefill, _cb)
    res = await res
    # `finally` ensures logging happens even if a later consumer raises.
    try: return res
    finally: _cb(res)
c = AsyncClient(model, log=True)
c.use
In: 0; Out: 0; Cache create: 0; Cache read: 0; Total Tokens: 0; Search: 0
c.model = models[1]
await c('Hi')
Hello! How are you doing today? Is there anything I can help you with?
- id:
msg_01QhNvAivy9FdrQZkpzuPUio
- content:
[{'citations': None, 'text': 'Hello! How are you doing today? Is there anything I can help you with?', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 8, 'output_tokens': 20, 'server_tool_use': None, 'service_tier': 'standard'}
c.use
In: 8; Out: 20; Cache create: 0; Cache read: 0; Total Tokens: 28; Search: 0
q = "Very concisely, what is the meaning of life?"
pref = 'According to Douglas Adams,'
await c(q, prefill=pref)
According to Douglas Adams,42. But seriously, there’s no universal answer - it’s deeply personal. Common themes include: finding purpose, connecting with others, growing as a person, and creating meaning through your choices and relationships.
- id:
msg_01RSKwPnJtu9CCfWhCoFuy9M
- content:
[{'citations': None, 'text': "According to Douglas Adams,42. But seriously, there's no universal answer - it's deeply personal. Common themes include: finding purpose, connecting with others, growing as a person, and creating meaning through your choices and relationships.", 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 24, 'output_tokens': 44, 'server_tool_use': None, 'service_tier': 'standard'}
c.use
In: 32; Out: 64; Cache create: 0; Cache read: 0; Total Tokens: 96; Search: 0
r = await c(q, prefill=pref, stream=True)
async for o in r: print(o, end='')
r.value
According to Douglas Adams,42. But seriously, there's no universal answer - it's deeply personal. Common themes include: finding purpose, connecting with others, growing as a person, and creating meaning through your choices and relationships.
According to Douglas Adams,42. But seriously, there’s no universal answer - it’s deeply personal. Common themes include: finding purpose, connecting with others, growing as a person, and creating meaning through your choices and relationships.
- id:
msg_01DcueRh1YPgXwdEWLNmLjnV
- content:
[{'citations': None, 'text': "According to Douglas Adams,42. But seriously, there's no universal answer - it's deeply personal. Common themes include: finding purpose, connecting with others, growing as a person, and creating meaning through your choices and relationships.", 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 24, 'output_tokens': 44, 'server_tool_use': None, 'service_tier': 'standard'}
c.use
In: 56; Out: 108; Cache create: 0; Cache read: 0; Total Tokens: 164; Search: 0
def sums(
    a:int, # First thing to sum
    b:int=1 # Second thing to sum
) -> int: # The sum of the inputs
    "Adds a + b."
    print(f"Finding the sum of {a} and {b}")
    return a + b
a,b = 604542,6458932
pr = f"What is {a}+{b}?"
sp = "You are a summing expert."
tools = [sums]
choice = mk_tool_choice('sums')
choice
{'type': 'tool', 'name': 'sums'}
msgs = mk_msgs(pr)
r = await c(msgs, sp=sp, tools=tools, tool_choice=choice)
r
ToolUseBlock(id=‘toolu_01SNmtdZP2u1gwB1Q5VDmeUd’, input={‘a’: 604542, ‘b’: 6458932}, name=‘sums’, type=‘tool_use’)
- id:
msg_0141aUhAxoKavmDaRFUWSjgL
- content:
[{'id': 'toolu_01SNmtdZP2u1gwB1Q5VDmeUd', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
tool_use
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 439, 'output_tokens': 57, 'server_tool_use': None, 'service_tier': 'standard'}
mk_funcres_async
mk_funcres_async (fc, ns)
Given tool use block fc
, get tool result, and create a tool_result response.
mk_toolres_async
mk_toolres_async (r:collections.abc.Mapping, ns:Optional[collections.abc.Mapping]=None, obj:Optional=None)
Create a tool_result
message from response r
.
Type | Default | Details | |
---|---|---|---|
r | Mapping | Tool use request response from Claude | |
ns | Optional | None | Namespace to search for tools |
obj | Optional | None | Class to search for tools |
tr = await mk_toolres_async(r, ns=globals())
tr
Finding the sum of 604542 and 6458932
[{'role': 'assistant',
'content': [{'id': 'toolu_01SNmtdZP2u1gwB1Q5VDmeUd',
'input': {'a': 604542, 'b': 6458932},
'name': 'sums',
'type': 'tool_use'}]},
{'role': 'user',
'content': [{'type': 'tool_result',
'tool_use_id': 'toolu_01SNmtdZP2u1gwB1Q5VDmeUd',
'content': '7063474'}]}]
msgs += tr
r = contents(await c(msgs, sp=sp, tools=sums))
r
'604542 + 6458932 = 7,063,474'
Structured Output
AsyncClient.structured
AsyncClient.structured (msgs:list, tools:Optional[list]=None, obj:Optional=None, ns:Optional[collections.abc.Mapping]=None, sp='', temp=0, maxtok=4096, maxthinktok=0, prefill='', stream:bool=False, stop=None, tool_choice:Optional[dict]=None, cb=None, metadata:MetadataParam|NotGiven=NOT_GIVEN, servic e_tier:"Literal['auto','standard_only']|NotGiven" =NOT_GIVEN, stop_sequences:List[str]|NotGiven=NOT_GIVEN, syst em:Union[str,Iterable[TextBlockParam]]|NotGiven=N OT_GIVEN, temperature:float|NotGiven=NOT_GIVEN, thinking:ThinkingConfigParam|NotGiven=NOT_GIVEN, top_k:int|NotGiven=NOT_GIVEN, top_p:float|NotGiven=NOT_GIVEN, extra_headers:Headers|None=None, extra_query:Query|None=None, extra_body:Body|None=None, timeout:float|httpx.Ti meout|None|NotGiven=NOT_GIVEN)
Return the value of all tool calls (generally used for structured outputs)
Type | Default | Details | |
---|---|---|---|
msgs | list | List of messages in the dialog | |
tools | Optional | None | List of tools to make available to Claude |
obj | Optional | None | Class to search for tools |
ns | Optional | None | Namespace to search for tools |
sp | str | The system prompt | |
temp | int | 0 | Temperature |
maxtok | int | 4096 | Maximum tokens |
maxthinktok | int | 0 | Maximum thinking tokens |
prefill | str | Optional prefill to pass to Claude as start of its response | |
stream | bool | False | Stream response? |
stop | NoneType | None | Stop sequence |
tool_choice | Optional | None | Optionally force use of some tool |
cb | NoneType | None | Callback to pass result to when complete |
metadata | MetadataParam | NotGiven | NOT_GIVEN | |
service_tier | Literal[‘auto’, ‘standard_only’] | NotGiven | NOT_GIVEN | |
stop_sequences | List[str] | NotGiven | NOT_GIVEN | |
system | Union[str, Iterable[TextBlockParam]] | NotGiven | NOT_GIVEN | |
temperature | float | NotGiven | NOT_GIVEN | |
thinking | ThinkingConfigParam | NotGiven | NOT_GIVEN | |
top_k | int | NotGiven | NOT_GIVEN | |
top_p | float | NotGiven | NOT_GIVEN | |
extra_headers | Optional | None | Use the following arguments if you need to pass additional parameters to the API that aren’t available via kwargs. The extra values given here take precedence over values defined on the client or passed to this method. |
extra_query | Query | None | None | |
extra_body | Body | None | None | |
timeout | float | httpx.Timeout | None | NotGiven | NOT_GIVEN |
await c.structured(pr, sums)
Finding the sum of 604542 and 6458932
[7063474]
c
ToolUseBlock(id=‘toolu_01Ka54wKJ1vtdroZBQZfmwg3’, input={‘a’: 604542, ‘b’: 6458932}, name=‘sums’, type=‘tool_use’)
Metric | Count | Cost (USD) |
---|---|---|
Input tokens | 1,451 | 0.004353 |
Output tokens | 241 | 0.003615 |
Cache tokens | 0 | 0.000000 |
Server tool use | 0 | 0.000000 |
Total | 1,692 | $0.007968 |
AsyncChat
AsyncChat
AsyncChat (model:Optional[str]=None, cli:Optional[claudette.core.Client]=None, sp='', tools:Optional[list]=None, temp=0, cont_pr:Optional[str]=None, cache:bool=False, hist:list=None, ns:Optional[collections.abc.Mapping]=None)
Anthropic async chat client.
Type | Default | Details | |
---|---|---|---|
model | Optional | None | Model to use (leave empty if passing cli ) |
cli | Optional | None | Client to use (leave empty if passing model ) |
sp | str | ||
tools | Optional | None | |
temp | int | 0 | |
cont_pr | Optional | None | |
cache | bool | False | |
hist | list | None | |
ns | Optional | None |
Exported source
@delegates()
class AsyncChat(Chat):
    def __init__(self,
                 model:Optional[str]=None, # Model to use (leave empty if passing `cli`)
                 cli:Optional[Client]=None, # Client to use (leave empty if passing `model`)
                 **kwargs):
        "Anthropic async chat client."
        super().__init__(model, cli, **kwargs)
        # Replace the sync client created by `Chat.__init__` with an async one.
        if not cli: self.c = AsyncClient(model)
sp = "Always use tools if available, and calculations are requested."
chat = AsyncChat(model, sp=sp)
chat.c.use, chat.h
(In: 0; Out: 0; Cache create: 0; Cache read: 0; Total Tokens: 0; Search: 0, [])
AsyncChat.__call__
AsyncChat.__call__ (pr=None, temp=None, maxtok=4096, maxthinktok=0, stream=False, prefill='', tool_choice:Union[str,bool,dict,NoneType]=None, **kw)
Call self as a function.
Type | Default | Details | |
---|---|---|---|
pr | NoneType | None | Prompt / message |
temp | NoneType | None | Temperature |
maxtok | int | 4096 | Maximum tokens |
maxthinktok | int | 0 | Maximum thinking tokens |
stream | bool | False | Stream response? |
prefill | str | Optional prefill to pass to Claude as start of its response | |
tool_choice | Union | None | Optionally force use of some tool |
kw | VAR_KEYWORD |
Exported source
@patch
async def _append_pr(self:AsyncChat, pr=None):
    "Append prompt `pr` to the dialog history, completing any pending user turn first."
    prev_role = nested_idx(self.h, -1, 'role') if self.h else 'assistant' # First message should be 'user' if no history
    # If a user message is already pending, get Claude's reply before appending another.
    if pr and prev_role == 'user': await self()
    self._post_pr(pr, prev_role)
Exported source
@patch
async def __call__(self:AsyncChat,
        pr=None, # Prompt / message
        temp=None, # Temperature
        maxtok=4096, # Maximum tokens
        maxthinktok=0, # Maximum thinking tokens
        stream=False, # Stream response?
        prefill='', # Optional prefill to pass to Claude as start of its response
        tool_choice:Optional[Union[str,bool,dict]]=None, # Optionally force use of some tool
        **kw):
    "Add prompt `pr` to the dialog and get a response from Claude."
    if temp is None: temp=self.temp
    await self._append_pr(pr)
    def _cb(v):
        # When the response completes, resolve any tool calls and extend history.
        self.last = mk_toolres(v, ns=self.ns)
        self.h += self.last
    return await self.c(self.h, stream=stream, prefill=prefill, sp=self.sp, temp=temp, maxtok=maxtok, maxthinktok=maxthinktok, tools=self.tools, tool_choice=tool_choice, cb=_cb, **kw)
await chat("I'm Jeremy")
await chat("What's my name?")
Your name is Jeremy! You introduced yourself to me at the beginning of our conversation.
- id:
msg_01LBAgDauRDd4hmJrEcbK6cg
- content:
[{'citations': None, 'text': 'Your name is Jeremy! You introduced yourself to me at the beginning of our conversation.', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 55, 'output_tokens': 20, 'server_tool_use': None, 'service_tier': 'standard'}
q = "Very concisely, what is the meaning of life?"
pref = 'According to Douglas Adams,'
await chat(q, prefill=pref)
According to Douglas Adams,42. Philosophically, to find purpose, connect with others, and create meaning through our choices and experiences.
- id:
msg_016BbvGhV3U4Fc7ybsN935QP
- content:
[{'citations': None, 'text': 'According to Douglas Adams,42. Philosophically, to find purpose, connect with others, and create meaning through our choices and experiences.', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 95, 'output_tokens': 26, 'server_tool_use': None, 'service_tier': 'standard'}
chat = AsyncChat(model, sp=sp)
r = await chat("I'm Jeremy", stream=True)
async for o in r: print(o, end='')
r.value
Hello Jeremy! Nice to meet you. How are you doing today? Is there anything I can help you with?
Hello Jeremy! Nice to meet you. How are you doing today? Is there anything I can help you with?
- id:
msg_01Kn8UQrWbXcQZeEbefKC4pj
- content:
[{'citations': None, 'text': 'Hello Jeremy! Nice to meet you. How are you doing today? Is there anything I can help you with?', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 21, 'output_tokens': 26, 'server_tool_use': None, 'service_tier': 'standard'}
pr = f"What is {a}+{b}?"
chat = AsyncChat(model, sp=sp, tools=[sums])
r = await chat(pr)
r
Finding the sum of 604542 and 6458932
I’ll calculate 604542 + 6458932 for you using the available sum function.
- id:
msg_013sQpQikXuVgTKhDnoi83hS
- content:
[{'citations': None, 'text': "I'll calculate 604542 + 6458932 for you using the available sum function.", 'type': 'text'}, {'id': 'toolu_01P97zYsHMmydyXu9LPA9FDE', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
tool_use
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 441, 'output_tokens': 94, 'server_tool_use': None, 'service_tier': 'standard'}
await chat()
The sum of 604542 + 6458932 = 7,063,474.
- id:
msg_013ZwMoUg4DFWbrnzmFUGQwN
- content:
[{'citations': None, 'text': 'The sum of 604542 + 6458932 = 7,063,474.', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 550, 'output_tokens': 24, 'server_tool_use': None, 'service_tier': 'standard'}
fn = Path('samples/puppy.jpg')
img = fn.read_bytes()
Image(img)
q = "In brief, what color flowers are in this image?"
msg = mk_msg([img, q])
await c([msg])
The flowers in this image are purple.
- id:
msg_01Xz1hWuG2mpzUKKSPm81PDF
- content:
[{'citations': None, 'text': 'The flowers in this image are purple.', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 110, 'output_tokens': 11, 'server_tool_use': None, 'service_tier': 'standard'}
Add cache=True
to automatically add to Claude’s KV cache.
chat = AsyncChat(model, sp=sp, cache=True)
await chat("Lorem ipsum dolor sit amet" * 150)
I see you’ve shared a very long block of repeated “Lorem ipsum dolor sit amet” text. Lorem ipsum is commonly used as placeholder text in the printing and typesetting industry.
Is there something specific you’d like me to help you with regarding this text? For example, I could:
- Count how many times the phrase is repeated
- Help you format it differently
- Replace it with actual content
- Explain what Lorem ipsum is and its uses
- Help you with a different task entirely
Just let me know what you need assistance with!
- id:
msg_01PNcWvduh3btWkyvPDZLynx
- content:
[{'citations': None, 'text': 'I see you\'ve shared a very long block of repeated "Lorem ipsum dolor sit amet" text. Lorem ipsum is commonly used as placeholder text in the printing and typesetting industry.\n\nIs there something specific you\'d like me to help you with regarding this text? For example, I could:\n\n- Count how many times the phrase is repeated\n- Help you format it differently\n- Replace it with actual content\n- Explain what Lorem ipsum is and its uses\n- Help you with a different task entirely\n\nJust let me know what you need assistance with!', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 1062, 'cache_read_input_tokens': 0, 'input_tokens': 4, 'output_tokens': 118, 'server_tool_use': None, 'service_tier': 'standard'}
chat.use
In: 4; Out: 118; Cache create: 1062; Cache read: 0; Total Tokens: 1184; Search: 0
In this followup call, nearly all the tokens are cached, so only the new additional tokens are charged at the full rate.
await chat("Whoops, sorry about that!")
No worries at all! That happens to everyone - copy/paste mishaps are pretty common.
Is there something I can actually help you with today?
- id:
msg_01NZBZAj4buBoYGjZyastWHa
- content:
[{'citations': None, 'text': 'No worries at all! That happens to everyone - copy/paste mishaps are pretty common. \n\nIs there something I can actually help you with today?', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 129, 'cache_read_input_tokens': 1062, 'input_tokens': 4, 'output_tokens': 36, 'server_tool_use': None, 'service_tier': 'standard'}
chat.use
In: 8; Out: 154; Cache create: 1191; Cache read: 1062; Total Tokens: 2415; Search: 0
Extended Thinking
Let’s call the model without extended thinking enabled.
chat = AsyncChat(model)
await chat("Write a sentence about Python!")
Python is a versatile, high-level programming language known for its clean syntax and readability, making it popular for everything from web development and data science to artificial intelligence and automation.
- id:
msg_01M8zZjMET8ZErMe9wBP3bXN
- content:
[{'citations': None, 'text': 'Python is a versatile, high-level programming language known for its clean syntax and readability, making it popular for everything from web development and data science to artificial intelligence and automation.', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 13, 'output_tokens': 40, 'server_tool_use': None, 'service_tier': 'standard'}
Now, let’s call the model with extended thinking enabled.
r = await chat("Write a sentence about Python!", maxthinktok=1024)
r
Python’s extensive library ecosystem and cross-platform compatibility make it an excellent choice for both beginners learning to code and experienced developers building complex applications.
Thinking
The human is asking me to write a sentence about Python again. They might want a different sentence this time, or they might have missed my previous response. I’ll provide a new sentence about Python that covers different aspects than my previous response.- id:
msg_01KKDxpzWv4p7d3HNwoRyToT
- content:
[{'signature': 'EqkDCkYIBBgCKkB3jWSUnSIhD+6R32ANWGZV/TKTJYcjAhONNdVh9exilT0GgwOefigfAMPI7bl4IPE3+0+Uxno0x50rRfoqd//jEgxv9XVvRAf4fKyTPS4aDH0bqDSH9+wawaSnCiIwvJxqdhMu+hbtXI4QeYfMxXvqMaI90TM/csgW54F7oKRUvlkfS9BuGxp3KvwIxAjWKpACNXaPCx+R4xDqCAlY27FgRFSyKKwzt5tsD27s7hilQAYB89/e5cRI2QW95KBzXL/sCjiuad1DoCygrlarFdoqsBncUAxwwhgZOue8PKlSrPeAKyIdGOARocLp9/BflW83ZXFNrvojO26dNtDry+L2DFNm0JBp22IxIPRuEo3GD336dX7KOJTe7jimPfaUM7fIw2dphxUwD1IiX/UEeOHILt9R5JnTdsRYBzr0R+Tz/XBX4WStA60S78/DiYodZESejTV0hpHrXm8mFGHem35eL38D1Av0P8WQ0JWot3CA4XaPmZaNPz9fOMWE09jVcF0N5Dn5vmSTuRNoLe7hNDbyQc/CvFv5MAi9jIRH4+E/srsYAQ==', 'thinking': "The human is asking me to write a sentence about Python again. They might want a different sentence this time, or they might have missed my previous response. I'll provide a new sentence about Python that covers different aspects than my previous response.", 'type': 'thinking'}, {'citations': None, 'text': "Python's extensive library ecosystem and cross-platform compatibility make it an excellent choice for both beginners learning to code and experienced developers building complex applications.", 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 90, 'output_tokens': 89, 'server_tool_use': None, 'service_tier': 'standard'}
r.content
[ThinkingBlock(signature='EqkDCkYIBBgCKkB3jWSUnSIhD+6R32ANWGZV/TKTJYcjAhONNdVh9exilT0GgwOefigfAMPI7bl4IPE3+0+Uxno0x50rRfoqd//jEgxv9XVvRAf4fKyTPS4aDH0bqDSH9+wawaSnCiIwvJxqdhMu+hbtXI4QeYfMxXvqMaI90TM/csgW54F7oKRUvlkfS9BuGxp3KvwIxAjWKpACNXaPCx+R4xDqCAlY27FgRFSyKKwzt5tsD27s7hilQAYB89/e5cRI2QW95KBzXL/sCjiuad1DoCygrlarFdoqsBncUAxwwhgZOue8PKlSrPeAKyIdGOARocLp9/BflW83ZXFNrvojO26dNtDry+L2DFNm0JBp22IxIPRuEo3GD336dX7KOJTe7jimPfaUM7fIw2dphxUwD1IiX/UEeOHILt9R5JnTdsRYBzr0R+Tz/XBX4WStA60S78/DiYodZESejTV0hpHrXm8mFGHem35eL38D1Av0P8WQ0JWot3CA4XaPmZaNPz9fOMWE09jVcF0N5Dn5vmSTuRNoLe7hNDbyQc/CvFv5MAi9jIRH4+E/srsYAQ==', thinking="The human is asking me to write a sentence about Python again. They might want a different sentence this time, or they might have missed my previous response. I'll provide a new sentence about Python that covers different aspects than my previous response.", type='thinking'),
TextBlock(citations=None, text="Python's extensive library ecosystem and cross-platform compatibility make it an excellent choice for both beginners learning to code and experienced developers building complex applications.", type='text')]