The async version

Setup

from IPython.display import display,Image

Async SDK

model = models[1]
cli = AsyncAnthropic()
= "I'm Jeremy"
prompt = mk_msg(prompt)
m = await cli.messages.create(messages=[m], model=model, max_tokens=100)
r r
Hi Jeremy! Nice to meet you. How are you doing today? Is there anything I can help you with?
- id:
msg_015XNtKiFEJF4HXdSeSYPtfs
- content:
[{'citations': None, 'text': 'Hi Jeremy! Nice to meet you. How are you doing today? Is there anything I can help you with?', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 10, 'output_tokens': 26, 'server_tool_use': None, 'service_tier': 'standard'}
msgs = mk_msgs([prompt, r, "I forgot my name. Can you remind me please?"])
msgs
[{'role': 'user', 'content': "I'm Jeremy"},
{'role': 'assistant',
'content': [TextBlock(citations=None, text='Hi Jeremy! Nice to meet you. How are you doing today? Is there anything I can help you with?', type='text')]},
{'role': 'user', 'content': 'I forgot my name. Can you remind me please?'}]
await cli.messages.create(messages=msgs, model=model, max_tokens=200)
Your name is Jeremy - you introduced yourself to me just a moment ago in your first message!
- id:
msg_01Ar5E7i3k8VZCEeRyD2fiZm
- content:
[{'citations': None, 'text': 'Your name is Jeremy - you introduced yourself to me just a moment ago in your first message!', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 50, 'output_tokens': 22, 'server_tool_use': None, 'service_tier': 'standard'}
AsyncClient
AsyncClient (model, cli=None, log=False, cache=False)
Async Anthropic messages client.
Exported source
class AsyncClient(Client):
    def __init__(self, model, cli=None, log=False, cache=False):
        "Async Anthropic messages client."
        super().__init__(model,cli,log,cache)
        if not cli: self.c = AsyncAnthropic(default_headers={'anthropic-beta': 'prompt-caching-2024-07-31'})
c = AsyncClient(model)
c._r(r)
c.use
In: 10; Out: 26; Cache create: 0; Cache read: 0; Total Tokens: 36; Search: 0
AsyncClient.__call__
AsyncClient.__call__ (msgs:list, sp='', temp=0, maxtok=4096, maxthinktok=0, prefill='', stream:bool=False, stop=None, tools:Optional[list]=None, tool_choice:Optional[dict]=None, cb=None, cli=None, log=False, cache=False)
Make an async call to Claude.
Type | Default | Details | |
---|---|---|---|
msgs | list | List of messages in the dialog | |
sp | str | The system prompt | |
temp | int | 0 | Temperature |
maxtok | int | 4096 | Maximum tokens |
maxthinktok | int | 0 | Maximum thinking tokens |
prefill | str | Optional prefill to pass to Claude as start of its response | |
stream | bool | False | Stream response? |
stop | NoneType | None | Stop sequence |
tools | Optional | None | List of tools to make available to Claude |
tool_choice | Optional | None | Optionally force use of some tool |
cb | NoneType | None | Callback to pass result to when complete |
cli | NoneType | None | |
log | bool | False | |
cache | bool | False |
Exported source
@asave_iter
async def _astream(o, cm, prefill, cb):
    async with cm as s:
        yield prefill
        async for x in s.text_stream: yield x
        o.value = await s.get_final_message()
        await cb(o.value)
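The asave_iter decorator's source isn't shown here. A rough sketch of the behavior it would need (an assumption inferred from _astream setting o.value, and from r.value being read after streaming below) is to wrap the generator in an object that exposes the final message on .value:

# Sketch only: an assumed shape for `asave_iter`, not the library's code.
class _SavedIter:
    def __init__(self): self.agen,self.value = None,None
    def __aiter__(self): return self.agen.__aiter__()

def asave_iter_sketch(f):
    "Wrap async generator `f` so its final result lands on `.value`."
    def _inner(*args, **kwargs):
        o = _SavedIter()
        o.agen = f(o, *args, **kwargs)  # generator fills in o.value at the end
        return o
    return _inner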
Exported source
@patch
@delegates(Client)
async def __call__(self:AsyncClient,
    msgs:list, # List of messages in the dialog
    sp='', # The system prompt
    temp=0, # Temperature
    maxtok=4096, # Maximum tokens
    maxthinktok=0, # Maximum thinking tokens
    prefill='', # Optional prefill to pass to Claude as start of its response
    stream:bool=False, # Stream response?
    stop=None, # Stop sequence
    tools:Optional[list]=None, # List of tools to make available to Claude
    tool_choice:Optional[dict]=None, # Optionally force use of some tool
    cb=None, # Callback to pass result to when complete
    **kwargs):
    "Make an async call to Claude."
    msgs,kwargs = self._precall(msgs, prefill, sp, temp, maxtok, maxthinktok, stream,
                                stop, tools, tool_choice, kwargs)
    m = self.c.messages
    f = m.stream if stream else m.create
    res = f(model=self.model, messages=msgs, **kwargs)
    async def _cb(v):
        self._log(v, prefill=prefill, msgs=msgs, **kwargs)
        if cb: await cb(v)
    if stream: return _astream(res, prefill, _cb)
    res = await res
    try: return res
    finally: await _cb(res)
c = AsyncClient(model, log=True)
c.use
In: 0; Out: 0; Cache create: 0; Cache read: 0; Total Tokens: 0; Search: 0
c.model = models[1]
await c('Hi')
Hello! How are you doing today? Is there anything I can help you with?
- id:
msg_019u8wbhEBNjkKamuAbZMp7F
- content:
[{'citations': None, 'text': 'Hello! How are you doing today? Is there anything I can help you with?', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 8, 'output_tokens': 20, 'server_tool_use': None, 'service_tier': 'standard'}
c.use
In: 8; Out: 20; Cache create: 0; Cache read: 0; Total Tokens: 28; Search: 0
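The stop parameter from the parameter table above isn't exercised elsewhere in this section. A minimal sketch (not run as part of this notebook, reusing c from above, so output and usage totals aren't shown):

# Sketch: end generation once a stop sequence appears in the output.
await c('Count from 1 to 10, one number per line.', stop=['5'])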
= "Very concisely, what is the meaning of life?"
q = 'According to Douglas Adams,'
pref await c(q, prefill=pref)
According to Douglas Adams,42. But seriously, there’s no universal answer - it’s deeply personal. Many find meaning through relationships, purpose, growth, helping others, or spiritual beliefs. The search itself might be part of the point.
- id:
msg_0169VpQ8KfSySZzFgoHW7Vf5
- content:
[{'citations': None, 'text': "According to Douglas Adams,42. But seriously, there's no universal answer - it's deeply personal. Many find meaning through relationships, purpose, growth, helping others, or spiritual beliefs. The search itself might be part of the point.", 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 24, 'output_tokens': 46, 'server_tool_use': None, 'service_tier': 'standard'}
c.use
In: 32; Out: 66; Cache create: 0; Cache read: 0; Total Tokens: 98; Search: 0
r = await c(q, prefill=pref, stream=True)
async for o in r: print(o, end='')
r.value
According to Douglas Adams,42. But seriously, there's no universal answer - it's deeply personal. Many find meaning through relationships, purpose, growth, helping others, or spiritual beliefs. The search itself might be part of the point.
According to Douglas Adams,42. But seriously, there’s no universal answer - it’s deeply personal. Many find meaning through relationships, purpose, growth, helping others, or spiritual beliefs. The search itself might be part of the point.
- id:
msg_0157uvVorteH83HXU8mmtSUG
- content:
[{'citations': None, 'text': "According to Douglas Adams,42. But seriously, there's no universal answer - it's deeply personal. Many find meaning through relationships, purpose, growth, helping others, or spiritual beliefs. The search itself might be part of the point.", 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 24, 'output_tokens': 46, 'server_tool_use': None, 'service_tier': 'standard'}
c.use
In: 56; Out: 112; Cache create: 0; Cache read: 0; Total Tokens: 168; Search: 0
def sums(
    a:int, # First thing to sum
    b:int=1 # Second thing to sum
) -> int: # The sum of the inputs
    "Adds a + b."
    print(f"Finding the sum of {a} and {b}")
    return a + b
a,b = 604542,6458932
pr = f"What is {a}+{b}?"
sp = "You are a summing expert."
tools=[sums]
choice = mk_tool_choice('sums')
choice
{'type': 'tool', 'name': 'sums'}
msgs = mk_msgs(pr)
r = await c(msgs, sp=sp, tools=tools, tool_choice=choice)
r
ToolUseBlock(id='toolu_01RSf4W7bXgG3y63muRCNdph', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')
- id:
msg_014wPskaa9NVZDc6Sny2E4WE
- content:
[{'id': 'toolu_01RSf4W7bXgG3y63muRCNdph', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
tool_use
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 439, 'output_tokens': 57, 'server_tool_use': None, 'service_tier': 'standard'}
mk_funcres_async
mk_funcres_async (fc, ns)
Given tool use block fc, get tool result, and create a tool_result response.
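The implementation isn't shown here; roughly, the idea (a sketch under assumptions, not the library's exact code) is to look up the tool named in the block, call it with the block's inputs, and wrap the result in a tool_result block:

import inspect

# Rough sketch of the idea behind mk_funcres_async (assumed, not exact):
async def mk_funcres_async_sketch(fc, ns):
    res = ns[fc.name](**fc.input)                  # call the named tool
    if inspect.isawaitable(res): res = await res   # allow async tools too
    return dict(type='tool_result', tool_use_id=fc.id, content=str(res))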
mk_toolres_async
mk_toolres_async (r:collections.abc.Mapping, ns:Optional[collections.abc.Mapping]=None)
Create a tool_result message from response r.
Type | Default | Details | |
---|---|---|---|
r | Mapping | Tool use request response from Claude | |
ns | Optional | None | Namespace to search for tools |
tr = await mk_toolres_async(r, ns=globals())
tr
Finding the sum of 604542 and 6458932
[{'role': 'assistant',
'content': [{'id': 'toolu_01RSf4W7bXgG3y63muRCNdph',
'input': {'a': 604542, 'b': 6458932},
'name': 'sums',
'type': 'tool_use'}]},
{'role': 'user',
'content': [{'type': 'tool_result',
'tool_use_id': 'toolu_01RSf4W7bXgG3y63muRCNdph',
'content': '7063474'}]}]
msgs += tr
r = contents(await c(msgs, sp=sp, tools=sums))
r
'604542 + 6458932 = 7,063,474'
Structured Output
AsyncClient.structured
AsyncClient.structured (msgs:list, tools:Optional[list]=None, ns:Optional[collections.abc.Mapping]=None, sp='', temp=0, maxtok=4096, maxthinktok=0, prefill='', stream:bool=False, stop=None, tool_choice:Optional[dict]=None, cb=None, metadata:MetadataParam|NotGiven=NOT_GIVEN, service_tier:"Literal['auto','standard_only']|NotGiven"=NOT_GIVEN, stop_sequences:List[str]|NotGiven=NOT_GIVEN, system:Union[str,Iterable[TextBlockParam]]|NotGiven=NOT_GIVEN, temperature:float|NotGiven=NOT_GIVEN, thinking:ThinkingConfigParam|NotGiven=NOT_GIVEN, top_k:int|NotGiven=NOT_GIVEN, top_p:float|NotGiven=NOT_GIVEN, extra_headers:Headers|None=None, extra_query:Query|None=None, extra_body:Body|None=None, timeout:float|httpx.Timeout|None|NotGiven=NOT_GIVEN)
Return the value of all tool calls (generally used for structured outputs)
Type | Default | Details | |
---|---|---|---|
msgs | list | List of messages in the dialog | |
tools | Optional | None | List of tools to make available to Claude |
ns | Optional | None | Namespace to search for tools |
sp | str | The system prompt | |
temp | int | 0 | Temperature |
maxtok | int | 4096 | Maximum tokens |
maxthinktok | int | 0 | Maximum thinking tokens |
prefill | str | Optional prefill to pass to Claude as start of its response | |
stream | bool | False | Stream response? |
stop | NoneType | None | Stop sequence |
tool_choice | Optional | None | Optionally force use of some tool |
cb | NoneType | None | Callback to pass result to when complete |
metadata | MetadataParam | NotGiven | NOT_GIVEN | |
service_tier | Literal[‘auto’, ‘standard_only’] | NotGiven | NOT_GIVEN | |
stop_sequences | List[str] | NotGiven | NOT_GIVEN | |
system | Union[str, Iterable[TextBlockParam]] | NotGiven | NOT_GIVEN | |
temperature | float | NotGiven | NOT_GIVEN | |
thinking | ThinkingConfigParam | NotGiven | NOT_GIVEN | |
top_k | int | NotGiven | NOT_GIVEN | |
top_p | float | NotGiven | NOT_GIVEN | |
extra_headers | Optional | None | Use the following arguments if you need to pass additional parameters to the API that aren’t available via kwargs. The extra values given here take precedence over values defined on the client or passed to this method. |
extra_query | Query | None | None | |
extra_body | Body | None | None | |
timeout | float | httpx.Timeout | None | NotGiven | NOT_GIVEN |
await c.structured(pr, sums)
Finding the sum of 604542 and 6458932
[7063474]
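Since structured simply forces a tool call and returns the tools' values, any plain function can serve as a lightweight schema. A hypothetical extraction sketch (the person function below is illustrative, not part of claudette, and isn't run as part of this notebook):

# Hypothetical schema function for structured data extraction.
def person(
    name:str, # The person's name
    age:int # The person's age
) -> dict: # A person record
    "Extract a person record."
    return dict(name=name, age=age)

await c.structured('Extract the person: Alice is 30 years old.', [person])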
c
ToolUseBlock(id='toolu_014Wyt1cQPKDG2x1HuNEm6dq', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')
Metric | Count | Cost (USD) |
---|---|---|
Input tokens | 1,451 | 0.004353 |
Output tokens | 245 | 0.003675 |
Cache tokens | 0 | 0.000000 |
Server tool use | 0 | 0.000000 |
Total | 1,696 | $0.008028 |
AsyncChat
AsyncChat
AsyncChat (model:Optional[str]=None, cli:Optional[claudette.core.Client]=None, sp='', tools:Optional[list]=None, temp=0, cont_pr:Optional[str]=None, cache:bool=False, hist:list=None, ns:Optional[collections.abc.Mapping]=None)
Anthropic async chat client.
Type | Default | Details | |
---|---|---|---|
model | Optional | None | Model to use (leave empty if passing cli ) |
cli | Optional | None | Client to use (leave empty if passing model ) |
sp | str | ||
tools | Optional | None | |
temp | int | 0 | |
cont_pr | Optional | None | |
cache | bool | False | |
hist | list | None | |
ns | Optional | None |
Exported source
@delegates()
class AsyncChat(Chat):
    def __init__(self,
                 model:Optional[str]=None, # Model to use (leave empty if passing `cli`)
                 cli:Optional[Client]=None, # Client to use (leave empty if passing `model`)
                 **kwargs):
        "Anthropic async chat client."
        super().__init__(model, cli, **kwargs)
        if not cli: self.c = AsyncClient(model)
= "Always use tools if available, and calculations are requested."
sp = AsyncChat(model, sp=sp)
chat chat.c.use, chat.h
(In: 0; Out: 0; Cache create: 0; Cache read: 0; Total Tokens: 0; Search: 0, [])
AsyncChat.__call__
AsyncChat.__call__ (pr=None, temp=None, maxtok=4096, maxthinktok=0, stream=False, prefill='', tool_choice:Union[str,bool,dict,NoneType]=None, **kw)
Call self as a function.
Type | Default | Details | |
---|---|---|---|
pr | NoneType | None | Prompt / message |
temp | NoneType | None | Temperature |
maxtok | int | 4096 | Maximum tokens |
maxthinktok | int | 0 | Maximum thinking tokens |
stream | bool | False | Stream response? |
prefill | str | Optional prefill to pass to Claude as start of its response | |
tool_choice | Union | None | Optionally force use of some tool |
kw | VAR_KEYWORD |
Exported source
@patch
async def _append_pr(self:AsyncChat, pr=None):
    prev_role = nested_idx(self.h, -1, 'role') if self.h else 'assistant' # First message should be 'user' if no history
    if pr and prev_role == 'user': await self()
    self._post_pr(pr, prev_role)
Exported source
@patch
async def __call__(self:AsyncChat,
        pr=None, # Prompt / message
        temp=None, # Temperature
        maxtok=4096, # Maximum tokens
        maxthinktok=0, # Maximum thinking tokens
        stream=False, # Stream response?
        prefill='', # Optional prefill to pass to Claude as start of its response
        tool_choice:Optional[Union[str,bool,dict]]=None, # Optionally force use of some tool
        **kw):
    if temp is None: temp=self.temp
    await self._append_pr(pr)
    async def _cb(v):
        self.last = await mk_toolres_async(v, ns=self.ns)
        self.h += self.last
    return await self.c(self.h, stream=stream, prefill=prefill, sp=self.sp, temp=temp, maxtok=maxtok, maxthinktok=maxthinktok, tools=self.tools, tool_choice=tool_choice, cb=_cb, **kw)
await chat("I'm Jeremy")
await chat("What's my name?")
Your name is Jeremy! You introduced yourself to me at the start of our conversation.
- id:
msg_01Pyu3uCBzVdC4kErLbq2peH
- content:
[{'citations': None, 'text': 'Your name is Jeremy! You introduced yourself to me at the start of our conversation.', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 55, 'output_tokens': 20, 'server_tool_use': None, 'service_tier': 'standard'}
= "Very concisely, what is the meaning of life?"
q = 'According to Douglas Adams,'
pref await chat(q, prefill=pref)
According to Douglas Adams,42. Philosophically, to find purpose, connect with others, and create meaning through your choices and experiences.
- id:
msg_01YQvh7nxU4isTjxnGPhi77s
- content:
[{'citations': None, 'text': 'According to Douglas Adams,42. Philosophically, to find purpose, connect with others, and create meaning through your choices and experiences.', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 95, 'output_tokens': 26, 'server_tool_use': None, 'service_tier': 'standard'}
chat = AsyncChat(model, sp=sp)
r = await chat("I'm Jeremy", stream=True)
async for o in r: print(o, end='')
r.value
Hi Jeremy! Nice to meet you. How are you doing today? Is there anything I can help you with?
Hi Jeremy! Nice to meet you. How are you doing today? Is there anything I can help you with?
- id:
msg_019nc6kTgNmCB5CrPksMPoqJ
- content:
[{'citations': None, 'text': 'Hi Jeremy! Nice to meet you. How are you doing today? Is there anything I can help you with?', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 21, 'output_tokens': 26, 'server_tool_use': None, 'service_tier': 'standard'}
= f"What is {a}+{b}?"
pr = AsyncChat(model, sp=sp, tools=[sums])
chat = await chat(pr)
r r
Finding the sum of 604542 and 6458932
I’ll calculate 604542 + 6458932 for you using the available sum function.
- id:
msg_01P4vF1nhMCocPbnMLYJQrYC
- content:
[{'citations': None, 'text': "I'll calculate 604542 + 6458932 for you using the available sum function.", 'type': 'text'}, {'id': 'toolu_017kjtY3mgsbQ32Khv45PGZG', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
tool_use
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 441, 'output_tokens': 94, 'server_tool_use': None, 'service_tier': 'standard'}
Calling the chat object again with no prompt sends the pending tool result back to Claude, which then completes the answer:

await chat()
The sum of 604542 + 6458932 = 7,063,474.
- id:
msg_01U3weEUEBMngittChXXvEW9
- content:
[{'citations': None, 'text': 'The sum of 604542 + 6458932 = 7,063,474.', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 550, 'output_tokens': 24, 'server_tool_use': None, 'service_tier': 'standard'}
fn = Path('samples/puppy.jpg')
img = fn.read_bytes()
Image(img)

q = "In brief, what color flowers are in this image?"
msg = mk_msg([img, q])
await c([msg])
The flowers in this image are purple.
- id:
msg_01TNWd92J6RYs1QCccodCKH9
- content:
[{'citations': None, 'text': 'The flowers in this image are purple.', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 110, 'output_tokens': 11, 'server_tool_use': None, 'service_tier': 'standard'}
Add cache=True to automatically add to Claude’s KV cache.

chat = AsyncChat(model, sp=sp, cache=True)
await chat("Lorem ipsum dolor sit amet" * 150)
I see you’ve shared a very long string of repeated “Lorem ipsum dolor sit amet” text. Lorem ipsum is commonly used as placeholder text in the printing and typesetting industry.
If you’d like me to help you with something specific regarding this text, I’d be happy to assist! For example, I could:
- Count how many times the phrase repeats
- Help you format it differently
- Replace it with actual content
- Use it for a specific purpose you have in mind
Is there something particular you’d like me to do with this Lorem ipsum text, or did you have a question or task in mind?
- id:
msg_01PqiRkprKgWUzucSUUgkeSp
- content:
[{'citations': None, 'text': 'I see you\'ve shared a very long string of repeated "Lorem ipsum dolor sit amet" text. Lorem ipsum is commonly used as placeholder text in the printing and typesetting industry.\n\nIf you\'d like me to help you with something specific regarding this text, I\'d be happy to assist! For example, I could:\n\n- Count how many times the phrase repeats\n- Help you format it differently\n- Replace it with actual content\n- Use it for a specific purpose you have in mind\n\nIs there something particular you\'d like me to do with this Lorem ipsum text, or did you have a question or task in mind?', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 1062, 'cache_read_input_tokens': 0, 'input_tokens': 4, 'output_tokens': 133, 'server_tool_use': None, 'service_tier': 'standard'}
chat.use
In: 4; Out: 133; Cache create: 1062; Cache read: 0; Total Tokens: 1199; Search: 0
In this followup call, nearly all the tokens are cached, so only the new tokens are charged at the full rate.
await chat("Whoops, sorry about that!")
No worries at all! That happens to the best of us - copy/paste mishaps are pretty common.
Is there something I can actually help you with today? I’m here and ready to assist with whatever you need!
- id:
msg_01SZAUn8kxJPmCnVcqDmyfTm
- content:
[{'citations': None, 'text': "No worries at all! That happens to the best of us - copy/paste mishaps are pretty common. \n\nIs there something I can actually help you with today? I'm here and ready to assist with whatever you need!", 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 144, 'cache_read_input_tokens': 1062, 'input_tokens': 4, 'output_tokens': 51, 'server_tool_use': None, 'service_tier': 'standard'}
chat.use
In: 8; Out: 184; Cache create: 1206; Cache read: 1062; Total Tokens: 2460; Search: 0
Extended Thinking
Let’s call the model without extended thinking enabled.
chat = AsyncChat(model)
await chat("Write a sentence about Python!")
Python is a versatile, high-level programming language known for its clean syntax and readability, making it popular for everything from web development and data science to artificial intelligence and automation.
- id:
msg_01KFz2yMQdpnWmgcoinnjVNS
- content:
[{'citations': None, 'text': 'Python is a versatile, high-level programming language known for its clean syntax and readability, making it popular for everything from web development and data science to artificial intelligence and automation.', 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 13, 'output_tokens': 40, 'server_tool_use': None, 'service_tier': 'standard'}
Now, let’s call the model with extended thinking enabled.
= await chat("Write a sentence about Python!", maxthinktok=1024)
r r
Python’s extensive ecosystem of libraries and frameworks, combined with its beginner-friendly nature, has made it one of the most widely-used programming languages in both academic research and industry applications.
Thinking
The human is asking me to write a sentence about Python again. They might want a different sentence this time, or they might have just repeated the request. I should provide a new sentence about Python that covers different aspects than my previous response.
- id:
msg_01PxDWsEjoCqjRw7GS7AkTNe
- content:
[{'signature': 'EqsDCkYIBhgCKkDtJkWtKAsmWFYYcawASegfy35pgXLe4sanJLF2tla/a2QG1haUvfYoesbzCeyeZ96ck0P+C+L3K9AR2Gp4WLO8EgwZ5CtwhP63ipzAEu4aDN4dhtKzKvlHhgJmmCIwGkc3T9SB143oThdCiYgM2QSrnNrEAnHBHWvoQjG/AsrBkcNuxZMFdEjZM4yIcjcAKpIChw9+pZuH7NWzqjN572BbfJ9cqrOnZ/H++lKIS6D5zgSj/xUY1RnqPOwKGNKuLlNFAPBFVr+oS/MIvSNwL378do2DVzT7NWSPWfMxZuWkoJq0yjkD9Jcwt2YS2tn11yo5a2akDNBf8X1PcykbGZ2mWwWmEEqvlPh2WUG4lEHw69BO7dwlfr8SOzcFhK4VDG9skmYMUBQ2+CzI6P9AKGx7MaHJ4toGOtTHMSE2ZiOnutnlR/tqsdw4phEJtwM7ajetraeF2eUY16O/4znytiPbJ7bIPqawGWZbF+N3pn4Ubx2767/LRYLJu3DMF4BBelxOQzAQLKbSUPqGxLvgthtvA9Rn0sya4JYYFrwKSD2bjkrw2BgB', 'thinking': 'The human is asking me to write a sentence about Python again. They might want a different sentence this time, or they might have just repeated the request. I should provide a new sentence about Python that covers different aspects than my previous response.', 'type': 'thinking'}, {'citations': None, 'text': "Python's extensive ecosystem of libraries and frameworks, combined with its beginner-friendly nature, has made it one of the most widely-used programming languages in both academic research and industry applications.", 'type': 'text'}]
- model:
claude-sonnet-4-20250514
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 90, 'output_tokens': 98, 'server_tool_use': None, 'service_tier': 'standard'}
r.content
[ThinkingBlock(signature='EqsDCkYIBhgCKkDtJkWtKAsmWFYYcawASegfy35pgXLe4sanJLF2tla/a2QG1haUvfYoesbzCeyeZ96ck0P+C+L3K9AR2Gp4WLO8EgwZ5CtwhP63ipzAEu4aDN4dhtKzKvlHhgJmmCIwGkc3T9SB143oThdCiYgM2QSrnNrEAnHBHWvoQjG/AsrBkcNuxZMFdEjZM4yIcjcAKpIChw9+pZuH7NWzqjN572BbfJ9cqrOnZ/H++lKIS6D5zgSj/xUY1RnqPOwKGNKuLlNFAPBFVr+oS/MIvSNwL378do2DVzT7NWSPWfMxZuWkoJq0yjkD9Jcwt2YS2tn11yo5a2akDNBf8X1PcykbGZ2mWwWmEEqvlPh2WUG4lEHw69BO7dwlfr8SOzcFhK4VDG9skmYMUBQ2+CzI6P9AKGx7MaHJ4toGOtTHMSE2ZiOnutnlR/tqsdw4phEJtwM7ajetraeF2eUY16O/4znytiPbJ7bIPqawGWZbF+N3pn4Ubx2767/LRYLJu3DMF4BBelxOQzAQLKbSUPqGxLvgthtvA9Rn0sya4JYYFrwKSD2bjkrw2BgB', thinking='The human is asking me to write a sentence about Python again. They might want a different sentence this time, or they might have just repeated the request. I should provide a new sentence about Python that covers different aspects than my previous response.', type='thinking'),
TextBlock(citations=None, text="Python's extensive ecosystem of libraries and frameworks, combined with its beginner-friendly nature, has made it one of the most widely-used programming languages in both academic research and industry applications.", type='text')]
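Since the reasoning arrives as a ThinkingBlock alongside the usual TextBlock, you can filter the content blocks by type to pull out just the thinking text. A small sketch using the response above:

# Sketch: collect the thinking text from the response's content blocks.
thinking = [b.thinking for b in r.content if b.type == 'thinking']
print(thinking[0])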