The async version
Setup
model = models[1]
cli = AsyncAnthropic()
Async SDK
prompt = "I'm Jeremy"
m = mk_msg(prompt)
r = await cli.messages.create(messages=[m], model=model, max_tokens=100)
r
Hi Jeremy! Nice to meet you. I’m Claude. How can I help you today?
- id:
msg_01B9MPdH8yjfF3sCSUpqjdsa
- content:
[{'text': "Hi Jeremy! Nice to meet you. I'm Claude. How can I help you today?", 'type': 'text'}]
- model:
claude-3-5-sonnet-20241022
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 10, 'output_tokens': 22}
msgs = mk_msgs([prompt, r, "I forgot my name. Can you remind me please?"])
msgs
[{'role': 'user', 'content': "I'm Jeremy"},
{'role': 'assistant',
'content': [TextBlock(text="Hi Jeremy! Nice to meet you. I'm Claude. How can I help you today?", type='text')]},
{'role': 'user', 'content': 'I forgot my name. Can you remind me please?'}]
await cli.messages.create(messages=msgs, model=model, max_tokens=200)
You just told me your name is Jeremy.
- id:
msg_01RT1QZUtAGEG7p6oybG4gWD
- content:
[{'text': 'You just told me your name is Jeremy.', 'type': 'text'}]
- model:
claude-3-5-sonnet-20241022
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 46, 'output_tokens': 12}
AsyncClient
AsyncClient (model, cli=None, log=False, cache=False)
Async Anthropic messages client.
Exported source
class AsyncClient(Client):
    def __init__(self, model, cli=None, log=False, cache=False):
        "Async Anthropic messages client."
        super().__init__(model, cli, log, cache)
        if not cli: self.c = AsyncAnthropic(default_headers={'anthropic-beta': 'prompt-caching-2024-07-31'})
c = AsyncClient(model)
c._r(r)
c.use
In: 10; Out: 22; Cache create: 0; Cache read: 0; Total: 32
AsyncClient.__call__
AsyncClient.__call__ (msgs:list, sp='', temp=0, maxtok=4096, prefill='', stream:bool=False, stop=None, tools:Optional[list]=None, tool_choice:Optional[dict]=None, cli=None, log=False, cache=False)
Make an async call to Claude.
| | Type | Default | Details |
|---|---|---|---|
| msgs | list | | List of messages in the dialog |
| sp | str | '' | The system prompt |
| temp | int | 0 | Temperature |
| maxtok | int | 4096 | Maximum tokens |
| prefill | str | '' | Optional prefill to pass to Claude as start of its response |
| stream | bool | False | Stream response? |
| stop | NoneType | None | Stop sequence |
| tools | Optional | None | List of tools to make available to Claude |
| tool_choice | Optional | None | Optionally force use of some tool |
| cli | NoneType | None | |
| log | bool | False | |
| cache | bool | False | |
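The `stop` parameter halts generation when a given sequence appears. A minimal sketch, assuming stop sequences behave the same here as in the sync `Client` (a string or list of strings):

# Sketch: stop generation at the first occurrence of '5'.
# Assumes async stop semantics match the sync Client's.
await c('Count from 1 to 10', stop='5')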
Exported source
@patch
async def _stream(self:AsyncClient, msgs:list, prefill='', **kwargs):
    async with self.c.messages.stream(model=self.model, messages=mk_msgs(msgs, cache=self.cache), **kwargs) as s:
        if prefill: yield prefill
        async for o in s.text_stream: yield o
        self._log(await s.get_final_message(), prefill, msgs, kwargs)
Exported source
@patch
@delegates(Client)
async def __call__(self:AsyncClient,
                   msgs:list, # List of messages in the dialog
                   sp='', # The system prompt
                   temp=0, # Temperature
                   maxtok=4096, # Maximum tokens
                   prefill='', # Optional prefill to pass to Claude as start of its response
                   stream:bool=False, # Stream response?
                   stop=None, # Stop sequence
                   tools:Optional[list]=None, # List of tools to make available to Claude
                   tool_choice:Optional[dict]=None, # Optionally force use of some tool
                   **kwargs):
    "Make an async call to Claude."
    if tools: kwargs['tools'] = [get_schema(o) for o in listify(tools)]
    if tool_choice: kwargs['tool_choice'] = mk_tool_choice(tool_choice)
    msgs = self._precall(msgs, prefill, stop, kwargs)
    if any(t == 'image' for t in get_types(msgs)): assert not self.text_only, f"Images are not supported by the current model type: {self.model}"
    if stream: return self._stream(msgs, prefill=prefill, max_tokens=maxtok, system=sp, temperature=temp, **kwargs)
    res = await self.c.messages.create(
        model=self.model, messages=msgs, max_tokens=maxtok, system=sp, temperature=temp, **kwargs)
    return self._log(res, prefill, msgs, maxtok, sp, temp, stream=stream, stop=stop, **kwargs)
c = AsyncClient(model, log=True)
c.use
In: 0; Out: 0; Cache create: 0; Cache read: 0; Total: 0
c.model = models[1]
await c('Hi')
Hello! How can I help you today?
- id:
msg_01QJQMP2KX5zChTbjuS2rrLA
- content:
[{'citations': None, 'text': 'Hello! How can I help you today?', 'type': 'text'}]
- model:
claude-3-5-sonnet-20241022
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 8, 'output_tokens': 12}
c.use
In: 8; Out: 12; Cache create: 0; Cache read: 0; Total: 20
= "Concisely, what is the meaning of life?"
q = 'According to Douglas Adams,'
pref await c(q, prefill=pref)
According to Douglas Adams, it’s 42. More seriously, there’s no universal answer - it’s deeply personal. Common perspectives include: finding happiness, making meaningful connections, pursuing purpose through work/creativity, helping others, or simply experiencing and appreciating existence.
- id:
msg_01DETfjeGKHJHTCBcinSyvYg
- content:
[{'text': "According to Douglas Adams, it's 42. More seriously, there's no universal answer - it's deeply personal. Common perspectives include: finding happiness, making meaningful connections, pursuing purpose through work/creativity, helping others, or simply experiencing and appreciating existence.", 'type': 'text'}]
- model:
claude-3-5-sonnet-20241022
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 24, 'output_tokens': 52}
c.use
In: 32; Out: 64; Cache create: 0; Cache read: 0; Total: 96
async for o in await c(q, prefill=pref, stream=True): print(o, end='')
According to Douglas Adams, it's 42. More seriously, there's no universal answer - it's deeply personal. Common perspectives include: finding happiness, creating meaning through relationships and achievements, pursuing knowledge, helping others, or following spiritual/religious beliefs. You get to decide what gives your life meaning.
c.use
In: 56; Out: 124; Cache create: 0; Cache read: 0; Total: 180
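If you want the full response text as well as live output, you can accumulate the chunks as they stream; a minimal sketch using the same `q` and `pref` as above:

# Sketch: collect streamed chunks while printing them.
parts = []
async for o in await c(q, prefill=pref, stream=True):
    parts.append(o)
    print(o, end='')
full_text = ''.join(parts)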
def sums(
    a:int, # First thing to sum
    b:int=1 # Second thing to sum
) -> int: # The sum of the inputs
    "Adds a + b."
    print(f"Finding the sum of {a} and {b}")
    return a + b
a,b = 604542,6458932
pr = f"What is {a}+{b}?"
sp = "You are a summing expert."
tools = [sums]
choice = mk_tool_choice('sums')
choice
{'type': 'tool', 'name': 'sums'}
msgs = mk_msgs(pr)
r = await c(msgs, sp=sp, tools=tools, tool_choice=choice)
r
ToolUseBlock(id='toolu_0138PU6MrjrcgWuuceEwCKDK', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')
- id:
msg_01HE98wyEqPtHGq7H3ACGhrV
- content:
[{'id': 'toolu_0138PU6MrjrcgWuuceEwCKDK', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}]
- model:
claude-3-5-sonnet-20241022
- role:
assistant
- stop_reason:
tool_use
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 438, 'output_tokens': 57}
tr = mk_toolres(r, ns=globals())
tr
Finding the sum of 604542 and 6458932
[{'role': 'assistant',
'content': [ToolUseBlock(id='toolu_0138PU6MrjrcgWuuceEwCKDK', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')]},
{'role': 'user',
'content': [{'type': 'tool_result',
'tool_use_id': 'toolu_0138PU6MrjrcgWuuceEwCKDK',
'content': '7063474'}]}]
msgs += tr
r = contents(await c(msgs, sp=sp, tools=sums))
r
'The sum of 604542 and 6458932 is 7063474.'
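The two-step pattern above (call, append tool results, call again) generalizes to a small loop that keeps going until Claude stops asking for tools; a sketch using the same names as above:

# Sketch: general async tool-use loop with the names defined above.
msgs = mk_msgs(pr)
r = await c(msgs, sp=sp, tools=tools)
while r.stop_reason == 'tool_use':
    msgs += mk_toolres(r, ns=globals())   # run the tool, append call + result
    r = await c(msgs, sp=sp, tools=tools)
print(contents(r))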
Structured Output
AsyncClient.structured
AsyncClient.structured (msgs:list, tools:Optional[list]=None, obj:Optional=None, ns:Optional[collections.abc.Mapping]=None, sp='', temp=0, maxtok=4096, prefill='', stream:bool=False, stop=None, tool_choice:Optional[dict]=None, metadata:MetadataParam|NotGiven=NOT_GIVEN, stop_sequences:List[str]|NotGiven=NOT_GIVEN, system:Union[str,Iterable[TextBlockParam]]|NotGiven=NOT_GIVEN, temperature:float|NotGiven=NOT_GIVEN, top_k:int|NotGiven=NOT_GIVEN, top_p:float|NotGiven=NOT_GIVEN, extra_headers:Headers|None=None, extra_query:Query|None=None, extra_body:Body|None=None, timeout:float|httpx.Timeout|None|NotGiven=NOT_GIVEN)
Return the value of all tool calls (generally used for structured outputs)
| | Type | Default | Details |
|---|---|---|---|
| msgs | list | | List of messages in the dialog |
| tools | Optional | None | List of tools to make available to Claude |
| obj | Optional | None | Class to search for tools |
| ns | Optional | None | Namespace to search for tools |
| sp | str | '' | The system prompt |
| temp | int | 0 | Temperature |
| maxtok | int | 4096 | Maximum tokens |
| prefill | str | '' | Optional prefill to pass to Claude as start of its response |
| stream | bool | False | Stream response? |
| stop | NoneType | None | Stop sequence |
| tool_choice | Optional | None | Optionally force use of some tool |
| metadata | MetadataParam \| NotGiven | NOT_GIVEN | |
| stop_sequences | List[str] \| NotGiven | NOT_GIVEN | |
| system | Union[str, Iterable[TextBlockParam]] \| NotGiven | NOT_GIVEN | |
| temperature | float \| NotGiven | NOT_GIVEN | |
| top_k | int \| NotGiven | NOT_GIVEN | |
| top_p | float \| NotGiven | NOT_GIVEN | |
| extra_headers | Headers \| None | None | Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. The extra values given here take precedence over values defined on the client or passed to this method. |
| extra_query | Query \| None | None | |
| extra_body | Body \| None | None | |
| timeout | float \| httpx.Timeout \| None \| NotGiven | NOT_GIVEN | |
await c.structured(pr, sums)
Finding the sum of 604542 and 6458932
[7063474]
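`structured` isn't limited to arithmetic: any function (or a class passed via `obj`/`ns`) can act as an extraction schema, with its return values collected into the result list. A sketch with a hypothetical `find_book` function (not part of the library):

# Sketch: a hypothetical extraction schema for structured output.
def find_book(
    title:str, # Title of the book
    author:str # Author of the book
) -> tuple:
    "Extract a book's title and author."
    return title, author

await c.structured("Name a famous novel by Jane Austen.", find_book)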
c
ToolUseBlock(id='toolu_01MGLN3RSg7ZEVghQ8vBbFvK', input={'a': 604542, 'b': 6458932}, name='sums', type='tool_use')
| Metric | Count | Cost (USD) |
|---|---|---|
| Input tokens | 1,448 | 0.004344 |
| Output tokens | 261 | 0.003915 |
| Cache tokens | 0 | 0.000000 |
| Total | 1,709 | $0.008259 |
AsyncChat
AsyncChat
AsyncChat (model:Optional[str]=None, cli:Optional[claudette.core.Client]=None, sp='', tools:Optional[list]=None, temp=0, cont_pr:Optional[str]=None, cache:bool=False)
Anthropic async chat client.
| | Type | Default | Details |
|---|---|---|---|
| model | Optional | None | Model to use (leave empty if passing `cli`) |
| cli | Optional | None | Client to use (leave empty if passing `model`) |
| sp | str | '' | |
| tools | Optional | None | |
| temp | int | 0 | |
| cont_pr | Optional | None | |
| cache | bool | False | |
Exported source
@delegates()
class AsyncChat(Chat):
    def __init__(self,
                 model:Optional[str]=None, # Model to use (leave empty if passing `cli`)
                 cli:Optional[Client]=None, # Client to use (leave empty if passing `model`)
                 **kwargs):
        "Anthropic async chat client."
        super().__init__(model, cli, **kwargs)
        if not cli: self.c = AsyncClient(model)
= "Never mention what tools you use."
sp = AsyncChat(model, sp=sp)
chat chat.c.use, chat.h
(In: 0; Out: 0; Cache create: 0; Cache read: 0; Total: 0, [])
AsyncChat.__call__
AsyncChat.__call__ (pr=None, temp=None, maxtok=4096, stream=False, prefill='', tool_choice:Union[str,bool,dict,NoneType]=None, **kw)
Call self as a function.
| | Type | Default | Details |
|---|---|---|---|
| pr | NoneType | None | Prompt / message |
| temp | NoneType | None | Temperature |
| maxtok | int | 4096 | Maximum tokens |
| stream | bool | False | Stream response? |
| prefill | str | '' | Optional prefill to pass to Claude as start of its response |
| tool_choice | Union | None | Optionally force use of some tool |
| kw | VAR_KEYWORD | | |
Exported source
@patch
async def _stream(self:AsyncChat, res):
    async for o in res: yield o
    self.h += mk_toolres(self.c.result, ns=self.tools, obj=self)
Exported source
@patch
async def _append_pr(self:AsyncChat, pr=None):
    prev_role = nested_idx(self.h, -1, 'role') if self.h else 'assistant' # First message should be 'user' if no history
    if pr and prev_role == 'user': await self()
    self._post_pr(pr, prev_role)
Exported source
@patch
async def __call__(self:AsyncChat,
                   pr=None, # Prompt / message
                   temp=None, # Temperature
                   maxtok=4096, # Maximum tokens
                   stream=False, # Stream response?
                   prefill='', # Optional prefill to pass to Claude as start of its response
                   tool_choice:Optional[Union[str,bool,dict]]=None, # Optionally force use of some tool
                   **kw):
    if temp is None: temp = self.temp
    await self._append_pr(pr)
    res = await self.c(self.h, stream=stream, prefill=prefill, sp=self.sp, temp=temp, maxtok=maxtok,
                       tools=self.tools, tool_choice=tool_choice, **kw)
    if stream: return self._stream(res)
    self.h += mk_toolres(self.c.result, ns=mk_ns(*listify(self.tools)))
    return res
await chat("I'm Jeremy")
await chat("What's my name?")
Your name is Jeremy.
- id:
msg_01BXAR1LsLKWC48tGqhsCwvK
- content:
[{'text': 'Your name is Jeremy.', 'type': 'text'}]
- model:
claude-3-5-sonnet-20241022
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 41, 'output_tokens': 8}
= "Concisely, what is the meaning of life?"
q = 'According to Douglas Adams,'
pref await chat(q, prefill=pref)
According to Douglas Adams, 42. But in reality, it’s to find personal meaning through experiences, relationships, and pursuing what matters to you.
- id:
msg_019JMS841eTKjteomdZPcxUh
- content:
[{'text': "According to Douglas Adams, 42. But in reality, it's to find personal meaning through experiences, relationships, and pursuing what matters to you.", 'type': 'text'}]
- model:
claude-3-5-sonnet-20241022
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 69, 'output_tokens': 28}
chat = AsyncChat(model, sp=sp)
async for o in await chat("I'm Jeremy", stream=True): print(o, end='')
Hello Jeremy! Nice to meet you. How are you today?
= f"What is {a}+{b}?"
pr = AsyncChat(model, sp=sp, tools=[sums])
chat = await chat(pr)
r r
Finding the sum of 604542 and 6458932
Let me calculate that sum for you.
- id:
msg_014yUKprFaUmofUXFRCzDNEi
- content:
[{'text': 'Let me calculate that sum for you.', 'type': 'text'}, {'id': 'toolu_01CgQzpBcF9TgriNCfRxQyw4', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}]
- model:
claude-3-5-sonnet-20241022
- role:
assistant
- stop_reason:
tool_use
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 437, 'output_tokens': 81}
+= " Say the answer in a sentence."
pr = AsyncChat(model, sp=sp, tools=[sums])
chat = await chat(pr)
r r
Finding the sum of 604542 and 6458932
Let me calculate that sum for you.
- id:
msg_01Fz1v4XAo6kVy4tN78C1ANT
- content:
[{'text': 'Let me calculate that sum for you.', 'type': 'text'}, {'id': 'toolu_01LzBzw8C9gVJBo4QwDqALhZ', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}]
- model:
claude-3-5-sonnet-20241022
- role:
assistant
- stop_reason:
tool_use
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 444, 'output_tokens': 81}
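The `tool_use` stop reason means the tool has already run and its call and result were appended to the history (see `__call__` above), so the final sentence comes from calling the chat again. A sketch, assuming the async chat continues from a pending tool result the way the sync `Chat` does:

# Sketch: continue the conversation so Claude answers from the tool result.
await chat()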
fn = Path('samples/puppy.jpg')
img = fn.read_bytes()
display.Image(img)
q = "In brief, what color flowers are in this image?"
msg = mk_msg([img, q])
await c([msg])
In this adorable puppy photo, there are purple/lavender colored flowers (appears to be asters or similar daisy-like flowers) in the background.
- id:
msg_01AfDQFSfMvqPb95VoLcCJHY
- content:
[{'text': 'In this adorable puppy photo, there are purple/lavender colored flowers (appears to be asters or similar daisy-like flowers) in the background.', 'type': 'text'}]
- model:
claude-3-5-sonnet-20241022
- role:
assistant
- stop_reason:
end_turn
- stop_sequence:
None
- type:
message
- usage:
{'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0, 'input_tokens': 110, 'output_tokens': 37}
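Images should also work in a multi-turn chat by passing the same list-of-parts prompt; a sketch assuming `AsyncChat` accepts image bytes the way the sync `Chat` does:

# Sketch: image in a chat turn, then a text-only follow-up that reuses history.
chat = AsyncChat(model)
await chat([img, q])
await chat("What about the puppy itself: what color is its fur?")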
chat = AsyncChat(model, sp=sp, cache=True)
await chat("Lorem ipsum dolor sit amet" * 150)
chat.use
In: 4; Out: 117; Cache create: 0; Cache read: 1058; Total: 1179
await chat("Whoops, sorry about that!")
chat.use
In: 8; Out: 150; Cache create: 0; Cache read: 2244; Total: 2402
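Note how the second turn re-reads the long first prompt from the cache (the growing `Cache read` count) rather than paying full input-token price again. The accumulated usage exposes individual counters; a sketch assuming the standard Anthropic `Usage` field names:

# Sketch: inspect cache counters on accumulated usage (assumed field names).
u = chat.use
print(u.input_tokens, u.output_tokens, u.cache_read_input_tokens)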