The async version

Setup

model = models[1]

Async SDK

cli = AsyncAnthropic()

m = {'role': 'user', 'content': "I'm Jeremy"}
r = await cli.messages.create(messages=[m], model=model, max_tokens=100)
r
Hello Jeremy! It’s nice to meet you. How can I assist you today? Is there anything specific you’d like to talk about or any questions you have?
- id: msg_019gsEQs5dqb3kgwNJbTH27M
- content: [{'text': "Hello Jeremy! It's nice to meet you. How can I assist you today? Is there anything specific you'd like to talk about or any questions you have?", 'type': 'text'}]
- model: claude-3-5-sonnet-20240620
- role: assistant
- stop_reason: end_turn
- stop_sequence: None
- type: message
- usage: {'input_tokens': 10, 'output_tokens': 36}
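For reference, the cells above assume the notebook's usual setup. A minimal sketch, assuming `models` comes from claudette's exports and that `ANTHROPIC_API_KEY` is set in the environment:

```python
# Setup sketch -- AsyncAnthropic is the official SDK's async client and picks up
# ANTHROPIC_API_KEY from the environment; claudette exports `models` and the helpers below.
from anthropic import AsyncAnthropic
from claudette import *
```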
AsyncClient
AsyncClient (model, cli=None, log=False)
Async Anthropic messages client.
Exported source
class AsyncClient(Client):
    def __init__(self, model, cli=None, log=False):
        "Async Anthropic messages client."
        super().__init__(model,cli,log)
        if not cli: self.c = AsyncAnthropic(default_headers={'anthropic-beta': 'prompt-caching-2024-07-31'})
c = AsyncClient(model)
c._r(r)
c.use
In: 10; Out: 36; Total: 46
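`_r` logs a response on the client and `use` reports the running totals. A toy version of that bookkeeping (hypothetical, not claudette's actual implementation):

```python
class UsageTracker:
    "Toy accumulator mirroring what `Client.use` reports."
    def __init__(self): self.inp = self.out = 0
    def _r(self, resp):
        # add this response's token counts to the running totals
        self.inp += resp.usage.input_tokens
        self.out += resp.usage.output_tokens
    @property
    def use(self): return f'In: {self.inp}; Out: {self.out}; Total: {self.inp+self.out}'
```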
AsyncClient.__call__
AsyncClient.__call__ (msgs:list, sp='', temp=0, maxtok=4096, prefill='', stream:bool=False, stop=None, cli=None, log=False)
Make an async call to Claude.
| | Type | Default | Details |
|---|---|---|---|
| msgs | list |  | List of messages in the dialog |
| sp | str |  | The system prompt |
| temp | int | 0 | Temperature |
| maxtok | int | 4096 | Maximum tokens |
| prefill | str |  | Optional prefill to pass to Claude as start of its response |
| stream | bool | False | Stream response? |
| stop | NoneType | None | Stop sequence |
| cli | NoneType | None |  |
| log | bool | False |  |
Exported source
@patch
async def _stream(self:AsyncClient, msgs:list, prefill='', **kwargs):
    async with self.c.messages.stream(model=self.model, messages=mk_msgs(msgs), **kwargs) as s:
        if prefill: yield prefill
        async for o in s.text_stream: yield o
        self._log(await s.get_final_message(), prefill, msgs, kwargs)
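`_stream` is an async generator: it yields text deltas as they arrive, and only after the stream is exhausted does it log the final message. The same shape in a self-contained sketch (hypothetical stand-ins, no SDK required):

```python
import asyncio

async def fake_stream(chunks):
    "Stand-in for the SDK's `text_stream`."
    for c in chunks:
        await asyncio.sleep(0)  # pretend each chunk arrives over the network
        yield c

async def stream_then_log(chunks, prefill=''):
    "Yield deltas first; bookkeeping runs once the stream is exhausted, as in `_stream`."
    if prefill: yield prefill
    async for o in fake_stream(chunks): yield o
    print('\n[final message logged here]')  # claudette calls self._log(...) at this point

async def main():
    async for o in stream_then_log(['Hello', ' world'], prefill='>> '): print(o, end='')

asyncio.run(main())
```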
Exported source
@patch
@delegates(Client)
async def __call__(self:AsyncClient,
             msgs:list, # List of messages in the dialog
             sp='', # The system prompt
             temp=0, # Temperature
             maxtok=4096, # Maximum tokens
             prefill='', # Optional prefill to pass to Claude as start of its response
             stream:bool=False, # Stream response?
             stop=None, # Stop sequence
             **kwargs):
    "Make an async call to Claude."
    msgs = self._precall(msgs, prefill, stop, kwargs)
    if stream: return self._stream(msgs, prefill=prefill, max_tokens=maxtok, system=sp, temperature=temp, **kwargs)
    res = await self.c.messages.create(
        model=self.model, messages=msgs, max_tokens=maxtok, system=sp, temperature=temp, **kwargs)
    return self._log(res, prefill, msgs, maxtok, sp, temp, stream=stream, stop=stop, **kwargs)
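Note the control flow: with `stream=True` the coroutine returns the async generator from `_stream` instead of a final message. That's why the streaming examples below `await` the call first and then iterate the result, e.g. (in a notebook):

```python
gen = await c('Hi', stream=True)   # the await returns an async generator, not a Message
async for tok in gen: print(tok, end='')
```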
c = AsyncClient(model, log=True)
c.use
In: 0; Out: 0; Total: 0
c.model = models[1]
await c('Hi')
Hello! How can I assist you today? Feel free to ask any questions or let me know if you need help with anything.
- id: msg_01L9vqP9r1LcmvSk8vWGLbPo
- content: [{'text': 'Hello! How can I assist you today? Feel free to ask any questions or let me know if you need help with anything.', 'type': 'text'}]
- model: claude-3-5-sonnet-20240620
- role: assistant
- stop_reason: end_turn
- stop_sequence: None
- type: message
- usage: {'input_tokens': 8, 'output_tokens': 29, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0}
c.use
In: 8; Out: 29; Total: 37
= "Concisely, what is the meaning of life?"
q = 'According to Douglas Adams,'
pref await c(q, prefill=pref)
According to Douglas Adams, the meaning of life is 42. More seriously, there’s no universally agreed upon meaning of life. Many philosophers and religions have proposed different answers, but it remains an open question that individuals must grapple with for themselves.
- id: msg_01KAJbCneA2oCRPVm9EkyDXF
- content: [{'text': "According to Douglas Adams, the meaning of life is 42. More seriously, there's no universally agreed upon meaning of life. Many philosophers and religions have proposed different answers, but it remains an open question that individuals must grapple with for themselves.", 'type': 'text'}]
- model: claude-3-5-sonnet-20240620
- role: assistant
- stop_reason: end_turn
- stop_sequence: None
- type: message
- usage: {'input_tokens': 24, 'output_tokens': 51, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0}
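Prefill is plain Messages-API mechanics: the dialog ends with a partial assistant turn and Claude continues from it. Roughly equivalent with the raw SDK (a sketch reusing `cli`, `q` and `pref` from above):

```python
r = await cli.messages.create(
    model=model, max_tokens=100,
    messages=[{'role': 'user',      'content': q},
              {'role': 'assistant', 'content': pref}])  # partial turn; the reply continues it
```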
async for o in (await c('Hi', stream=True)): print(o, end='')
Hello! How can I assist you today? Feel free to ask any questions or let me know if you need help with anything.
c.use
In: 40; Out: 109; Total: 149
async for o in (await c(q, prefill=pref, stream=True)): print(o, end='')
According to Douglas Adams, the meaning of life is 42. More seriously, there's no universally agreed upon meaning of life. Many philosophers and religions have proposed different answers, but it remains an open question that individuals must grapple with for themselves.
c.use
In: 64; Out: 160; Total: 224
def sums(
    a:int, # First thing to sum
    b:int=1 # Second thing to sum
) -> int: # The sum of the inputs
    "Adds a + b."
    print(f"Finding the sum of {a} and {b}")
    return a + b
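`get_schema` turns those docments-style annotations into an Anthropic tool schema. For `sums` its output looks roughly like this (abridged sketch; the exact description text may differ):

```python
{'name': 'sums',
 'description': 'Adds a + b.',
 'input_schema': {
     'type': 'object',
     'properties': {'a': {'type': 'integer', 'description': 'First thing to sum'},
                    'b': {'type': 'integer', 'description': 'Second thing to sum', 'default': 1}},
     'required': ['a']}}
```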
a,b = 604542,6458932
pr = f"What is {a}+{b}?"
sp = "You are a summing expert."

tools=[get_schema(sums)]
choice = mk_tool_choice('sums')

tools = [get_schema(sums)]
msgs = mk_msgs(pr)
r = await c(msgs, sp=sp, tools=tools, tool_choice=choice)
tr = mk_toolres(r, ns=globals())
msgs += tr
contents(await c(msgs, sp=sp, tools=tools))
Finding the sum of 604542 and 6458932
'As a summing expert, I\'m happy to help you with this addition. The sum of 604542 and 6458932 is 7063474.\n\nTo break it down:\n604542 (first number)\n+ 6458932 (second number)\n= 7063474 (total sum)\n\nThis result was calculated using the "sums" function, which adds two numbers together. Is there anything else you\'d like me to sum for you?'
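The cells above spell out one round trip of the tool loop: call with `tool_choice`, run the tool via `mk_toolres`, then call again with the result appended. The same flow wrapped in a reusable coroutine (a hypothetical helper built only from the calls shown above):

```python
async def one_tool_round(c, pr, tool_fns, force=None, sp=''):
    "One call -> tool execution -> follow-up call, as in the cells above."
    tools = [get_schema(f) for f in tool_fns]
    kw = dict(tool_choice=mk_tool_choice(force)) if force else {}
    msgs = mk_msgs(pr)
    r = await c(msgs, sp=sp, tools=tools, **kw)
    msgs += mk_toolres(r, ns=globals())  # executes the requested tool and appends its result
    return contents(await c(msgs, sp=sp, tools=tools))

# e.g.: await one_tool_round(c, pr, [sums], force='sums', sp=sp)
```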
AsyncChat
AsyncChat
AsyncChat (model:Optional[str]=None, cli:Optional[claudette.core.Client]=None, sp='', tools:Optional[list]=None, temp=0, cont_pr:Optional[str]=None)
Anthropic async chat client.
| | Type | Default | Details |
|---|---|---|---|
| model | Optional | None | Model to use (leave empty if passing `cli`) |
| cli | Optional | None | Client to use (leave empty if passing `model`) |
| sp | str |  |  |
| tools | Optional | None |  |
| temp | int | 0 |  |
| cont_pr | Optional | None |  |
Exported source
@delegates()
class AsyncChat(Chat):
    def __init__(self,
                 model:Optional[str]=None, # Model to use (leave empty if passing `cli`)
                 cli:Optional[Client]=None, # Client to use (leave empty if passing `model`)
                 **kwargs):
        "Anthropic async chat client."
        super().__init__(model, cli, **kwargs)
        if not cli: self.c = AsyncClient(model)
= "Never mention what tools you use."
sp = AsyncChat(model, sp=sp)
chat chat.c.use, chat.h
(In: 0; Out: 0; Total: 0, [])
AsyncChat.__call__
AsyncChat.__call__ (pr=None, temp=0, maxtok=4096, stream=False, prefill='', **kw)
Call self as a function.
| | Type | Default | Details |
|---|---|---|---|
| pr | NoneType | None | Prompt / message |
| temp | int | 0 | Temperature |
| maxtok | int | 4096 | Maximum tokens |
| stream | bool | False | Stream response? |
| prefill | str |  | Optional prefill to pass to Claude as start of its response |
| kw |  |  |  |
Exported source
@patch
async def _stream(self:AsyncChat, res):
    async for o in res: yield o
    self.h += mk_toolres(self.c.result, ns=self.tools, obj=self)
Exported source
@patch
async def _append_pr(self:AsyncChat, pr=None):
    prev_role = nested_idx(self.h, -1, 'role') if self.h else 'assistant' # First message should be 'user' if no history
    if pr and prev_role == 'user': await self()
    self._post_pr(pr, prev_role)
Exported source
@patch
async def __call__(self:AsyncChat,
        pr=None, # Prompt / message
        temp=0, # Temperature
        maxtok=4096, # Maximum tokens
        stream=False, # Stream response?
        prefill='', # Optional prefill to pass to Claude as start of its response
        **kw):
    await self._append_pr(pr)
    if self.tools: kw['tools'] = [get_schema(o) for o in self.tools]
    res = await self.c(self.h, stream=stream, prefill=prefill, sp=self.sp, temp=temp, maxtok=maxtok, **kw)
    if stream: return self._stream(res)
    self.h += mk_toolres(self.c.result, ns=self.tools, obj=self)
    return res
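Since every call is a coroutine, outside a notebook you drive the chat from an event loop. A sketch (assuming the setup above; notebooks already run a loop, so there you just `await`):

```python
import asyncio

async def demo():
    chat = AsyncChat(model, sp="Never mention what tools you use.")
    print(contents(await chat("I'm Jeremy")))
    print(contents(await chat("What's my name?")))  # chat.h carries context between turns

# In a script: asyncio.run(demo())
```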
await chat("I'm Jeremy")
await chat("What's my name?")
Your name is Jeremy, as you mentioned in your previous message.
- id: msg_01NMugMXWpDP9iuTXeLkHarn
- content: [{'text': 'Your name is Jeremy, as you mentioned in your previous message.', 'type': 'text'}]
- model: claude-3-5-sonnet-20240620
- role: assistant
- stop_reason: end_turn
- stop_sequence: None
- type: message
- usage: {'input_tokens': 64, 'output_tokens': 16, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0}
= "Concisely, what is the meaning of life?"
q = 'According to Douglas Adams,'
pref await chat(q, prefill=pref)
According to Douglas Adams, the meaning of life is 42. More seriously, there’s no universally agreed upon answer. Common philosophical perspectives include:
1. Finding personal fulfillment
2. Serving others
3. Pursuing happiness
4. Creating meaning through our choices
5. Experiencing and appreciating existence
Ultimately, many believe each individual must determine their own life’s meaning.
- id: msg_01VPWUQn5Do1Kst8RYUDQvCu
- content: [{'text': "According to Douglas Adams, the meaning of life is 42. More seriously, there's no universally agreed upon answer. Common philosophical perspectives include:\n\n1. Finding personal fulfillment\n2. Serving others\n3. Pursuing happiness\n4. Creating meaning through our choices\n5. Experiencing and appreciating existence\n\nUltimately, many believe each individual must determine their own life's meaning.", 'type': 'text'}]
- model: claude-3-5-sonnet-20240620
- role: assistant
- stop_reason: end_turn
- stop_sequence: None
- type: message
- usage: {'input_tokens': 100, 'output_tokens': 82, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0}
chat = AsyncChat(model, sp=sp)
async for o in (await chat("I'm Jeremy", stream=True)): print(o, end='')
Hello Jeremy! It's nice to meet you. How are you doing today? Is there anything in particular you'd like to chat about or any questions I can help you with?
= f"What is {a}+{b}?"
pr = AsyncChat(model, sp=sp, tools=[sums])
chat = await chat(pr)
r r
Finding the sum of 604542 and 6458932
To answer this question, I can use the “sums” function to add these two numbers together. Let me do that for you.
- id: msg_015z1rffSWFxvj7rSpzc43ZE
- content: [{'text': 'To answer this question, I can use the "sums" function to add these two numbers together. Let me do that for you.', 'type': 'text'}, {'id': 'toolu_01SNKhtfnDQBC4RGY4mUCq1v', 'input': {'a': 604542, 'b': 6458932}, 'name': 'sums', 'type': 'tool_use'}]
- model: claude-3-5-sonnet-20240620
- role: assistant
- stop_reason: tool_use
- stop_sequence: None
- type: message
- usage: {'input_tokens': 428, 'output_tokens': 101, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0}
await chat()
The sum of 604542 and 6458932 is 7063474.
- id: msg_018KAsE2YGiXWjUJkLPrXpb2
- content: [{'text': 'The sum of 604542 and 6458932 is 7063474.', 'type': 'text'}]
- model: claude-3-5-sonnet-20240620
- role: assistant
- stop_reason: end_turn
- stop_sequence: None
- type: message
- usage: {'input_tokens': 543, 'output_tokens': 23, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0}
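Calling `chat()` with no prompt works because `mk_toolres` already appended the tool results to `chat.h`; the empty call just sends the pending history. A sketch of a loop that keeps going until Claude stops requesting tools (hypothetical helper):

```python
async def run_tools(chat, pr):
    "Call until the model stops requesting tools, then return the final text."
    r = await chat(pr)
    while r.stop_reason == 'tool_use':
        r = await chat()  # empty call sends the pending tool results
    return contents(r)
```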
fn = Path('samples/puppy.jpg')
img = fn.read_bytes()

q = "In brief, what color flowers are in this image?"
msg = mk_msg([img_msg(img), text_msg(q)])
await c([msg])
The flowers in this image are purple. They appear to be small, daisy-like flowers, possibly asters or some type of purple daisy, blooming in the background behind the adorable puppy in the foreground.
- id: msg_017qgZggLjUY915mWbWCkb9X
- content: [{'text': 'The flowers in this image are purple. They appear to be small, daisy-like flowers, possibly asters or some type of purple daisy, blooming in the background behind the adorable puppy in the foreground.', 'type': 'text'}]
- model: claude-3-5-sonnet-20240620
- role: assistant
- stop_reason: end_turn
- stop_sequence: None
- type: message
- usage: {'input_tokens': 110, 'output_tokens': 50, 'cache_creation_input_tokens': 0, 'cache_read_input_tokens': 0}
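Under the hood, the image becomes a base64 `image` content block in the Messages API. A hand-rolled equivalent of what `img_msg` produces (sketch based on the public API format):

```python
import base64, mimetypes

def img_block(data: bytes, fname: str = 'samples/puppy.jpg'):
    "Anthropic-style base64 image content block."
    mtype = mimetypes.guess_type(fname)[0] or 'image/jpeg'
    return {'type': 'image',
            'source': {'type': 'base64', 'media_type': mtype,
                       'data': base64.standard_b64encode(data).decode()}}
```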