Merge pull request #116 from microsoft/python
async implementation - closes #97
sethjuarez authored Oct 26, 2024
2 parents 341ac54 + c9ac5bf commit 83a2eae
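
This merge adds an async execution path alongside the existing synchronous one. A minimal usage sketch of the new surface follows; since the `prompty/__init__.py` diff is collapsed below, the `execute_async` entry-point name and its `inputs` keyword are assumptions mirrored from the synchronous `prompty.execute` API:

import asyncio
import prompty

async def main():
    # assumes an `execute_async` counterpart to prompty.execute exists in the
    # (collapsed) __init__.py changes, and that basic.prompty exists locally
    result = await prompty.execute_async(
        "basic.prompty", inputs={"question": "What is Prompty?"}
    )
    print(result)

asyncio.run(main())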
Showing 111 changed files with 11,472 additions and 1,650 deletions.
711 changes: 223 additions & 488 deletions runtime/prompty/pdm.lock

Large diffs are not rendered by default.

432 changes: 309 additions & 123 deletions runtime/prompty/prompty/__init__.py

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion runtime/prompty/prompty/azure/__init__.py
@@ -1,5 +1,5 @@
# __init__.py
- from prompty.core import InvokerException
+ from prompty.invoker import InvokerException

try:
from .executor import AzureOpenAIExecutor
92 changes: 89 additions & 3 deletions runtime/prompty/prompty/azure/executor.py
@@ -1,10 +1,11 @@
import azure.identity
import importlib.metadata
- from typing import Iterator
- from openai import AzureOpenAI
+ from typing import AsyncIterator, Iterator
+ from openai import AzureOpenAI, AsyncAzureOpenAI

from prompty.tracer import Tracer
- from ..core import Invoker, InvokerFactory, Prompty, PromptyStream
+ from ..core import AsyncPromptyStream, Prompty, PromptyStream
+ from ..invoker import Invoker, InvokerFactory

VERSION = importlib.metadata.version("prompty")

@@ -130,3 +131,88 @@ def invoke(self, data: any) -> any:
                return PromptyStream("AzureOpenAIExecutor", response)
        else:
            return response
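
As a quick illustration of the contract above, a sketch of consuming the synchronous result; `executor` and `messages` are hypothetical placeholders for an already-configured AzureOpenAIExecutor and its prepared input:

from typing import Iterator

result = executor.invoke(messages)
if isinstance(result, Iterator):
    # PromptyStream yields raw OpenAI chunk objects as they arrive;
    # a downstream processor turns them into text deltas
    for chunk in result:
        ...
else:
    ...  # complete response object (e.g. a ChatCompletion)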

    async def invoke_async(self, data: str) -> str:
        """Invoke the Azure OpenAI API (Async)

        Parameters
        ----------
        data : str
            The data to send to the Azure OpenAI API

        Returns
        -------
        str
            The response from the Azure OpenAI API
        """
        with Tracer.start("AzureOpenAIAsync") as trace:
            trace("type", "LLM")
            trace("signature", "AzureOpenAIAsync.ctor")
            trace("description", "Async Azure OpenAI Constructor")
            trace("inputs", self.kwargs)
            client = AsyncAzureOpenAI(
                default_headers={
                    "User-Agent": f"prompty/{VERSION}",
                    "x-ms-useragent": f"prompty/{VERSION}",
                },
                **self.kwargs,
            )
            trace("result", client)

        with Tracer.start("create") as trace:
            trace("type", "LLM")
            trace("description", "Azure OpenAI Client")

            if self.api == "chat":
                trace("signature", "AzureOpenAIAsync.chat.completions.create")
                args = {
                    "model": self.deployment,
                    "messages": data if isinstance(data, list) else [data],
                    **self.parameters,
                }
                trace("inputs", args)
                response = await client.chat.completions.create(**args)
                trace("result", response)

            elif self.api == "completion":
                trace("signature", "AzureOpenAIAsync.completions.create")
                args = {
                    "prompt": data,
                    "model": self.deployment,
                    **self.parameters,
                }
                trace("inputs", args)
                response = await client.completions.create(**args)
                trace("result", response)

            elif self.api == "embedding":
                trace("signature", "AzureOpenAIAsync.embeddings.create")
                args = {
                    "input": data if isinstance(data, list) else [data],
                    "model": self.deployment,
                    **self.parameters,
                }
                trace("inputs", args)
                response = await client.embeddings.create(**args)
                trace("result", response)

            elif self.api == "image":
                trace("signature", "AzureOpenAIAsync.images.generate")
                args = {
                    "prompt": data,
                    "model": self.deployment,
                    **self.parameters,
                }
                trace("inputs", args)
                response = await client.images.generate(**args)
                trace("result", response)

        # stream response
        if isinstance(response, AsyncIterator):
            # TODO: handle the case where there might be no usage in the stream
            return AsyncPromptyStream("AzureOpenAIExecutorAsync", response)
        else:
            return response
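
The async counterpart follows the same shape as the synchronous path; a sketch under the same placeholder assumptions (`executor`, `messages`) as the synchronous example above:

import asyncio
from typing import AsyncIterator

async def run():
    result = await executor.invoke_async(messages)
    if isinstance(result, AsyncIterator):
        # AsyncPromptyStream yields raw chunk objects as they arrive
        async for chunk in result:
            ...
    else:
        ...  # complete response object

asyncio.run(run())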
68 changes: 66 additions & 2 deletions runtime/prompty/prompty/azure/processor.py
@@ -1,8 +1,9 @@
- from typing import Iterator
+ from typing import AsyncIterator, Iterator
from openai.types.completion import Completion
from openai.types.images_response import ImagesResponse
from openai.types.chat.chat_completion import ChatCompletion
- from ..core import Invoker, InvokerFactory, Prompty, PromptyStream, ToolCall
+ from ..core import AsyncPromptyStream, Prompty, PromptyStream, ToolCall
+ from ..invoker import Invoker, InvokerFactory
from openai.types.create_embedding_response import CreateEmbeddingResponse


@@ -76,3 +77,66 @@ def generator():
            return PromptyStream("AzureOpenAIProcessor", generator())
        else:
            return data

    async def invoke_async(self, data: str) -> str:
        """Process the Azure OpenAI response (Async)

        Parameters
        ----------
        data : str
            The response data to process

        Returns
        -------
        str
            The processed result
        """
        if isinstance(data, ChatCompletion):
            response = data.choices[0].message
            # tool calls available in response
            if response.tool_calls:
                return [
                    ToolCall(
                        id=tool_call.id,
                        name=tool_call.function.name,
                        arguments=tool_call.function.arguments,
                    )
                    for tool_call in response.tool_calls
                ]
            else:
                return response.content

        elif isinstance(data, Completion):
            return data.choices[0].text
        elif isinstance(data, CreateEmbeddingResponse):
            if len(data.data) == 0:
                raise ValueError("Invalid data")
            elif len(data.data) == 1:
                return data.data[0].embedding
            else:
                return [item.embedding for item in data.data]
        elif isinstance(data, ImagesResponse):
            if len(data.data) == 0:
                raise ValueError("Invalid data")
            elif len(data.data) == 1:
                return data.data[0].url if data.data[0].url else data.data[0].b64_json
            else:
                return [item.url if item.url else item.b64_json for item in data.data]

        elif isinstance(data, AsyncIterator):

            async def generator():
                async for chunk in data:
                    if (
                        len(chunk.choices) == 1
                        and chunk.choices[0].delta.content is not None
                    ):
                        content = chunk.choices[0].delta.content
                        yield content

            return AsyncPromptyStream("AsyncAzureOpenAIProcessor", generator())
        else:
            return data
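
Downstream of this processor, streamed chat output arrives as plain text deltas; a short consumption sketch, where `processed` stands in for an AsyncPromptyStream returned by the branch above:

async def print_stream(processed):
    # each item is a str content delta extracted by generator() above
    async for content in processed:
        print(content, end="", flush=True)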