openai[minor]: release 0.3 #29100

Open · wants to merge 6 commits into master

303 changes: 298 additions & 5 deletions libs/partners/openai/langchain_openai/chat_models/azure.py (large diff not rendered)

627 changes: 384 additions & 243 deletions libs/partners/openai/langchain_openai/chat_models/base.py (large diff not rendered)

6 changes: 3 additions & 3 deletions libs/partners/openai/poetry.lock (generated file; diff not rendered)

4 changes: 2 additions & 2 deletions libs/partners/openai/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"

 [tool.poetry]
 name = "langchain-openai"
-version = "0.2.14"
+version = "0.3.0"
 description = "An integration package connecting OpenAI and LangChain"
 authors = []
 readme = "README.md"
@@ -23,7 +23,7 @@ ignore_missing_imports = true

 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
-langchain-core = "^0.3.27"
+langchain-core = "^0.3.29"
 openai = "^1.58.1"
 tiktoken = ">=0.7,<1"
@@ -55,6 +55,10 @@ def chat_model_params(self) -> dict:
             "azure_endpoint": OPENAI_API_BASE,
         }

+    @property
+    def structured_output_kwargs(self) -> dict:
+        return {"method": "function_calling"}
+
     @pytest.mark.xfail(reason="Not yet supported.")
     def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
         super().test_usage_metadata_streaming(model)
@@ -630,20 +630,38 @@ def test_bind_tools_tool_choice() -> None:
     assert not msg.tool_calls


-def test_openai_structured_output() -> None:
+@pytest.mark.parametrize("model", ["gpt-4o-mini", "o1"])
+def test_openai_structured_output(model: str) -> None:
     class MyModel(BaseModel):
         """A Person"""

         name: str
         age: int

-    llm = ChatOpenAI().with_structured_output(MyModel)
+    llm = ChatOpenAI(model=model).with_structured_output(MyModel)
     result = llm.invoke("I'm a 27 year old named Erick")
     assert isinstance(result, MyModel)
     assert result.name == "Erick"
     assert result.age == 27


+def test_structured_output_errors_with_legacy_models() -> None:
+    class MyModel(BaseModel):
+        """A Person"""
+
+        name: str
+        age: int
+
+    llm = ChatOpenAI(model="gpt-4").with_structured_output(MyModel)
+    with pytest.raises(ValueError) as exception_info:
+        _ = llm.invoke("I'm a 27 year old named Erick")
+    assert "with_structured_output" in str(exception_info.value)
+
+    with pytest.raises(ValueError) as exception_info:
+        _ = list(llm.stream("I'm a 27 year old named Erick"))
+    assert "with_structured_output" in str(exception_info.value)
+
+
 def test_openai_proxy() -> None:
     """Test ChatOpenAI with proxy."""
     chat_openai = ChatOpenAI(openai_proxy="http://localhost:8080")
@@ -820,20 +838,18 @@ class magic_function(BaseModel):


 @pytest.mark.parametrize(
-    ("model", "method", "strict"),
-    [("gpt-4o", "function_calling", True), ("gpt-4o-2024-08-06", "json_schema", None)],
+    ("model", "method"),
+    [("gpt-4o", "function_calling"), ("gpt-4o-2024-08-06", "json_schema")],
 )
 def test_structured_output_strict(
-    model: str,
-    method: Literal["function_calling", "json_schema"],
-    strict: Optional[bool],
+    model: str, method: Literal["function_calling", "json_schema"]
 ) -> None:
     """Test to verify structured output with strict=True."""

     from pydantic import BaseModel as BaseModelProper
     from pydantic import Field as FieldProper

-    llm = ChatOpenAI(model=model, temperature=0)
+    llm = ChatOpenAI(model=model)

     class Joke(BaseModelProper):
         """Joke to tell user."""
@@ -842,10 +858,7 @@ class Joke(BaseModelProper):
         punchline: str = FieldProper(description="answer to resolve the joke")

     # Pydantic class
-    # Type ignoring since the interface only officially supports pydantic 1
-    # or pydantic.v1.BaseModel but not pydantic.BaseModel from pydantic 2.
-    # We'll need to do a pass updating the type signatures.
-    chat = llm.with_structured_output(Joke, method=method, strict=strict)
+    chat = llm.with_structured_output(Joke, method=method, strict=True)
     result = chat.invoke("Tell me a joke about cats.")
     assert isinstance(result, Joke)

@@ -854,7 +867,7 @@ class Joke(BaseModelProper):

     # Schema
     chat = llm.with_structured_output(
-        Joke.model_json_schema(), method=method, strict=strict
+        Joke.model_json_schema(), method=method, strict=True
     )
     result = chat.invoke("Tell me a joke about cats.")
     assert isinstance(result, dict)
@@ -875,26 +888,24 @@ class InvalidJoke(BaseModelProper):
             default="foo", description="answer to resolve the joke"
         )

-    chat = llm.with_structured_output(InvalidJoke, method=method, strict=strict)
+    chat = llm.with_structured_output(InvalidJoke, method=method, strict=True)
     with pytest.raises(openai.BadRequestError):
         chat.invoke("Tell me a joke about cats.")
     with pytest.raises(openai.BadRequestError):
         next(chat.stream("Tell me a joke about cats."))

     chat = llm.with_structured_output(
-        InvalidJoke.model_json_schema(), method=method, strict=strict
+        InvalidJoke.model_json_schema(), method=method, strict=True
     )
     with pytest.raises(openai.BadRequestError):
         chat.invoke("Tell me a joke about cats.")
     with pytest.raises(openai.BadRequestError):
         next(chat.stream("Tell me a joke about cats."))


-@pytest.mark.parametrize(
-    ("model", "method", "strict"), [("gpt-4o-2024-08-06", "json_schema", None)]
-)
+@pytest.mark.parametrize(("model", "method"), [("gpt-4o-2024-08-06", "json_schema")])
 def test_nested_structured_output_strict(
-    model: str, method: Literal["json_schema"], strict: Optional[bool]
+    model: str, method: Literal["json_schema"]
 ) -> None:
     """Test to verify structured output with strict=True for nested object."""

@@ -914,7 +925,7 @@ class JokeWithEvaluation(TypedDict):
         self_evaluation: SelfEvaluation

     # Schema
-    chat = llm.with_structured_output(JokeWithEvaluation, method=method, strict=strict)
+    chat = llm.with_structured_output(JokeWithEvaluation, method=method, strict=True)
     result = chat.invoke("Tell me a joke about cats.")
     assert isinstance(result, dict)
     assert set(result.keys()) == {"setup", "punchline", "self_evaluation"}
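
Taken together, these tests pin down the user-facing call path. A minimal sketch of that usage (model name, schema, and prompt borrowed from the tests above; this block is illustrative, not part of the diff):

from pydantic import BaseModel

from langchain_openai import ChatOpenAI


class Person(BaseModel):
    """A person."""

    name: str
    age: int


llm = ChatOpenAI(model="gpt-4o-2024-08-06")
# json_schema with strict=True, mirroring the parametrized cases above
structured_llm = llm.with_structured_output(Person, method="json_schema", strict=True)
person = structured_llm.invoke("I'm a 27 year old named Erick")
assert isinstance(person, Person)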
@@ -15,7 +15,6 @@
     }),
     'max_retries': 2,
     'max_tokens': 100,
-    'n': 1,
     'openai_api_key': dict({
       'id': list([
         'AZURE_OPENAI_API_KEY',
@@ -11,7 +11,6 @@
     'max_retries': 2,
     'max_tokens': 100,
     'model_name': 'gpt-3.5-turbo',
-    'n': 1,
     'openai_api_key': dict({
       'id': list([
         'OPENAI_API_KEY',
@@ -877,8 +877,6 @@ def test__get_request_payload() -> None:
         ],
         "model": "gpt-4o-2024-08-06",
         "stream": False,
-        "n": 1,
-        "temperature": 0.7,
     }
     payload = llm._get_request_payload(messages)
     assert payload == expected
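
The updated expectation drops the `n` and `temperature` defaults from the request payload. A hedged sketch of the flip side, assuming `_get_request_payload` keeps the signature used in this test — explicitly set values should still be sent:

from langchain_core.messages import HumanMessage

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o-2024-08-06", temperature=0.7)
payload = llm._get_request_payload([HumanMessage("How are you?")])
assert payload["temperature"] == 0.7  # explicitly set, so still included (assumption)
assert "n" not in payload  # unset default omitted, per the snapshot change above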
3 changes: 3 additions & 0 deletions libs/partners/xai/Makefile
@@ -11,6 +11,9 @@ integration_test integration_tests: TEST_FILE=tests/integration_tests/

 test tests:
	poetry run pytest --disable-socket --allow-unix-socket $(TEST_FILE)

+test_watch:
+	poetry run ptw --snapshot-update --now . -- -vv $(TEST_FILE)
+
 integration_test integration_tests:
	poetry run pytest $(TEST_FILE)
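
This mirrors the `test_watch` target in the other partner packages: running `make test_watch` from `libs/partners/xai` re-runs the unit tests on file changes via `ptw` (pytest-watcher), with `--snapshot-update` refreshing syrupy snapshots as it goes.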
7 changes: 4 additions & 3 deletions libs/partners/xai/langchain_xai/chat_models.py
@@ -320,9 +320,9 @@ def _get_ls_params(
     @model_validator(mode="after")
     def validate_environment(self) -> Self:
         """Validate that api key and python package exists in environment."""
-        if self.n < 1:
+        if self.n is not None and self.n < 1:
             raise ValueError("n must be at least 1.")
-        if self.n > 1 and self.streaming:
+        if self.n is not None and self.n > 1 and self.streaming:
             raise ValueError("n must be 1 when streaming.")

         client_params: dict = {
@@ -331,10 +331,11 @@ def validate_environment(self) -> Self:
             ),
             "base_url": self.xai_api_base,
             "timeout": self.request_timeout,
-            "max_retries": self.max_retries,
             "default_headers": self.default_headers,
             "default_query": self.default_query,
         }
+        if self.max_retries is not None:
+            client_params["max_retries"] = self.max_retries

         if client_params["api_key"] is None:
             raise ValueError(
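
With `n` and `max_retries` now allowed to be `None`, the validator only forwards `max_retries` to the client when it is set. A minimal sketch, assuming `ChatXAI` is exported from `langchain_xai` and `XAI_API_KEY` is present in the environment:

from langchain_xai import ChatXAI

# Neither n nor max_retries is passed: both may stay None, and max_retries
# is simply left out of the underlying client parameters.
llm = ChatXAI(model="grok-beta")
print(llm.invoke("Hello").content)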
@@ -10,7 +10,6 @@
     'max_retries': 2,
     'max_tokens': 100,
     'model_name': 'grok-beta',
-    'n': 1,
     'request_timeout': 60.0,
     'stop': list([
     ]),
@@ -21,6 +21,7 @@
 from pydantic import BaseModel, Field
 from pydantic.v1 import BaseModel as BaseModelV1
 from pydantic.v1 import Field as FieldV1
+from typing_extensions import Annotated, TypedDict

 from langchain_tests.unit_tests.chat_models import (
     ChatModelTests,
@@ -191,6 +192,19 @@ def tool_choice_value(self) -> Optional[str]:
             def has_structured_output(self) -> bool:
                 return True

+    .. dropdown:: structured_output_kwargs
+
+        Dict property that can be used to specify additional kwargs for
+        ``with_structured_output``. Useful for testing different models.
+
+        Example:
+
+        .. code-block:: python
+
+            @property
+            def structured_output_kwargs(self) -> dict:
+                return {"method": "function_calling"}
+
     .. dropdown:: supports_json_mode

         Boolean property indicating whether the chat model supports JSON mode in
@@ -1128,18 +1142,17 @@ def has_tool_calling(self) -> bool:

         Joke = _get_joke_class()
         # Pydantic class
-        # Type ignoring since the interface only officially supports pydantic 1
-        # or pydantic.v1.BaseModel but not pydantic.BaseModel from pydantic 2.
-        # We'll need to do a pass updating the type signatures.
-        chat = model.with_structured_output(Joke)  # type: ignore[arg-type]
+        chat = model.with_structured_output(Joke, **self.structured_output_kwargs)
         result = chat.invoke("Tell me a joke about cats.")
         assert isinstance(result, Joke)

         for chunk in chat.stream("Tell me a joke about cats."):
             assert isinstance(chunk, Joke)

         # Schema
-        chat = model.with_structured_output(Joke.model_json_schema())
+        chat = model.with_structured_output(
+            Joke.model_json_schema(), **self.structured_output_kwargs
+        )
         result = chat.invoke("Tell me a joke about cats.")
         assert isinstance(result, dict)
         assert set(result.keys()) == {"setup", "punchline"}
@@ -1182,18 +1195,17 @@ def has_tool_calling(self) -> bool:

         Joke = _get_joke_class()

         # Pydantic class
-        # Type ignoring since the interface only officially supports pydantic 1
-        # or pydantic.v1.BaseModel but not pydantic.BaseModel from pydantic 2.
-        # We'll need to do a pass updating the type signatures.
-        chat = model.with_structured_output(Joke)  # type: ignore[arg-type]
+        chat = model.with_structured_output(Joke, **self.structured_output_kwargs)
         result = await chat.ainvoke("Tell me a joke about cats.")
         assert isinstance(result, Joke)

         async for chunk in chat.astream("Tell me a joke about cats."):
             assert isinstance(chunk, Joke)

         # Schema
-        chat = model.with_structured_output(Joke.model_json_schema())
+        chat = model.with_structured_output(
+            Joke.model_json_schema(), **self.structured_output_kwargs
+        )
         result = await chat.ainvoke("Tell me a joke about cats.")
         assert isinstance(result, dict)
         assert set(result.keys()) == {"setup", "punchline"}
@@ -1244,15 +1256,17 @@ class Joke(BaseModelV1):  # Uses langchain_core.pydantic_v1.BaseModel
             punchline: str = FieldV1(description="answer to resolve the joke")

         # Pydantic class
-        chat = model.with_structured_output(Joke)
+        chat = model.with_structured_output(Joke, **self.structured_output_kwargs)
         result = chat.invoke("Tell me a joke about cats.")
         assert isinstance(result, Joke)

         for chunk in chat.stream("Tell me a joke about cats."):
             assert isinstance(chunk, Joke)

         # Schema
-        chat = model.with_structured_output(Joke.schema())
+        chat = model.with_structured_output(
+            Joke.schema(), **self.structured_output_kwargs
+        )
         result = chat.invoke("Tell me a joke about cats.")
         assert isinstance(result, dict)
         assert set(result.keys()) == {"setup", "punchline"}
@@ -1293,6 +1307,7 @@ def has_tool_calling(self) -> bool:
         if not self.has_tool_calling:
             pytest.skip("Test requires tool calling.")

+        # Pydantic
         class Joke(BaseModel):
             """Joke to tell user."""

@@ -1301,7 +1316,7 @@ class Joke(BaseModel):
                 default=None, description="answer to resolve the joke"
             )

-        chat = model.with_structured_output(Joke)  # type: ignore[arg-type]
+        chat = model.with_structured_output(Joke, **self.structured_output_kwargs)
         setup_result = chat.invoke(
             "Give me the setup to a joke about cats, no punchline."
         )
@@ -1310,6 +1325,24 @@ class Joke(BaseModel):
         joke_result = chat.invoke("Give me a joke about cats, include the punchline.")
         assert isinstance(joke_result, Joke)

+        # Schema
+        chat = model.with_structured_output(
+            Joke.model_json_schema(), **self.structured_output_kwargs
+        )
+        result = chat.invoke("Tell me a joke about cats.")
+        assert isinstance(result, dict)
+
+        # TypedDict
+        class JokeDict(TypedDict):
+            """Joke to tell user."""
+
+            setup: Annotated[str, ..., "question to set up a joke"]
+            punchline: Annotated[Optional[str], None, "answer to resolve the joke"]
+
+        chat = model.with_structured_output(JokeDict, **self.structured_output_kwargs)
+        result = chat.invoke("Tell me a joke about cats.")
+        assert isinstance(result, dict)
+
     def test_json_mode(self, model: BaseChatModel) -> None:
         """Test structured output via `JSON mode. <https://python.langchain.com/docs/concepts/structured_outputs/#json-mode>`_
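
For integration packages that consume these standard tests, the new `structured_output_kwargs` hook threads extra arguments into every `with_structured_output` call in the suite. A sketch of an override (the package, class, and model names here are illustrative, not from the diff):

from typing import Type

from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests

from my_package.chat_models import MyChatModel  # hypothetical integration package


class TestMyChatModelStandard(ChatModelIntegrationTests):
    @property
    def chat_model_class(self) -> Type[BaseChatModel]:
        return MyChatModel

    @property
    def chat_model_params(self) -> dict:
        return {"model": "my-model-001"}  # hypothetical model name

    @property
    def structured_output_kwargs(self) -> dict:
        # Force function calling for models without native JSON-schema support.
        return {"method": "function_calling"}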