
openai.BadRequestError: Error code: 400 - {'object': 'error', 'message': 'Tool call IDs should be alphanumeric strings with length 9!', 'type': 'BadRequestError', 'param': None, 'code': 400} #29021

RachelShalom opened this issue Jan 4, 2025 · 2 comments
Labels
🤖:bug Related to a bug, vulnerability, unexpected error with an existing feature

Comments

@RachelShalom

### Checked other resources

  • I added a very descriptive title to this issue.
  • I searched the LangChain documentation with the integrated search.
  • I used the GitHub search to find a similar question and didn't find it.
  • I am sure that this is a bug in LangChain rather than my code.
  • The bug is not resolved by updating to the latest stable version of LangChain (or the specific integration package).

### Example Code

from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())
import os
import random
import string
from math import factorial
from langgraph.graph import StateGraph, END
from typing import TypedDict, Annotated
import operator
from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage, ToolMessage
# from openai import OpenAI
# from langchain_community.chat_models import ChatOpenAI
from pydantic import BaseModel
from langchain_openai import ChatOpenAI, OpenAI
from langchain_mistralai import ChatMistralAI

from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.tools.jira.tool import JiraAction
import httpx

import io
http_client = httpx.Client(verify=False)
# create the search tool
tool = TavilySearchResults(max_results=2)
print(type(tool))
print(tool.name)

# create an agent state: an annotated list of messages

class AgentState(TypedDict):
    messages: Annotated[list[AnyMessage], operator.add]



class Agent:

    def __init__(self, model, tools, system =""):
        self.system = system
        graph = StateGraph(AgentState)
        graph.add_node("llm", self.call_model)
        graph.add_node("action", self.take_action)
        graph.add_conditional_edges(
            "llm",
            self.exists_action,
            {True:"action", False:END}
        )
        graph.add_edge("action", "llm")
        graph.set_entry_point("llm")
        self.graph = graph.compile()  # compile() turns the graph into a LangChain runnable
        self.tools = {t.name: t for t in tools}
        self.model = model.bind_tools(tools, tool_choice="tavily_search_results_json")
    def exists_action(self, state:AgentState):
        result = state['messages'][-1]
        return len(result.tool_calls) > 0
    
    def call_model(self, state:AgentState):
        messages = state["messages"]
        if self.system:
            messages = [SystemMessage(content=self.system)] + messages
        message = self.model.invoke(messages)
        return {'messages':[message]}
     
    def take_action(self, state:AgentState):
        tool_calls = state['messages'][-1].tool_calls
        results = []
        for t in tool_calls:
            print(f"calling: {t}")
            result = self.tools[t['name']].invoke(t['args'])
            #tool_call_id = t['id'].replace("chatcmpl-tool-", "call_")
            results.append(ToolMessage(tool_call_id=t['id'], name=t['name'], content=str(result)))
        print("Back Back to the model")
        return {'messages': results}
    @staticmethod
    def generate_tool_call_id(length=9):
        return ''.join(random.choices(string.ascii_letters + string.digits, k=length))

prompt = """
you are a smart research assistant use the search engine to look up information. \
you are allowed to make multiple calls(either together or in a sequence).
only look for information when you are sure of what you want .\
If you need to look up some information before asking a follow up questions,  you are allowed to do that!
"""
# base_url = os.getenv("DSX_DEVGENAI_BASEURL")
base_url = os.getenv("BASE_URL")
key = os.getenv("DEVGENAI_KEY")
mistral_llm = ChatOpenAI(
    model="mistral-7b-instruct-v03",
    openai_api_base=base_url,
    http_client=http_client,
    openai_api_key=key,
    top_p=1,
    temperature=0.5,
    verbose=True,
)

SearchAgent = Agent(mistral_llm, [tool], system=f"[INST]{prompt}[/INST]")


messages = [HumanMessage(content="what's the weather in Boston, MA?")]
result = SearchAgent.graph.invoke({"messages":messages})
print(result)
print(result["messages"][-1].content)


messages = [HumanMessage(content="what's the weather in Boston, MA and in sf?")]
result = SearchAgent.graph.invoke({"messages":messages})
print(result["messages"][-1].content)

### Error Message and Stack Trace (if applicable)

<class 'langchain_community.tools.tavily_search.tool.TavilySearchResults'>
tavily_search_results_json
calling: {'name': 'tavily_search_results_json', 'args': {'query': 'weather in Boston, MA'}, 'id': 'chatcmpl-tool-9d0f9ae5c5e54b53be284d9d934663eb', 'type': 'tool_call'}
Back Back to the model
Traceback (most recent call last):
  File "/usr/lib/python3.11/runpy.py", line 198, in _run_module_as_main
    return _run_code(code, main_globals, None,
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/lib/python3.11/runpy.py", line 88, in _run_code
    exec(code, run_globals)
  File "/home/rachel_shalom/.vscode-server/extensions/ms-python.debugpy-2024.14.0-linux-x64/bundled/libs/debugpy/adapter/../../debugpy/launcher/../../debugpy/__main__.py", line 71, in <module>
    cli.main()
  File "/home/rachel_shalom/.vscode-server/extensions/ms-python.debugpy-2024.14.0-linux-x64/bundled/libs/debugpy/adapter/../../debugpy/launcher/../../debugpy/../debugpy/server/cli.py", line 501, in main
    run()
  File "/home/rachel_shalom/.vscode-server/extensions/ms-python.debugpy-2024.14.0-linux-x64/bundled/libs/debugpy/adapter/../../debugpy/launcher/../../debugpy/../debugpy/server/cli.py", line 351, in run_file
    runpy.run_path(target, run_name="__main__")
  File "/home/rachel_shalom/.vscode-server/extensions/ms-python.debugpy-2024.14.0-linux-x64/bundled/libs/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_runpy.py", line 310, in run_path
    return _run_module_code(code, init_globals, run_name, pkg_name=pkg_name, script_name=fname)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/rachel_shalom/.vscode-server/extensions/ms-python.debugpy-2024.14.0-linux-x64/bundled/libs/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_runpy.py", line 127, in _run_module_code
    _run_code(code, mod_globals, init_globals, mod_name, mod_spec, pkg_name, script_name)
  File "/home/rachel_shalom/.vscode-server/extensions/ms-python.debugpy-2024.14.0-linux-x64/bundled/libs/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_runpy.py", line 118, in _run_code
    exec(code, run_globals)
  File "/home/rachel_shalom/devx/fast_api_stream/agentic_flows/agent_with_lang_example.py", line 107, in <module>
    result = SearchAgent.graph.invoke({"messages":messages})
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/rachel_shalom/devx/devx_env/lib/python3.11/site-packages/langgraph/pregel/__init__.py", line 1936, in invoke
    for chunk in self.stream(
  File "/home/rachel_shalom/devx/devx_env/lib/python3.11/site-packages/langgraph/pregel/__init__.py", line 1656, in stream
    for _ in runner.tick(
  File "/home/rachel_shalom/devx/devx_env/lib/python3.11/site-packages/langgraph/pregel/runner.py", line 167, in tick
    run_with_retry(
  File "/home/rachel_shalom/devx/devx_env/lib/python3.11/site-packages/langgraph/pregel/retry.py", line 40, in run_with_retry
    return task.proc.invoke(task.input, config)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/rachel_shalom/devx/devx_env/lib/python3.11/site-packages/langgraph/utils/runnable.py", line 408, in invoke
    input = step.invoke(input, config, **kwargs)
            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/rachel_shalom/devx/devx_env/lib/python3.11/site-packages/langgraph/utils/runnable.py", line 184, in invoke
    ret = context.run(self.func, input, **kwargs)
          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/rachel_shalom/devx/fast_api_stream/agentic_flows/agent_with_lang_example.py", line 64, in call_model
    message = self.model.invoke(messages)
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/rachel_shalom/devx/devx_env/lib/python3.11/site-packages/langchain_core/runnables/base.py", line 5354, in invoke
    return self.bound.invoke(
           ^^^^^^^^^^^^^^^^^^
  File "/home/user/devx/devx_env/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py", line 286, in invoke
    self.generate_prompt(
  File "/home/user/devx/devx_env/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py", line 786, in generate_prompt
    return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/user/devx/devx_env/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py", line 643, in generate
    raise e
  File "/home/user/devx/devx_env/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py", line 633, in generate
    self._generate_with_cache(
  File "/home/user/devx/devx_env/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py", line 851, in _generate_with_cache
    result = self._generate(
             ^^^^^^^^^^^^^^^
  File "/home/user/devx/devx_env/lib/python3.11/site-packages/langchain_openai/chat_models/base.py", line 717, in _generate
    response = self.client.create(**payload)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/user/devx/devx_env/lib/python3.11/site-packages/openai/_utils/_utils.py", line 275, in wrapper
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "/home/user/devx/devx_env/lib/python3.11/site-packages/openai/resources/chat/completions.py", line 859, in create
    return self._post(
           ^^^^^^^^^^^
  File "/home/user/devx/devx_env/lib/python3.11/site-packages/openai/_base_client.py", line 1280, in post
    return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/user/devx/devx_env/lib/python3.11/site-packages/openai/_base_client.py", line 957, in request
    return self._request(
           ^^^^^^^^^^^^^^
  File "/home/user/devx/devx_env/lib/python3.11/site-packages/openai/_base_client.py", line 1061, in _request
    raise self._make_status_error_from_response(err.response) from None
openai.BadRequestError: Error code: 400 - {'object': 'error', 'message': 'Tool call IDs should be alphanumeric strings with length 9!', 'type': 'BadRequestError', 'param': None, 'code': 400}
During task with name 'llm' and id '52c2b916-2911-1e0d-7990-9bbd4baf7a5f'

### Description

I am trying to run a simple example with LangGraph and Tavily search. While debugging I can see that the model produces the arguments for the search tool call just fine; it fails on the follow-up model call, where the input is a list of 4 messages:

message = self.model.invoke(messages)

where messages is:
 [SystemMessage(content='[INST]\nyou are a smart research assistant use the search engi...dditional_kwargs={}, response_metadata={}), HumanMessage(content="what's the weather in Boston, MA?", additional_kwargs={}, response_metadata={}), AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'chatcmpl-tool-949a406...details': {}, 'output_token_details': {}}), ToolMessage(content='[{\'url\': \'https://weather.com/weather/tenday/l/USMA0046:1:US\...pl-tool-949a40625f644abb8b47e0d0e04147cc')]
 
It seems to be complaining about the tool_call_id format, but when I tried changing the format of the id to match the examples I saw in the docs, I got the same error.
Not sure how to solve this one.
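
For reference, this error string looks like it comes from vLLM's Mistral tool-call validation, which appears to require ids of exactly 9 alphanumeric characters. If that is the case, rewriting only the ToolMessage's tool_call_id (the commented-out line in take_action) would not be enough, because the AIMessage's own tool_calls still carry the original id, so both sides would need to end up with the same compliant id. A minimal sketch of that normalization (helper names are mine, not tested against this exact endpoint):

import random
import string

from langchain_core.messages import AIMessage

def make_compliant_id(length: int = 9) -> str:
    # 9-character alphanumeric id, the format the backend demands
    return ''.join(random.choices(string.ascii_letters + string.digits, k=length))

def normalize_tool_call_ids(ai_message: AIMessage) -> dict:
    # rewrite non-compliant ids in place and return the old -> new mapping
    id_map = {}
    for call in ai_message.tool_calls:
        new_id = make_compliant_id()
        id_map[call['id']] = new_id
        call['id'] = new_id
    # the raw OpenAI-format payload in additional_kwargs carries the id too
    for raw in ai_message.additional_kwargs.get('tool_calls', []):
        if raw.get('id') in id_map:
            raw['id'] = id_map[raw['id']]
    return id_map

Calling normalize_tool_call_ids on the AIMessage right after self.model.invoke(messages) in call_model would mean take_action's ToolMessage(tool_call_id=t['id'], ...) automatically reuses the compliant id.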

### System Info

System Information
------------------
> OS:  Linux
> OS Version:  #1 SMP Fri Mar 29 23:14:13 UTC 2024
> Python Version:  3.11.11 (main, Dec  4 2024, 08:55:07) [GCC 11.4.0]

Package Information
-------------------
> langchain_core: 0.3.28
> langchain: 0.3.12
> langchain_community: 0.3.0
> langsmith: 0.1.147
> langchain_mistralai: 0.2.4
> langchain_openai: 0.2.14
> langchain_text_splitters: 0.3.4
> langgraph_sdk: 0.1.48

Optional packages not installed
-------------------------------
> langserve

Other Dependencies
------------------
> aiohttp: 3.9.5
> async-timeout: 4.0.3
> dataclasses-json: 0.6.5
> httpx: 0.27.0
> httpx-sse: 0.4.0
> jsonpatch: 1.33
> langsmith-pyo3: Installed. No version info available.
> numpy: 1.26.4
> openai: 1.58.1
> orjson: 3.10.7
> packaging: 24.0
> pydantic: 2.9.1
> pydantic-settings: 2.7.1
> PyYAML: 6.0.1
> requests: 2.32.3
> requests-toolbelt: 1.0.0
> SQLAlchemy: 2.0.29
> tenacity: 8.4.2
> tiktoken: 0.7.0
> tokenizers: 0.19.1
> typing-extensions: 4.12.2
@dosubot dosubot bot added the 🤖:bug Related to a bug, vulnerability, unexpected error with an existing feature label Jan 4, 2025
@keenborder786
Contributor

@RachelShalom I have optimized your code and everything is working well now, please check:

import os
import random
import string
from math import factorial
from langgraph.graph import StateGraph, END
from typing import TypedDict, Annotated
import operator
from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage, ToolMessage
from pydantic import BaseModel
from langchain_mistralai import ChatMistralAI

from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.tools.jira.tool import JiraAction
from langchain.agents.tool_calling_agent.base import create_tool_calling_agent
import httpx

http_client = httpx.Client(verify=False)
# create the search tool
tool = TavilySearchResults(max_results=2)
print(type(tool))
print(tool.name)

# create an agent state: an annotated list of messages

class AgentState(TypedDict):
    messages: Annotated[list[AnyMessage], operator.add]



class Agent:
    def __init__(self, model, tools, system =""):
        self.system = system
        graph = StateGraph(AgentState)
        graph.add_node("llm", self.call_model)
        graph.add_node("action", self.take_action)
        graph.add_conditional_edges(
            "llm",
            self.exists_action,
            {True:"action", False:END}
        )
        graph.add_edge("action", "llm")
        graph.set_entry_point("llm")
        self.graph = graph.compile()  # compile() turns the graph into a LangChain runnable
        self.tools = {t.name: t for t in tools}
        self.model = model.bind_tools(tools, tool_choice="auto")  # changed tool_choice to "auto"
    def exists_action(self, state:AgentState):
        result = state['messages'][-1]
        return len(result.tool_calls) > 0
    
    def call_model(self, state:AgentState):
        messages = state["messages"]
        if self.system:
            messages = [SystemMessage(content=self.system)] + messages
        message = self.model.invoke(messages)
        return {'messages':[message]}
     
    def take_action(self, state:AgentState):
        tool_calls = state['messages'][-1].tool_calls
        results = []
        for t in tool_calls:
            result = self.tools[t['name']].invoke(t['args'])
            results.append(ToolMessage(tool_call_id=t['id'], name=t['name'], content=str(result)))
        return {'messages': results}
    @staticmethod
    def generate_tool_call_id(length=9):
        return ''.join(random.choices(string.ascii_letters + string.digits, k=length))

prompt = """
you are a smart research assistant use the search engine to look up information. \
you are allowed to make multiple calls(either together or in a sequence).
only look for information when you are sure of what you want .\
If you need to look up some information before asking a follow up questions,  you are allowed to do that!
"""

base_url = os.getenv("BASE_URL")
key = os.getenv("DEVGENAI_KEY")
mistral_llm = ChatMistralAI(model="mistral-large-latest", top_p=1, temperature=0.5, verbose=True)  # use the Mistral AI client directly
SearchAgent = Agent(mistral_llm, [tool], system=f"[INST]{prompt}[/INST]")
messages = [HumanMessage(content = "what's the weather in Boston, MA?")]
result = SearchAgent.graph.invoke({"messages":messages})
print(result)
print(result["messages"][-1].content)

@RachelShalom
Author

Hey @keenborder786, thanks for this. I see you switched the model to the ChatMistralAI class. I am using a local endpoint (not Mistral AI), which does not seem to work well with the ChatMistralAI class.
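
Since my endpoint is OpenAI-compatible, the direction I am exploring is to keep ChatOpenAI and rewrite the ids right after each model call, reusing the normalize_tool_call_ids sketch from the description above (again only a sketch, not a verified fix):

def call_model(self, state: AgentState):
    messages = state["messages"]
    if self.system:
        messages = [SystemMessage(content=self.system)] + messages
    message = self.model.invoke(messages)
    if message.tool_calls:  # only rewrite when the model actually requested a tool
        normalize_tool_call_ids(message)
    return {'messages': [message]}

With the rewrite done here, take_action builds its ToolMessage from t['id'] and never sends a non-compliant id back to the server.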
