TypeError: 'ChatCompletionMessageToolCall' object is not subscriptable
I'm following the Llama Index docs, and the material feels a bit out of date; that seems to be common in the LLM space. I checked OpenAI's documentation but couldn't find the right API. Am I missing something?
When I run main.py, the output is:
Hello! How can I assist you today?
Traceback (most recent call last):
File "/Users/me/Documents/openai-agent/main.py", line 80, in <module>
print(agent.chat("What is 2123 * 215123"))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/me/Documents/openai-agent/main.py", line 54, in chat
function_message = self._call_function(tool_call)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/me/Documents/openai-agent/main.py", line 62, in _call_function
id_ = tool_call["id"]
~~~~~~~~~^^^^^^
TypeError: 'ChatCompletionMessageToolCall' object is not subscriptable
Here is the code:
from typing import Sequence, List
from dotenv import load_dotenv
import json
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool
import nest_asyncio
nest_asyncio.apply()
load_dotenv()
def multiply(a: int, b: int) -> int:
"""Multiplies two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Adds two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
class MyOpenAIAgent:
def __init__(
self,
tools: Sequence[BaseTool] = [],
llm: OpenAI = OpenAI(temperature=0, model="gpt-3.5-turbo-0613"),
chat_history: List[ChatMessage] = [],
) -> None:
self._llm = llm
self._tools = {tool.metadata.name: tool for tool in tools}
self._chat_history = chat_history
def reset(self) -> None:
self._chat_history = []
def chat(self, message: str) -> str:
chat_history = self._chat_history
chat_history.append(ChatMessage(role="user", content=message))
tools = [
tool.metadata.to_openai_tool() for _, tool in self._tools.items()
]
ai_message = self._llm.chat(chat_history, tools=tools).message
additional_kwargs = ai_message.additional_kwargs
chat_history.append(ai_message)
tool_calls = ai_message.additional_kwargs.get("tool_calls", None)
# parallel function calling is now supported
if tool_calls is not None:
for tool_call in tool_calls:
function_message = self._call_function(tool_call)
chat_history.append(function_message)
ai_message = self._llm.chat(chat_history).message
chat_history.append(ai_message)
return ai_message.content
def _call_function(self, tool_call: dict) -> ChatMessage:
id_ = tool_call["id"]
function_call = tool_call["function"]
tool = self._tools[function_call["name"]]
output = tool(**json.loads(function_call["arguments"]))
return ChatMessage(
name=function_call["name"],
content=str(output),
role="tool",
additional_kwargs={
"tool_call_id": id_,
"name": function_call["name"],
},
)
if __name__ == "__main__":
agent = MyOpenAIAgent(tools=[multiply_tool, add_tool])
print(agent.chat("Hi"))
print(agent.chat("What is 2123 * 215123"))
1 Answer
Update
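The root cause: in openai>=1.0 the SDK returns each tool call as a ChatCompletionMessageToolCall Pydantic object rather than a plain dict, so subscripting it raises the TypeError above. Read the fields with attribute access instead, for example:

id_ = tool_call.id                        # was: tool_call["id"]
function_name = tool_call.function.name   # was: tool_call["function"]["name"]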
Here is the full working code as of March 26, 2024:
from typing import Sequence, List
from dotenv import load_dotenv
import json
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool
from llama_index.agent.openai import OpenAIAgent
import nest_asyncio
nest_asyncio.apply()
load_dotenv()
def multiply(a: int, b: int) -> int:
"""Multiplies two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Adds two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
# llama-index also ships a prebuilt agent that handles tool calling for you:
llm = OpenAI(model="gpt-3.5-turbo-0613")
agent = OpenAIAgent.from_tools(
    [multiply_tool, add_tool], llm=llm, verbose=True
)
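# A usage sketch for the prebuilt agent (kept commented out, since `agent`
# is reassigned to the custom class in __main__ below):
# print(agent.chat("What is 2123 * 215123"))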
class MyOpenAIAgent:
def __init__(
self,
tools: Sequence[BaseTool] = [],
llm: OpenAI = OpenAI(temperature=0, model="gpt-3.5-turbo-0613"),
chat_history: List[ChatMessage] = [],
) -> None:
self._llm = llm
self._tools = {tool.metadata.name: tool for tool in tools}
self._chat_history = chat_history
def reset(self) -> None:
self._chat_history = []
def chat(self, message: str) -> str:
chat_history = self._chat_history
chat_history.append(ChatMessage(role="user", content=message))
tools = [
tool.metadata.to_openai_tool() for _, tool in self._tools.items()
]
ai_message = self._llm.chat(chat_history, tools=tools).message
chat_history.append(ai_message)
tool_calls = ai_message.additional_kwargs.get("tool_calls", None)
# parallel function calling is now supported
if tool_calls is not None:
for tool_call in tool_calls:
function_message = self._call_function(tool_call)
chat_history.append(function_message)
ai_message = self._llm.chat(chat_history).message
chat_history.append(ai_message)
return ai_message.content
    def _call_function(self, tool_call) -> ChatMessage:
        # tool_call is a ChatCompletionMessageToolCall (a Pydantic model),
        # so read its fields with attribute access, not subscripting.
        id_ = tool_call.id
        function_name = tool_call.function.name
        tool_arguments_json = tool_call.function.arguments
        tool_arguments = json.loads(tool_arguments_json)
tool = self._tools[function_name]
output = tool(**tool_arguments)
return ChatMessage(
name=function_name,
content=str(output),
role="tool",
additional_kwargs={
"tool_call_id": id_,
"name": function_name,
},
)
if __name__ == "__main__":
agent = MyOpenAIAgent(tools=[multiply_tool, add_tool])
print(agent.chat("Hi"))
print(agent.chat("What is 2123 * 215123"))
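If you would rather keep the original dict-style helper, an alternative is to convert the Pydantic object to a plain dict first. A minimal sketch, assuming the openai SDK's Pydantic v2 models (which expose model_dump()):

    def _call_function(self, tool_call) -> ChatMessage:
        # Convert the ChatCompletionMessageToolCall model to a plain dict
        # so the original subscript-style access keeps working.
        tc = tool_call.model_dump()
        id_ = tc["id"]
        function_call = tc["function"]
        tool = self._tools[function_call["name"]]
        output = tool(**json.loads(function_call["arguments"]))
        return ChatMessage(
            name=function_call["name"],
            content=str(output),
            role="tool",
            additional_kwargs={
                "tool_call_id": id_,
                "name": function_call["name"],
            },
        )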