Please Help!!
Problem: OpenAI and Llama implementation code plus output are provided below. The OpenAI agent implementation works perfectly, calling the search tool three times as required and providing the complete answer. The Llama implementation, using my workplace API hosted on Fireworks, fails to do the same even though the code is completely unchanged — only the model has been swapped. It calls the tool once and then stops.
Context: At my workplace I have been told to learn LangGraph with agents. I started the "AI Agents in LangGraph" course on deeplearning.ai; however, I was later told to use the workplace's Fireworks-hosted Llama model. I am not getting any errors, so I don't even know what to fix here.
OpenAI implementation:
import os
import json
from openai import OpenAI
from datetime import datetime, timedelta
from dotenv import load_dotenv, find_dotenv
from langchain_openai import ChatOpenAI
from langchain.schema import HumanMessage, AIMessage, ChatMessage

# Load environment variables from .env file
load_dotenv()
_ = load_dotenv(find_dotenv())

# Access the OpenAI API key from environment variables
# we use only gpt-4o-mini from now on. yay!
openai_api_key = os.getenv("OPENAI_API_KEY")
langchain_api_key = os.getenv("LANGCHAIN_API_KEY")

# Debug: confirm the key is loaded without printing the secret.
# (The original printed `api_key`, which is undefined here -> NameError,
# and printing the real key would leak it into logs.)
print(f"API key loaded: {openai_api_key is not None}")
if openai_api_key is None:
    raise ValueError("API key is not set. Please set the OPENAI_API_KEY in the .env file.")

# Initialize the OpenAI client
client = OpenAI(api_key=openai_api_key)
llm = ChatOpenAI(model_name="gpt-4o-mini", temperature=0)

from langgraph.graph import StateGraph, END
from typing import TypedDict, Annotated
import operator
from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage, ToolMessage
from langchain_community.tools.tavily_search import TavilySearchResults

# Tavily web-search tool, capped at 2 results per query.
tool = TavilySearchResults(max_results=2)
print(type(tool))
print(tool.name)
class AgentState(TypedDict):
    """Graph state: the running conversation transcript.

    The `operator.add` reducer makes LangGraph CONCATENATE each node's
    returned message list onto the existing one instead of replacing it.
    """
    messages: Annotated[list[AnyMessage], operator.add]
class Agent:
    """ReAct-style LangGraph agent: an LLM node that may request tool calls
    and an action node that executes them, looping until no calls remain."""

    # NOTE: the pasted version had `def init` — without the dunder name,
    # __init__ is never run and the graph is never built.
    def __init__(self, model, tools, system=" "):
        self.system = system
        graph = StateGraph(AgentState)
        graph.add_node("llm", self.call_openai)
        graph.add_node("action", self.take_action)
        graph.add_conditional_edges(
            "llm",                # the conditional edge starts from this node
            self.exists_action,   # function that determines where to go next
            # maps the function's return value to the next node
            {True: "action", False: END},
        )
        graph.add_edge("action", "llm")
        graph.set_entry_point("llm")
        self.graph = graph.compile()  # LangChain runnable is ready
        self.tools = {t.name: t for t in tools}
        self.model = model.bind_tools(tools)

    def exists_action(self, state: AgentState):
        """Return True if the last message carries tool calls to execute."""
        result = state['messages'][-1]
        return len(result.tool_calls) > 0

    def call_openai(self, state: AgentState):
        """Invoke the model on the transcript (system prompt prepended)."""
        messages = state['messages']
        if self.system:
            messages = [SystemMessage(content=self.system)] + messages
        message = self.model.invoke(messages)
        print(message)
        # `messages` is annotated with operator.add, so this APPENDS the new
        # message to state rather than overwriting the list.
        return {'messages': [message]}

    def take_action(self, state: AgentState):
        """Run every tool call from the last AI message; return ToolMessages."""
        tool_calls = state["messages"][-1].tool_calls
        results = []
        for t in tool_calls:
            print(f"Calling: {t}")
            result = self.tools[t['name']].invoke(t['args'])
            results.append(ToolMessage(tool_call_id=t['id'], name=t['name'], content=str(result)))
        print("Back to the model!")
        return {'messages': results}
# System prompt: permits multiple (parallel or sequential) search calls.
prompt = """You are a smart research assistant. Use the search engine to look up information.
You are allowed to make multiple calls (either together or in sequence).
Only look up information when you are sure of what you want.
If you need to look up some information before asking a follow up question, you are allowed to do that!
"""

abot = Agent(model=llm, tools=[tool], system=prompt)
messages = [HumanMessage(content="Who won IPL 2023? What is the gdp of that state and the state beside that combined?")]
result = abot.graph.invoke({"messages": messages})
print(result['messages'][-1].content)
OpenAI output:
<class 'langchain_community.tools.tavily_search.tool.TavilySearchResults'>
tavily_search_results_json
content='' additional_kwargs={'tool_calls': [{'id': 'call_uuUBBnZxDF5yhcCC7zn0ArOu', 'function': {'arguments': '{"query": "IPL 2023 winner"}', 'name': 'tavily_search_results_json'}, 'type': 'function'}, {'id': 'call_mFfUnqm5mISKgr5vAnYlGwu8', 'function': {'arguments': '{"query": "GDP of Gujarat 2023"}', 'name': 'tavily_search_results_json'}, 'type': 'function'}, {'id': 'call_tIDXlc3QuWYdHvrnyRx9ze3X', 'function': {'arguments': '{"query": "GDP of Maharashtra 2023"}', 'name': 'tavily_search_results_json'}, 'type': 'function'}]} response_metadata={'token_usage': {'completion_tokens': 84, 'prompt_tokens': 166, 'total_tokens': 250, 'prompt_tokens_details': {'cached_tokens': 0}, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': 'fp_f59a81427f', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-04615292-a37e-4558-84d2-6371d835467f-0' tool_calls=[{'name': 'tavily_search_results_json', 'args': {'query': 'IPL 2023 winner'}, 'id': 'call_uuUBBnZxDF5yhcCC7zn0ArOu', 'type': 'tool_call'}, {'name': 'tavily_search_results_json', 'args': {'query': 'GDP of Gujarat 2023'}, 'id': 'call_mFfUnqm5mISKgr5vAnYlGwu8', 'type': 'tool_call'}, {'name': 'tavily_search_results_json', 'args': {'query': 'GDP of Maharashtra 2023'}, 'id': 'call_tIDXlc3QuWYdHvrnyRx9ze3X', 'type': 'tool_call'}] usage_metadata={'input_tokens': 166, 'output_tokens': 84, 'total_tokens': 250}
Calling: {'name': 'tavily_search_results_json', 'args': {'query': 'IPL 2023 winner'}, 'id': 'call_uuUBBnZxDF5yhcCC7zn0ArOu', 'type': 'tool_call'}
Calling: {'name': 'tavily_search_results_json', 'args': {'query': 'GDP of Gujarat 2023'}, 'id': 'call_mFfUnqm5mISKgr5vAnYlGwu8', 'type': 'tool_call'}
Calling: {'name': 'tavily_search_results_json', 'args': {'query': 'GDP of Maharashtra 2023'}, 'id': 'call_tIDXlc3QuWYdHvrnyRx9ze3X', 'type': 'tool_call'}
Back to the model!
content="The winner of IPL 2023 was the **Chennai Super Kings (CSK)**, who defeated the Gujarat Titans by five wickets in the final match held at the Narendra Modi Stadium in Ahmedabad. This victory marked CSK's fifth IPL title. [More details here](https://www.iplt20.com/news/3976/tata-ipl-2023-final-csk-vs-gt-match-reportOverall).\n\nNow./n/nNow), regarding the GDP of the states involved:\n\n1. **Gujarat**: The GDP of Gujarat for 2023 is estimated to be around ₹2.96 lakh crore (approximately $36 billion) based on the budget analysis for 2023-24. [Source](https://prsindia.org/budgets/states/gujarat-budget-analysis-2023-24).\n\n2./n/n2). **Maharashtra**: The GDP of Maharashtra for 2023-24 is estimated to be around ₹42.67 trillion (approximately $510 billion). [Source](https://en.wikipedia.org/wiki/Economy_of_Maharashtra).\n\n###./n/n###) Combined GDP of Gujarat and Maharashtra:\n- Gujarat: ₹2.96 lakh crore\n- Maharashtra: ₹42.67 trillion\n\nTo combine these figures:\n- Convert Gujarat's GDP to the same unit as Maharashtra's: ₹2.96 lakh crore = ₹2.96 trillion.\n- Combined GDP = ₹2.96 trillion + ₹42.67 trillion = ₹45.63 trillion (approximately $550 billion).\n\nThus, the combined GDP of Gujarat and Maharashtra is approximately **₹45.63 trillion** (or about **$550 billion**)." response_metadata={'token_usage': {'completion_tokens': 328, 'prompt_tokens': 2792, 'total_tokens': 3120, 'prompt_tokens_details': {'cached_tokens': 0}, 'completion_tokens_details': {'reasoning_tokens': 0}}, 'model_name': 'gpt-4o-mini', 'system_fingerprint': 'fp_f59a81427f', 'finish_reason': 'stop', 'logprobs': None} id='run-5ca9fd99-6884-4dc5-9ce6-ce0156bef852-0' usage_metadata={'input_tokens': 2792, 'output_tokens': 328, 'total_tokens': 3120}
The winner of IPL 2023 was the **Chennai Super Kings (CSK)**, who defeated the Gujarat Titans by five wickets in the final match held at the Narendra Modi Stadium in Ahmedabad. This victory marked CSK's fifth IPL title. [More details here](https://www.iplt20.com/news/3976/tata-ipl-2023-final-csk-vs-gt-match-reportOverall).
Now, regarding the GDP of the states involved:
**Gujarat**: The GDP of Gujarat for 2023 is estimated to be around ₹2.96 lakh crore (approximately $36 billion) based on the budget analysis for 2023-24. [Source](https://prsindia.org/budgets/states/gujarat-budget-analysis-2023-24).
**Maharashtra**: The GDP of Maharashtra for 2023-24 is estimated to be around ₹42.67 trillion (approximately $510 billion). [Source](https://en.wikipedia.org/wiki/Economy_of_Maharashtra).
### Combined GDP of Gujarat and Maharashtra:
- Gujarat: ₹2.96 lakh crore
- Maharashtra: ₹42.67 trillion
To combine these figures:
- Convert Gujarat's GDP to the same unit as Maharashtra's: ₹2.96 lakh crore = ₹2.96 trillion.
- Combined GDP = ₹2.96 trillion + ₹42.67 trillion = ₹45.63 trillion (approximately $550 billion).
Thus, the combined GDP of Gujarat and Maharashtra is approximately **₹45.63 trillion** (or about **$550 billion**).
Llama Implementation::
import os
import json
from openai import OpenAI
from datetime import datetime, timedelta
from dotenv import load_dotenv, find_dotenv
from langchain_openai import ChatOpenAI
from langchain.schema import HumanMessage, AIMessage, ChatMessage

# Load environment variables from .env file
load_dotenv()
_ = load_dotenv(find_dotenv())

# Access the API keys from environment variables
openai_api_key = os.getenv("OPENAI_API_KEY")
langchain_api_key = os.getenv("LANGCHAIN_API_KEY")

# Debug: confirm the key is loaded without printing the secret.
# (The original printed `api_key`, which is undefined here -> NameError,
# and printing the real key would leak it into logs.)
print(f"API key loaded: {openai_api_key is not None}")
if openai_api_key is None:
    raise ValueError("API key is not set. Please set the OPENAI_API_KEY in the .env file.")

# Initialize the OpenAI client
client = OpenAI(api_key=openai_api_key)

# Fireworks-hosted Llama, reached through the OpenAI-compatible endpoint.
# (Removed the dead `llm = ChatOpenAI("gpt-4o-mini")` assignment that was
# immediately overwritten by this one.)
# NOTE(review): unlike gpt-4o-mini, Llama 3.1 via this endpoint appears to
# emit a single tool call per turn (see the output below: one call, then a
# final answer). The graph loop itself is fine; the model simply stops
# requesting searches — presumably a model/endpoint capability difference
# (no parallel tool calls), so the prompt may need to push it to keep
# searching until all sub-questions are answered. TODO confirm against
# Fireworks' function-calling docs.
llm = ChatOpenAI(
    model="accounts/fireworks/models/llama-v3p1-70b-instruct",
    temperature=0,
    api_key=os.getenv("FIREWORKS_API_KEY"),
    base_url="https://api.fireworks.ai/inference/v1",
)

from langgraph.graph import StateGraph, END
from typing import TypedDict, Annotated
import operator
from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage, ToolMessage
from langchain_community.tools.tavily_search import TavilySearchResults

# Tavily web-search tool, capped at 2 results per query.
tool = TavilySearchResults(max_results=2)
print(type(tool))
print(tool.name)
class AgentState(TypedDict):
    """Graph state: the running conversation transcript.

    The `operator.add` reducer makes LangGraph CONCATENATE each node's
    returned message list onto the existing one instead of replacing it.
    """
    messages: Annotated[list[AnyMessage], operator.add]
class Agent:
    """ReAct-style LangGraph agent: an LLM node that may request tool calls
    and an action node that executes them, looping until no calls remain."""

    # NOTE: the pasted version had `def init` — without the dunder name,
    # __init__ is never run and the graph is never built.
    def __init__(self, model, tools, system=" "):
        self.system = system
        graph = StateGraph(AgentState)
        graph.add_node("llm", self.call_openai)
        graph.add_node("action", self.take_action)
        graph.add_conditional_edges(
            "llm",                # the conditional edge starts from this node
            self.exists_action,   # function that determines where to go next
            # maps the function's return value to the next node
            {True: "action", False: END},
        )
        graph.add_edge("action", "llm")
        graph.set_entry_point("llm")
        self.graph = graph.compile()  # LangChain runnable is ready
        self.tools = {t.name: t for t in tools}
        self.model = model.bind_tools(tools)

    def exists_action(self, state: AgentState):
        """Return True if the last message carries tool calls to execute."""
        result = state['messages'][-1]
        return len(result.tool_calls) > 0

    def call_openai(self, state: AgentState):
        """Invoke the model on the transcript (system prompt prepended)."""
        messages = state['messages']
        if self.system:
            messages = [SystemMessage(content=self.system)] + messages
        message = self.model.invoke(messages)
        print(message)
        # `messages` is annotated with operator.add, so this APPENDS the new
        # message to state rather than overwriting the list.
        return {'messages': [message]}

    def take_action(self, state: AgentState):
        """Run every tool call from the last AI message; return ToolMessages."""
        tool_calls = state["messages"][-1].tool_calls
        results = []
        for t in tool_calls:
            print(f"Calling: {t}")
            result = self.tools[t['name']].invoke(t['args'])
            results.append(ToolMessage(tool_call_id=t['id'], name=t['name'], content=str(result)))
        print("Back to the model!")
        return {'messages': results}
# System prompt: permits multiple (parallel or sequential) search calls.
prompt = """You are a smart research assistant. Use the search engine to look up information.
You are allowed to make multiple calls (either together or in sequence).
Only look up information when you are sure of what you want.
If you need to look up some information before asking a follow up question, you are allowed to do that!
"""

abot = Agent(model=llm, tools=[tool], system=prompt)
messages = [HumanMessage(content="Who won IPL 2023? What is the gdp of that state and the state beside that combined?")]
result = abot.graph.invoke({"messages": messages})
print(result['messages'][-1].content)
Llama Output:
<class 'langchain_community.tools.tavily_search.tool.TavilySearchResults'>
tavily_search_results_json
content='' additional_kwargs={'tool_calls': [{'id': 'call_JurtcbX3QsXqxPS9RJ0aCGAU', 'function': {'arguments': '{"query": "IPL 2023 winner"}', 'name': 'tavily_search_results_json'}, 'type': 'function', 'index': 0}]} response_metadata={'token_usage': {'completion_tokens': 27, 'prompt_tokens': 304, 'total_tokens': 331}, 'model_name': 'accounts/fireworks/models/llama-v3p1-70b-instruct', 'system_fingerprint': None, 'finish_reason': 'tool_calls', 'logprobs': None} id='run-4ec44c44-5970-44b5-b10b-e41ac47f35de-0' tool_calls=[{'name': 'tavily_search_results_json', 'args': {'query': 'IPL 2023 winner'}, 'id': 'call_JurtcbX3QsXqxPS9RJ0aCGAU', 'type': 'tool_call'}] usage_metadata={'input_tokens': 304, 'output_tokens': 27, 'total_tokens': 331}
Calling: {'name': 'tavily_search_results_json', 'args': {'query': 'IPL 2023 winner'}, 'id': 'call_JurtcbX3QsXqxPS9RJ0aCGAU', 'type': 'tool_call'}
Back to the model!
content='The winner of IPL 2023 is Chennai Super Kings.' response_metadata={'token_usage': {'completion_tokens': 13, 'prompt_tokens': 1004, 'total_tokens': 1017}, 'model_name': 'accounts/fireworks/models/llama-v3p1-70b-instruct', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None} id='run-bb29ca04-b059-4c64-8692-ee6e02a270dc-0' usage_metadata={'input_tokens': 1004, 'output_tokens': 13, 'total_tokens': 1017}
The winner of IPL 2023 is Chennai Super Kings.