I’m trying to use Ollama and LlamaIndex with LangGraph. The first two lectures are OK; I can run them without any errors. Here is the sample code:
from llama_index.llms.ollama import Ollama
from llama_index.core.llms import ChatMessage
from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage, ToolMessage, AIMessage
from langgraph.graph import StateGraph, END
from langchain_community.tools.tavily_search import TavilySearchResults
from typing import TypedDict, Annotated
import operator
import re
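# LlamaIndex Ollama client pointing at a locally running Ollama server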
client = Ollama(
    base_url='http://localhost:11434',
    model="llama3.1",
    temperature=0
)
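# Tavily web-search tool (expects TAVILY_API_KEY to be set in the environment)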
tool = TavilySearchResults(max_results=4)
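# Shared graph state: each node's returned messages are appended via operator.add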
class AgentState(TypedDict):
    messages: Annotated[list[AnyMessage], operator.add]
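# Agent wires an LLM node and a tool-execution node into a LangGraph state machine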
class Agent:
    def __init__(self, model:Ollama, tools, system=""):
        self.system = system
        graph = StateGraph(AgentState)
        graph.add_node("llm", self.call_llama)
        graph.add_node("action", self.take_action)
        graph.add_conditional_edges(
            "llm",
            self.exists_action,
            {True: "action", False: END}
        )
        graph.add_edge("action", "llm")
        graph.set_entry_point("llm")
        self.graph = graph.compile()
        self.tools = {t.name: t for t in tools}
        self.model = model
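    # Route to the "action" node only when the last AI message contains tool calls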
    def exists_action(self, state: AgentState):
        result = state['messages'][-1]
        return len(result.tool_calls) > 0
    
    def call_llama(self, state: AgentState):
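        # Translate the LangChain messages in state into LlamaIndex ChatMessages and query Ollama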
        messages = []
        for msg in state['messages']:
            msg_dict = msg.model_dump()
            print(msg_dict)
            if msg_dict['type'] == "human":
                role = "user"
            elif msg_dict['type'] == "ai":
                role = "assistant"
            elif msg_dict['type'] == "tool":
                role = "tool"
            else:
                print(msg_dict['type'])
                role = "user"  # fall back to a valid LlamaIndex role for unexpected message types
            messages.append(ChatMessage(role=role, content=msg.content))
        if self.system:
            messages = [ChatMessage(role="system", content=self.system)] + messages
        
        response = self.model.chat(messages=messages)
        content = response.message.content
        return {'messages': [AIMessage(content=content)]}
    
    def take_action(self, state: AgentState):
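        # Run each tool call requested by the last AI message and collect the results as ToolMessages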
        tool_calls = state['messages'][-1].tool_calls
        results = []
        for t in tool_calls:
            print(f"Calling: {t}")
            if t['name'] not in self.tools:
                print("\n...bad tool name...")
                result = "bad tool name, retry"
            else:
                result = self.tools[t['name']].invoke(t['args'])
            results.append(ToolMessage(tool_call_id=t["id"], name=t['name'], content=str(result)))
        print("Back to the model!")
        return {'messages': results}
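# System prompt telling the agent when and how to use the search tool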
prompt = """You are a smart research assistant. Use the search engine to look up information. \
You are allowed to make multiple calls (either together or in sequence). \
Only look up information when you are sure of what you want. \
If you need to look up some information before asking a follow-up question, you are allowed to do that!
"""
abot = Agent(client, [tool], system=prompt)
query = "Who won the super bowl in 2024? In what state is the winning team headquarters located? \
What is the GDP of that state? Answer each question." 
messages = [HumanMessage(content=query)]
result = abot.graph.invoke({"messages": messages})
print(result['messages'][-1].content)
This returns the same answer as in lecture 2. However, it doesn’t return or stream the output the way lecture 4 does. How can I achieve this?
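For reference, this is roughly the kind of loop I was expecting to write, based on the streaming pattern from the lectures (just a sketch on my side; I’m assuming the compiled graph’s stream() method with stream_mode="values" is the right entry point for this setup):

for event in abot.graph.stream({"messages": messages}, stream_mode="values"):
    # print the newest message in the state after each graph step
    event["messages"][-1].pretty_print()

Is something like this possible with the Ollama/LlamaIndex setup above, or do I need to change how call_llama returns its output?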