Cell 24 also has an issue:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[25], line 1
----> 1 graded_outputs = eval_chain.evaluate(examples, predictions)
File /usr/local/lib/python3.9/site-packages/langchain/evaluation/qa/eval_chain.py:60, in QAEvalChain.evaluate(self, examples, predictions, question_key, answer_key, prediction_key)
50 """Evaluate question answering examples and predictions."""
51 inputs = [
52 {
53 "query": example[question_key],
(...)
57 for i, example in enumerate(examples)
58 ]
---> 60 return self.apply(inputs)
File /usr/local/lib/python3.9/site-packages/langchain/chains/llm.py:157, in LLMChain.apply(self, input_list, callbacks)
155 except (KeyboardInterrupt, Exception) as e:
156 run_manager.on_chain_error(e)
--> 157 raise e
158 outputs = self.create_outputs(response)
159 run_manager.on_chain_end({"outputs": outputs})
File /usr/local/lib/python3.9/site-packages/langchain/chains/llm.py:154, in LLMChain.apply(self, input_list, callbacks)
149 run_manager = callback_manager.on_chain_start(
150 {"name": self.__class__.__name__},
151 {"input_list": input_list},
152 )
153 try:
--> 154 response = self.generate(input_list, run_manager=run_manager)
155 except (KeyboardInterrupt, Exception) as e:
156 run_manager.on_chain_error(e)
File /usr/local/lib/python3.9/site-packages/langchain/chains/llm.py:79, in LLMChain.generate(self, input_list, run_manager)
77 """Generate LLM result from inputs."""
78 prompts, stop = self.prep_prompts(input_list, run_manager=run_manager)
---> 79 return self.llm.generate_prompt(
80 prompts, stop, callbacks=run_manager.get_child() if run_manager else None
81 )
File /usr/local/lib/python3.9/site-packages/langchain/chat_models/base.py:143, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks)
136 def generate_prompt(
137 self,
138 prompts: List[PromptValue],
139 stop: Optional[List[str]] = None,
140 callbacks: Callbacks = None,
141 ) -> LLMResult:
142 prompt_messages = [p.to_messages() for p in prompts]
--> 143 return self.generate(prompt_messages, stop=stop, callbacks=callbacks)
File /usr/local/lib/python3.9/site-packages/langchain/chat_models/base.py:92, in BaseChatModel.generate(self, messages, stop, callbacks)
90 run_manager.on_llm_error(e)
91 raise e
---> 92 llm_output = self._combine_llm_outputs([res.llm_output for res in results])
93 generations = [res.generations for res in results]
94 output = LLMResult(generations=generations, llm_output=llm_output)
File /usr/local/lib/python3.9/site-packages/langchain/chat_models/openai.py:292, in ChatOpenAI._combine_llm_outputs(self, llm_outputs)
290 for k, v in token_usage.items():
291 if k in overall_token_usage:
--> 292 overall_token_usage[k] += v
293 else:
294 overall_token_usage[k] = v
TypeError: unsupported operand type(s) for +=: 'OpenAIObject' and 'OpenAIObject'