Error in Lesson 3 "Long-Term Agentic Memory With LangGraph"

response = response_agent.invoke(
    {"messages": [{"role": "user", "content": "Jim is my friend"}]},
    config=config
)

error report:

NotFoundError Traceback (most recent call last)
Cell In[31], line 1
----> 1 response = response_agent.invoke(
2 {“messages”: [{“role”: “user”, “content”: “Jim is my friend”}]},
3 config=config
4 )

File /usr/local/lib/python3.11/site-packages/langgraph/pregel/_init_.py:2069, in Pregel.invoke(self, input, config, stream_mode, output_keys, interrupt_before, interrupt_after, debug, **kwargs)
2067 else:
2068 chunks = []
→ 2069 for chunk in self.stream(
2070 input,
2071 config,
2072 stream_mode=stream_mode,
2073 output_keys=output_keys,
2074 interrupt_before=interrupt_before,
2075 interrupt_after=interrupt_after,
2076 debug=debug,
2077 **kwargs,
2078 ):
2079 if stream_mode == “values”:
2080 latest = chunk

File /usr/local/lib/python3.11/site-packages/langgraph/pregel/_init_.py:1724, in Pregel.stream(self, input, config, stream_mode, output_keys, interrupt_before, interrupt_after, debug, subgraphs)
1718 # Similarly to Bulk Synchronous Parallel / Pregel model
1719 # computation proceeds in steps, while there are channel updates.
1720 # Channel updates from step N are only visible in step N+1
1721 # channels are guaranteed to be immutable for the duration of the step,
1722 # with channel updates applied only at the transition between steps.
1723 while loop.tick(input_keys=self.input_channels):
→ 1724 for _ in runner.tick(
1725 loop.tasks.values(),
1726 timeout=self.step_timeout,
1727 retry_policy=self.retry_policy,
1728 get_waiter=get_waiter,
1729 ):
1730 # emit output
1731 yield from output()
1732 # emit output

File /usr/local/lib/python3.11/site-packages/langgraph/pregel/runner.py:230, in PregelRunner.tick(self, tasks, reraise, timeout, retry_policy, get_waiter)
228 t = tasks[0]
229 try:
→ 230 run_with_retry(
231 t,
232 retry_policy,
233 configurable={
234 CONFIG_KEY_SEND: partial(writer, t),
235 CONFIG_KEY_CALL: partial(call, t),
236 },
237 )
238 self.commit(t, None)
239 except Exception as exc:

File /usr/local/lib/python3.11/site-packages/langgraph/pregel/retry.py:40, in run_with_retry(task, retry_policy, configurable)
38 task.writes.clear()
39 # run the task
—> 40 return task.proc.invoke(task.input, config)
41 except ParentCommand as exc:
42 ns: str = config[CONF][CONFIG_KEY_CHECKPOINT_NS]

File /usr/local/lib/python3.11/site-packages/langgraph/utils/runnable.py:506, in RunnableSeq.invoke(self, input, config, **kwargs)
502 config = patch_config(
503 config, callbacks=run_manager.get_child(f"seq:step:{i + 1}")
504 )
505 if i == 0:
→ 506 input = step.invoke(input, config, **kwargs)
507 else:
508 input = step.invoke(input, config)

File /usr/local/lib/python3.11/site-packages/langgraph/utils/runnable.py:262, in RunnableCallable.invoke(self, input, config, **kwargs)
260 context = copy_context()
261 context.run(_set_config_context, child_config)
→ 262 ret = context.run(self.func, *args, **kwargs)
263 except BaseException as e:
264 run_manager.on_chain_error(e)

File /usr/local/lib/python3.11/site-packages/langgraph/prebuilt/chat_agent_executor.py:639, in create_react_agent..call_model(state, config)
637 def call_model(state: AgentState, config: RunnableConfig) → AgentState:
638 _validate_chat_history(state[“messages”])
→ 639 response = cast(AIMessage, model_runnable.invoke(state, config))
640 # add agent name to the AIMessage
641 response.name = name

File /usr/local/lib/python3.11/site-packages/langchain_core/runnables/base.py:3024, in RunnableSequence.invoke(self, input, config, **kwargs)
3022 input = context.run(step.invoke, input, config, **kwargs)
3023 else:
→ 3024 input = context.run(step.invoke, input, config)
3025 # finish the root run
3026 except BaseException as e:

File /usr/local/lib/python3.11/site-packages/langchain_core/runnables/base.py:5360, in RunnableBindingBase.invoke(self, input, config, **kwargs)
5354 def invoke(
5355 self,
5356 input: Input,
5357 config: Optional[RunnableConfig] = None,
5358 **kwargs: Optional[Any],
5359 ) → Output:
→ 5360 return self.bound.invoke(
5361 input,
5362 self._merge_configs(config),
5363 **{**self.kwargs, **kwargs},
5364 )

File /usr/local/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:284, in BaseChatModel.invoke(self, input, config, stop, **kwargs)
273 def invoke(
274 self,
275 input: LanguageModelInput,
(…)
279 **kwargs: Any,
280 ) → BaseMessage:
281 config = ensure_config(config)
282 return cast(
283 ChatGeneration,
→ 284 self.generate_prompt(
285 [self._convert_input(input)],
286 stop=stop,
287 callbacks=config.get(“callbacks”),
288 tags=config.get(“tags”),
289 metadata=config.get(“metadata”),
290 run_name=config.get(“run_name”),
291 run_id=config.pop(“run_id”, None),
292 **kwargs,
293 ).generations[0][0],
294 ).message

File /usr/local/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:860, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
852 def generate_prompt(
853 self,
854 prompts: list[PromptValue],
(…)
857 **kwargs: Any,
858 ) → LLMResult:
859 prompt_messages = [p.to_messages() for p in prompts]
→ 860 return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)

File /usr/local/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:690, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
687 for i, m in enumerate(messages):
688 try:
689 results.append(
→ 690 self._generate_with_cache(
691 m,
692 stop=stop,
693 run_manager=run_managers[i] if run_managers else None,
694 **kwargs,
695 )
696 )
697 except BaseException as e:
698 if run_managers:

File /usr/local/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:925, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
923 else:
924 if inspect.signature(self._generate).parameters.get(“run_manager”):
→ 925 result = self._generate(
926 messages, stop=stop, run_manager=run_manager, **kwargs
927 )
928 else:
929 result = self._generate(messages, stop=stop, **kwargs)

File /usr/local/lib/python3.11/site-packages/langchain_anthropic/chat_models.py:814, in ChatAnthropic._generate(self, messages, stop, run_manager, **kwargs)
812 return generate_from_stream(stream_iter)
813 payload = self._get_request_payload(messages, stop=stop, **kwargs)
→ 814 data = self._client.messages.create(**payload)
815 return self._format_output(data, **kwargs)

File /usr/local/lib/python3.11/site-packages/anthropic/_utils/_utils.py:275, in required_args..inner..wrapper(*args, **kwargs)
273 msg = f"Missing required argument: {quote(missing[0])}"
274 raise TypeError(msg)
→ 275 return func(*args, **kwargs)

File /usr/local/lib/python3.11/site-packages/anthropic/resources/messages/messages.py:904, in Messages.create(self, max_tokens, messages, model, metadata, stop_sequences, stream, system, temperature, tool_choice, tools, top_k, top_p, extra_headers, extra_query, extra_body, timeout)
897 if model in DEPRECATED_MODELS:
898 warnings.warn(
899 f"The model ‘{model}’ is deprecated and will reach end-of-life on {DEPRECATED_MODELS[model]}.\nPlease migrate to a newer model. Visit Model deprecations - Claude API Docs for more information.",
900 DeprecationWarning,
901 stacklevel=3,
902 )
→ 904 return self._post(
905 “/v1/messages”,
906 body=maybe_transform(
907 {
908 “max_tokens”: max_tokens,
909 “messages”: messages,
910 “model”: model,
911 “metadata”: metadata,
912 “stop_sequences”: stop_sequences,
913 “stream”: stream,
914 “system”: system,
915 “temperature”: temperature,
916 “tool_choice”: tool_choice,
917 “tools”: tools,
918 “top_k”: top_k,
919 “top_p”: top_p,
920 },
921 message_create_params.MessageCreateParams,
922 ),
923 options=make_request_options(
924 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
925 ),
926 cast_to=Message,
927 stream=stream or False,
928 stream_cls=Stream[RawMessageStreamEvent],
929 )

File /usr/local/lib/python3.11/site-packages/anthropic/_base_client.py:1289, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
1275 def post(
1276 self,
1277 path: str,
(…)
1284 stream_cls: type[_StreamT] | None = None,
1285 ) → ResponseT | _StreamT:
1286 opts = FinalRequestOptions.construct(
1287 method=“post”, url=path, json_data=body, files=to_httpx_files(files), **options
1288 )
→ 1289 return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))

File /usr/local/lib/python3.11/site-packages/anthropic/_base_client.py:966, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
963 else:
964 retries_taken = 0
→ 966 return self._request(
967 cast_to=cast_to,
968 options=options,
969 stream=stream,
970 stream_cls=stream_cls,
971 retries_taken=retries_taken,
972 )

File /usr/local/lib/python3.11/site-packages/anthropic/_base_client.py:1070, in SyncAPIClient._request(self, cast_to, options, retries_taken, stream, stream_cls)
1067 err.response.read()
1069 log.debug(“Re-raising status error”)
→ 1070 raise self._make_status_error_from_response(err.response) from None
1072 return self._process_response(
1073 cast_to=cast_to,
1074 options=options,
(…)
1078 retries_taken=retries_taken,
1079 )

NotFoundError: Error code: 404 - {'type': 'error', 'error': {'type': 'not_found_error', 'message': 'model: claude-3-5-sonnet-latest'}, 'request_id': 'req_011CZc2brU4xcBeTYb1a5rky'}
During task with name ‘agent’ and id ‘d0c8bee3-e855-8908-2ab1-3f7baea4b6d7’

for m in response["messages"]:
    m.pretty_print()
-------------------------------------------

All the code segments that use "response" are erroring out.

The model claude-3-5-sonnet-latest has been retired by Anthropic. I tried anthropic:claude-sonnet-4-6 locally and it works.

Thanks.
I believe the course content needs to be updated to use a currently supported model.