I'm getting a PermissionDeniedError when running the L5/L6 steps, and restarting the notebook didn't help.
The failing L5 step:
result = event_management_crew.kickoff(inputs=event_details)
Output:
[DEBUG]: == Working Agent: Venue Coordinator
[INFO]: == Starting Task: Find a venue in San Francisco that meets criteria for Tech Innovation Conference.
> Entering new CrewAgentExecutor chain...
---------------------------------------------------------------------------
PermissionDeniedError Traceback (most recent call last)
Cell In[14], line 1
----> 1 result = event_management_crew.kickoff(inputs=event_details)
File /usr/local/lib/python3.11/site-packages/crewai/crew.py:252, in Crew.kickoff(self, inputs)
249 metrics = []
251 if self.process == Process.sequential:
--> 252 result = self._run_sequential_process()
253 elif self.process == Process.hierarchical:
254 result, manager_metrics = self._run_hierarchical_process()
File /usr/local/lib/python3.11/site-packages/crewai/crew.py:293, in Crew._run_sequential_process(self)
288 if self.output_log_file:
289 self._file_handler.log(
290 agent=role, task=task.description, status="started"
291 )
--> 293 output = task.execute(context=task_output)
294 if not task.async_execution:
295 task_output = output
File /usr/local/lib/python3.11/site-packages/crewai/task.py:173, in Task.execute(self, agent, context, tools)
171 self.thread.start()
172 else:
--> 173 result = self._execute(
174 task=self,
175 agent=agent,
176 context=context,
177 tools=tools,
178 )
179 return result
File /usr/local/lib/python3.11/site-packages/crewai/task.py:182, in Task._execute(self, agent, task, context, tools)
181 def _execute(self, agent, task, context, tools):
--> 182 result = agent.execute_task(
183 task=task,
184 context=context,
185 tools=tools,
186 )
188 exported_output = self._export_output(result)
190 self.output = TaskOutput(
191 description=self.description,
192 exported_output=exported_output,
193 raw_output=result,
194 )
File /usr/local/lib/python3.11/site-packages/crewai/agent.py:221, in Agent.execute_task(self, task, context, tools)
218 self.agent_executor.tools_description = render_text_description(parsed_tools)
219 self.agent_executor.tools_names = self.__tools_names(parsed_tools)
--> 221 result = self.agent_executor.invoke(
222 {
223 "input": task_prompt,
224 "tool_names": self.agent_executor.tools_names,
225 "tools": self.agent_executor.tools_description,
226 }
227 )["output"]
229 if self.max_rpm:
230 self._rpm_controller.stop_rpm_counter()
File /usr/local/lib/python3.11/site-packages/langchain/chains/base.py:163, in Chain.invoke(self, input, config, **kwargs)
161 except BaseException as e:
162 run_manager.on_chain_error(e)
--> 163 raise e
164 run_manager.on_chain_end(outputs)
166 if include_run_info:
File /usr/local/lib/python3.11/site-packages/langchain/chains/base.py:153, in Chain.invoke(self, input, config, **kwargs)
150 try:
151 self._validate_inputs(inputs)
152 outputs = (
--> 153 self._call(inputs, run_manager=run_manager)
154 if new_arg_supported
155 else self._call(inputs)
156 )
158 final_outputs: Dict[str, Any] = self.prep_outputs(
159 inputs, outputs, return_only_outputs
160 )
161 except BaseException as e:
File /usr/local/lib/python3.11/site-packages/crewai/agents/executor.py:124, in CrewAgentExecutor._call(self, inputs, run_manager)
122 while self._should_continue(self.iterations, time_elapsed):
123 if not self.request_within_rpm_limit or self.request_within_rpm_limit():
--> 124 next_step_output = self._take_next_step(
125 name_to_tool_map,
126 color_mapping,
127 inputs,
128 intermediate_steps,
129 run_manager=run_manager,
130 )
131 if self.step_callback:
132 self.step_callback(next_step_output)
File /usr/local/lib/python3.11/site-packages/langchain/agents/agent.py:1138, in AgentExecutor._take_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)
1129 def _take_next_step(
1130 self,
1131 name_to_tool_map: Dict[str, BaseTool],
(...)
1135 run_manager: Optional[CallbackManagerForChainRun] = None,
1136 ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:
1137 return self._consume_next_step(
-> 1138 [
1139 a
1140 for a in self._iter_next_step(
1141 name_to_tool_map,
1142 color_mapping,
1143 inputs,
1144 intermediate_steps,
1145 run_manager,
1146 )
1147 ]
1148 )
File /usr/local/lib/python3.11/site-packages/langchain/agents/agent.py:1138, in <listcomp>(.0)
1129 def _take_next_step(
1130 self,
1131 name_to_tool_map: Dict[str, BaseTool],
(...)
1135 run_manager: Optional[CallbackManagerForChainRun] = None,
1136 ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:
1137 return self._consume_next_step(
-> 1138 [
1139 a
1140 for a in self._iter_next_step(
1141 name_to_tool_map,
1142 color_mapping,
1143 inputs,
1144 intermediate_steps,
1145 run_manager,
1146 )
1147 ]
1148 )
File /usr/local/lib/python3.11/site-packages/crewai/agents/executor.py:186, in CrewAgentExecutor._iter_next_step(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)
183 intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
185 # Call the LLM to see what to do.
--> 186 output = self.agent.plan(
187 intermediate_steps,
188 callbacks=run_manager.get_child() if run_manager else None,
189 **inputs,
190 )
192 except OutputParserException as e:
193 if isinstance(self.handle_parsing_errors, bool):
File /usr/local/lib/python3.11/site-packages/langchain/agents/agent.py:397, in RunnableAgent.plan(self, intermediate_steps, callbacks, **kwargs)
389 final_output: Any = None
390 if self.stream_runnable:
391 # Use streaming to make sure that the underlying LLM is invoked in a
392 # streaming
(...)
395 # Because the response from the plan is not a generator, we need to
396 # accumulate the output into final output and return that.
--> 397 for chunk in self.runnable.stream(inputs, config={"callbacks": callbacks}):
398 if final_output is None:
399 final_output = chunk
File /usr/local/lib/python3.11/site-packages/langchain_core/runnables/base.py:2875, in RunnableSequence.stream(self, input, config, **kwargs)
2869 def stream(
2870 self,
2871 input: Input,
2872 config: Optional[RunnableConfig] = None,
2873 **kwargs: Optional[Any],
2874 ) -> Iterator[Output]:
-> 2875 yield from self.transform(iter([input]), config, **kwargs)
File /usr/local/lib/python3.11/site-packages/langchain_core/runnables/base.py:2862, in RunnableSequence.transform(self, input, config, **kwargs)
2856 def transform(
2857 self,
2858 input: Iterator[Input],
2859 config: Optional[RunnableConfig] = None,
2860 **kwargs: Optional[Any],
2861 ) -> Iterator[Output]:
-> 2862 yield from self._transform_stream_with_config(
2863 input,
2864 self._transform,
2865 patch_config(config, run_name=(config or {}).get("run_name") or self.name),
2866 **kwargs,
2867 )
File /usr/local/lib/python3.11/site-packages/langchain_core/runnables/base.py:1881, in Runnable._transform_stream_with_config(self, input, transformer, config, run_type, **kwargs)
1879 try:
1880 while True:
-> 1881 chunk: Output = context.run(next, iterator) # type: ignore
1882 yield chunk
1883 if final_output_supported:
File /usr/local/lib/python3.11/site-packages/langchain_core/runnables/base.py:2826, in RunnableSequence._transform(self, input, run_manager, config)
2817 for step in steps:
2818 final_pipeline = step.transform(
2819 final_pipeline,
2820 patch_config(
(...)
2823 ),
2824 )
-> 2826 for output in final_pipeline:
2827 yield output
File /usr/local/lib/python3.11/site-packages/langchain_core/runnables/base.py:1282, in Runnable.transform(self, input, config, **kwargs)
1279 final: Input
1280 got_first_val = False
-> 1282 for ichunk in input:
1283 # The default implementation of transform is to buffer input and
1284 # then call stream.
1285 # It'll attempt to gather all input into a single chunk using
1286 # the `+` operator.
1287 # If the input is not addable, then we'll assume that we can
1288 # only operate on the last chunk,
1289 # and we'll iterate until we get to the last chunk.
1290 if not got_first_val:
1291 final = ichunk
File /usr/local/lib/python3.11/site-packages/langchain_core/runnables/base.py:4736, in RunnableBindingBase.transform(self, input, config, **kwargs)
4730 def transform(
4731 self,
4732 input: Iterator[Input],
4733 config: Optional[RunnableConfig] = None,
4734 **kwargs: Any,
4735 ) -> Iterator[Output]:
-> 4736 yield from self.bound.transform(
4737 input,
4738 self._merge_configs(config),
4739 **{**self.kwargs, **kwargs},
4740 )
File /usr/local/lib/python3.11/site-packages/langchain_core/runnables/base.py:1300, in Runnable.transform(self, input, config, **kwargs)
1297 final = ichunk
1299 if got_first_val:
-> 1300 yield from self.stream(final, config, **kwargs)
File /usr/local/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:249, in BaseChatModel.stream(self, input, config, stop, **kwargs)
242 except BaseException as e:
243 run_manager.on_llm_error(
244 e,
245 response=LLMResult(
246 generations=[[generation]] if generation else []
247 ),
248 )
--> 249 raise e
250 else:
251 run_manager.on_llm_end(LLMResult(generations=[[generation]]))
File /usr/local/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:229, in BaseChatModel.stream(self, input, config, stop, **kwargs)
227 generation: Optional[ChatGenerationChunk] = None
228 try:
--> 229 for chunk in self._stream(messages, stop=stop, **kwargs):
230 if chunk.message.id is None:
231 chunk.message.id = f"run-{run_manager.run_id}"
File /usr/local/lib/python3.11/site-packages/langchain_openai/chat_models/base.py:408, in ChatOpenAI._stream(self, messages, stop, run_manager, **kwargs)
405 params = {**params, **kwargs, "stream": True}
407 default_chunk_class = AIMessageChunk
--> 408 for chunk in self.client.create(messages=message_dicts, **params):
409 if not isinstance(chunk, dict):
410 chunk = chunk.dict()
File /usr/local/lib/python3.11/site-packages/openai/_utils/_utils.py:277, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs)
275 msg = f"Missing required argument: {quote(missing[0])}"
276 raise TypeError(msg)
--> 277 return func(*args, **kwargs)
File /usr/local/lib/python3.11/site-packages/openai/resources/chat/completions.py:590, in Completions.create(self, messages, model, frequency_penalty, function_call, functions, logit_bias, logprobs, max_tokens, n, presence_penalty, response_format, seed, stop, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
558 @required_args(["messages", "model"], ["messages", "model", "stream"])
559 def create(
560 self,
(...)
588 timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
589 ) -> ChatCompletion | Stream[ChatCompletionChunk]:
--> 590 return self._post(
591 "/chat/completions",
592 body=maybe_transform(
593 {
594 "messages": messages,
595 "model": model,
596 "frequency_penalty": frequency_penalty,
597 "function_call": function_call,
598 "functions": functions,
599 "logit_bias": logit_bias,
600 "logprobs": logprobs,
601 "max_tokens": max_tokens,
602 "n": n,
603 "presence_penalty": presence_penalty,
604 "response_format": response_format,
605 "seed": seed,
606 "stop": stop,
607 "stream": stream,
608 "stream_options": stream_options,
609 "temperature": temperature,
610 "tool_choice": tool_choice,
611 "tools": tools,
612 "top_logprobs": top_logprobs,
613 "top_p": top_p,
614 "user": user,
615 },
616 completion_create_params.CompletionCreateParams,
617 ),
618 options=make_request_options(
619 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
620 ),
621 cast_to=ChatCompletion,
622 stream=stream or False,
623 stream_cls=Stream[ChatCompletionChunk],
624 )
File /usr/local/lib/python3.11/site-packages/openai/_base_client.py:1240, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
1226 def post(
1227 self,
1228 path: str,
(...)
1235 stream_cls: type[_StreamT] | None = None,
1236 ) -> ResponseT | _StreamT:
1237 opts = FinalRequestOptions.construct(
1238 method="post", url=path, json_data=body, files=to_httpx_files(files), **options
1239 )
-> 1240 return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
File /usr/local/lib/python3.11/site-packages/openai/_base_client.py:921, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
912 def request(
913 self,
914 cast_to: Type[ResponseT],
(...)
919 stream_cls: type[_StreamT] | None = None,
920 ) -> ResponseT | _StreamT:
--> 921 return self._request(
922 cast_to=cast_to,
923 options=options,
924 stream=stream,
925 stream_cls=stream_cls,
926 remaining_retries=remaining_retries,
927 )
File /usr/local/lib/python3.11/site-packages/openai/_base_client.py:1020, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
1017 err.response.read()
1019 log.debug("Re-raising status error")
-> 1020 raise self._make_status_error_from_response(err.response) from None
1022 return self._process_response(
1023 cast_to=cast_to,
1024 options=options,
(...)
1027 stream_cls=stream_cls,
1028 )
PermissionDeniedError: <!DOCTYPE html>
<html lang="en">
<head>
<title>Forbidden (403)</title>
<meta name="description" content="rev proxy for open ai">
<meta name="author" content="deeplearning.ai">
<!-- ... charset/viewport metas, favicon, and Bootstrap/static CSS+JS includes trimmed ... -->
</head>
<body>
<div class="container">
<h1>Forbidden (403)</h1>
<p>You're not allowed to access this page.</p>
</div> <!-- /container -->
</body>
</html>
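In case it helps with triage: since the 403 page comes from the deeplearning.ai reverse proxy rather than from OpenAI itself, one way to narrow it down is to send a single bare chat-completions request through the same client settings that crewai/langchain end up using. This is only a sketch, not the course's own code; the exact environment variable names below are assumptions, since the lesson may configure the key/base URL through its utils helper.

import os
from openai import OpenAI

# Assumption: the notebook routes OpenAI traffic through the course's reverse
# proxy via environment variables; the lesson may set these differently.
print("OPENAI_API_BASE:", os.environ.get("OPENAI_API_BASE"))    # read by langchain
print("OPENAI_BASE_URL:", os.environ.get("OPENAI_BASE_URL"))    # read by the openai v1 SDK
print("OPENAI_API_KEY set:", bool(os.environ.get("OPENAI_API_KEY")))

client = OpenAI()  # picks up OPENAI_API_KEY / OPENAI_BASE_URL from the env
resp = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "ping"}],
)
print(resp.choices[0].message.content)

If this bare call succeeds, the 403 is specific to how the crew/agents are configured; if it fails the same way, the key or proxy session itself is being rejected, which would explain why a kernel restart alone doesn't fix it.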