get_llm_response() function

Whenever I try to run `get_llm_response(prompt)`, I get the error below. It started appearing at the end of Module 2 and the beginning of Module 3:
```
---------------------------------------------------------------------------
PermissionDeniedError                     Traceback (most recent call last)
Cell In[2], line 11
      5 prompt = f"""
      6 Create a short recipe that uses the following ingredients:
      7 {ingredients}
      8 """
     10 # Get the response from the LLM
---> 11 response = get_llm_response(prompt)
     13 # Print the LLM response
     14 print(response)

File ~/work/L1/helper_functions.py:66, in get_llm_response(prompt)
     61 def get_llm_response(prompt):
     62     """This function takes as input a prompt, which must be a string enclosed in quotation marks,
     63     and passes it to OpenAI's GPT3.5 model. The function then saves the response of the model as
     64     a string.
     65     """
---> 66     completion = client.chat.completions.create(
     67         model="gpt-4o-mini",
     68         messages=[
     69             {
     70                 "role": "system",
     71                 "content": "You are a helpful but terse AI assistant who gets straight to the point.",
     72             },
     73             {"role": "user", "content": prompt},
     74         ],
     75         temperature=0.0,
     76     )
     77     response = completion.choices[0].message.content
     78     return response

File /usr/local/lib/python3.9/site-packages/openai/_utils/_utils.py:277, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs)
    275     msg = f"Missing required argument: {quote(missing[0])}"
    276     raise TypeError(msg)
--> 277 return func(*args, **kwargs)

File /usr/local/lib/python3.9/site-packages/openai/resources/chat/completions.py:590, in Completions.create(self, messages, model, frequency_penalty, function_call, functions, logit_bias, logprobs, max_tokens, n, presence_penalty, response_format, seed, stop, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
    558 @required_args(["messages", "model"], ["messages", "model", "stream"])
    559 def create(
    560     self,
   (...)
    588     timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    589 ) -> ChatCompletion | Stream[ChatCompletionChunk]:
--> 590     return self._post(
    591         "/chat/completions",
    592         body=maybe_transform(
    593             {
    594                 "messages": messages,
    595                 "model": model,
    596                 "frequency_penalty": frequency_penalty,
    597                 "function_call": function_call,
    598                 "functions": functions,
    599                 "logit_bias": logit_bias,
    600                 "logprobs": logprobs,
    601                 "max_tokens": max_tokens,
    602                 "n": n,
    603                 "presence_penalty": presence_penalty,
    604                 "response_format": response_format,
    605                 "seed": seed,
    606                 "stop": stop,
    607                 "stream": stream,
    608                 "stream_options": stream_options,
    609                 "temperature": temperature,
    610                 "tool_choice": tool_choice,
    611                 "tools": tools,
    612                 "top_logprobs": top_logprobs,
    613                 "top_p": top_p,
    614                 "user": user,
    615             },
    616             completion_create_params.CompletionCreateParams,
    617         ),
    618         options=make_request_options(
    619             extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
    620         ),
    621         cast_to=ChatCompletion,
    622         stream=stream or False,
    623         stream_cls=Stream[ChatCompletionChunk],
    624     )

File /usr/local/lib/python3.9/site-packages/openai/_base_client.py:1240, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
   1226 def post(
   1227     self,
   1228     path: str,
   (...)
   1235     stream_cls: type[_StreamT] | None = None,
   1236 ) -> ResponseT | _StreamT:
   1237     opts = FinalRequestOptions.construct(
   1238         method="post", url=path, json_data=body, files=to_httpx_files(files), **options
   1239     )
-> 1240     return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))

File /usr/local/lib/python3.9/site-packages/openai/_base_client.py:921, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
    912 def request(
    913     self,
    914     cast_to: Type[ResponseT],
   (...)
    919     stream_cls: type[_StreamT] | None = None,
    920 ) -> ResponseT | _StreamT:
--> 921     return self._request(
    922         cast_to=cast_to,
    923         options=options,
    924         stream=stream,
    925         stream_cls=stream_cls,
    926         remaining_retries=remaining_retries,
    927     )

File /usr/local/lib/python3.9/site-packages/openai/_base_client.py:1020, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
   1017     err.response.read()
   1019     log.debug("Re-raising status error")
-> 1020     raise self._make_status_error_from_response(err.response) from None
   1022 return self._process_response(
   1023     cast_to=cast_to,
   1024     options=options,
   (...)
   1027     stream_cls=stream_cls,
   1028 )

PermissionDeniedError: Error code: 403 - {'error': {'message': 'Project proj_OXEC08aO6rKYJweAEj6Aae5D does not have access to model gpt-4o-mini', 'type': 'invalid_request_error', 'param': None, 'code': 'model_not_found'}}
```
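For reference, one way to confirm what the 403 reports is to ask the API which models the project key can actually call. This is a minimal sketch, assuming the `OPENAI_API_KEY` environment variable is set the way the course notebooks set it:

```python
import os
from openai import OpenAI

# Assumes the API key is exposed via the environment, as in the course setup.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# models.list() returns the models this project/key is allowed to use;
# the 403 "model_not_found" above means "gpt-4o-mini" is absent from it.
available = sorted(m.id for m in client.models.list())
print("gpt-4o-mini accessible:", "gpt-4o-mini" in available)
```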

Hi @hayou,

We were recently made aware of this issue.

Please follow this thread for updates.
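In the meantime, one possible workaround is to define a local copy of the helper that calls a model your project does have access to. This is only a sketch, not an official fix: the model name `gpt-3.5-turbo` is an assumption (it matches the GPT-3.5 mentioned in the helper's docstring), so substitute whichever model the check above shows as available.

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

def get_llm_response(prompt):
    """Local override of the course helper: same logic as the original,
    but with the model swapped for one the project is assumed to be
    able to access (gpt-3.5-turbo here; adjust as needed)."""
    completion = client.chat.completions.create(
        model="gpt-3.5-turbo",  # assumed accessible; replace if needed
        messages=[
            {
                "role": "system",
                "content": "You are a helpful but terse AI assistant who gets straight to the point.",
            },
            {"role": "user", "content": prompt},
        ],
        temperature=0.0,
    )
    return completion.choices[0].message.content
```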

Thanks,
Mubsi