L1 Model Prompt Parser

I get an error when running

customer_response = chat(customer_messages)

I’m not sure what is wrong - everything (up to this point) worked fine.
Response: Retrying langchain.chat_models.openai.ChatOpenAI.completion_with_retry.._completion_with_retry in 1.0 seconds as it raised APIConnectionError: Error communicating with OpenAI: (‘Connection aborted.’, RemoteDisconnected(‘Remote end closed connection without response’)).

AuthenticationError                       Traceback (most recent call last)
Cell In[69], line 2
      1 # Call the LLM to translate to the style of the customer message
----> 2 customer_response = chat(customer_messages)

File ~\AppData\Roaming\Python\Python311\site-packages\langchain\chat_models\base.py:183, in BaseChatModel.__call__(self, messages, stop, callbacks)
    177 def __call__(
    178     self,
    179     messages: List[BaseMessage],
    180     stop: Optional[List[str]] = None,
    181     callbacks: Callbacks = None,
    182 ) -> BaseMessage:
--> 183     generation = self.generate(
    184         [messages], stop=stop, callbacks=callbacks
    185     ).generations[0][0]
    186     if isinstance(generation, ChatGeneration):
    187         return generation.message

File ~\AppData\Roaming\Python\Python311\site-packages\langchain\chat_models\base.py:92, in BaseChatModel.generate(self, messages, stop, callbacks)
     90 except (KeyboardInterrupt, Exception) as e:
     91     run_manager.on_llm_error(e)
---> 92     raise e
     93 llm_output = self._combine_llm_outputs([res.llm_output for res in results])
     94 generations = [res.generations for res in results]

File ~\AppData\Roaming\Python\Python311\site-packages\langchain\chat_models\base.py:84, in BaseChatModel.generate(self, messages, stop, callbacks)
     80 new_arg_supported = inspect.signature(self._generate).parameters.get(
     81     "run_manager"
     82 )
     83 try:
---> 84     results = [
     85         self._generate(m, stop=stop, run_manager=run_manager)
     86         if new_arg_supported
     87         else self._generate(m, stop=stop)
     88         for m in messages
     89     ]
     90 except (KeyboardInterrupt, Exception) as e:
     91     run_manager.on_llm_error(e)

File ~\AppData\Roaming\Python\Python311\site-packages\langchain\chat_models\base.py:85, in (.0)
     80 new_arg_supported = inspect.signature(self._generate).parameters.get(
     81     "run_manager"
     82 )
     83 try:
     84     results = [
---> 85         self._generate(m, stop=stop, run_manager=run_manager)
     86         if new_arg_supported
     87         else self._generate(m, stop=stop)
     88         for m in messages
     89     ]
     90 except (KeyboardInterrupt, Exception) as e:
     91     run_manager.on_llm_error(e)

File ~\AppData\Roaming\Python\Python311\site-packages\langchain\chat_models\openai.py:323, in ChatOpenAI._generate(self, messages, stop, run_manager)
    319     message = _convert_dict_to_message(
    320         {"content": inner_completion, "role": role}
    321     )
    322     return ChatResult(generations=[ChatGeneration(message=message)])
--> 323 response = self.completion_with_retry(messages=message_dicts, **params)
    324 return self._create_chat_result(response)

File ~\AppData\Roaming\Python\Python311\site-packages\langchain\chat_models\openai.py:284, in ChatOpenAI.completion_with_retry(self, **kwargs)
    280 @retry_decorator
    281 def _completion_with_retry(**kwargs: Any) -> Any:
    282     return self.client.create(**kwargs)
--> 284 return _completion_with_retry(**kwargs)

File ~\AppData\Roaming\Python\Python311\site-packages\tenacity\__init__.py:289, in BaseRetrying.wraps..wrapped_f(*args, **kw)
    287 @functools.wraps(f)
    288 def wrapped_f(*args: t.Any, **kw: t.Any) -> t.Any:
--> 289     return self(f, *args, **kw)

File ~\AppData\Roaming\Python\Python311\site-packages\tenacity\__init__.py:379, in Retrying.__call__(self, fn, *args, **kwargs)
    377 retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
    378 while True:
--> 379     do = self.iter(retry_state=retry_state)
    380     if isinstance(do, DoAttempt):
    381         try:

File ~\AppData\Roaming\Python\Python311\site-packages\tenacity\__init__.py:314, in BaseRetrying.iter(self, retry_state)
    312 is_explicit_retry = fut.failed and isinstance(fut.exception(), TryAgain)
    313 if not (is_explicit_retry or self.retry(retry_state)):
--> 314     return fut.result()
    316 if self.after is not None:
    317     self.after(retry_state)

File c:\Program Files\Python311\Lib\concurrent\futures\_base.py:449, in Future.result(self, timeout)
    447     raise CancelledError()
    448 elif self._state == FINISHED:
--> 449     return self.__get_result()
    451 self._condition.wait(timeout)
    453 if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:

File c:\Program Files\Python311\Lib\concurrent\futures\_base.py:401, in Future.__get_result(self)
    399 if self._exception:
    400     try:
--> 401         raise self._exception
    402     finally:
    403         # Break a reference cycle with the exception in self._exception
    404         self = None

File ~\AppData\Roaming\Python\Python311\site-packages\tenacity\__init__.py:382, in Retrying.__call__(self, fn, *args, **kwargs)
    380 if isinstance(do, DoAttempt):
    381     try:
--> 382         result = fn(*args, **kwargs)
    383     except BaseException:  # noqa: B902
    384         retry_state.set_exception(sys.exc_info())  # type: ignore[arg-type]

File ~\AppData\Roaming\Python\Python311\site-packages\langchain\chat_models\openai.py:282, in ChatOpenAI.completion_with_retry.._completion_with_retry(**kwargs)
    280 @retry_decorator
    281 def _completion_with_retry(**kwargs: Any) -> Any:
--> 282     return self.client.create(**kwargs)

File ~\AppData\Roaming\Python\Python311\site-packages\openai\api_resources\chat_completion.py:25, in ChatCompletion.create(cls, *args, **kwargs)
     23 while True:
     24     try:
---> 25         return super().create(*args, **kwargs)
     26     except TryAgain as e:
     27         if timeout is not None and time.time() > start + timeout:

File ~\AppData\Roaming\Python\Python311\site-packages\openai\api_resources\abstract\engine_api_resource.py:153, in EngineAPIResource.create(cls, api_key, api_base, api_type, request_id, api_version, organization, **params)
    127 @classmethod
    128 def create(
    129     cls,
   (...)
    136     **params,
    137 ):
    138     (
    139         deployment_id,
    140         engine,
   (...)
    150         api_key, api_base, api_type, api_version, organization, **params
    151     )
--> 153     response, _, api_key = requestor.request(
    154         "post",
    155         url,
    156         params=params,
    157         headers=headers,
    158         stream=stream,
    159         request_id=request_id,
    160         request_timeout=request_timeout,
    161     )
    163     if stream:
    164         # must be an iterator
    165         assert not isinstance(response, OpenAIResponse)

File ~\AppData\Roaming\Python\Python311\site-packages\openai\api_requestor.py:230, in APIRequestor.request(self, method, url, params, headers, files, stream, request_id, request_timeout)
    209 def request(
    210     self,
    211     method,
   (...)
    218     request_timeout: Optional[Union[float, Tuple[float, float]]] = None,
    219 ) -> Tuple[Union[OpenAIResponse, Iterator[OpenAIResponse]], bool, str]:
    220     result = self.request_raw(
    221         method.lower(),
    222         url,
   (...)
    228         request_timeout=request_timeout,
    229     )
--> 230     resp, got_stream = self._interpret_response(result, stream)
    231     return resp, got_stream, self.api_key

File ~\AppData\Roaming\Python\Python311\site-packages\openai\api_requestor.py:624, in APIRequestor._interpret_response(self, result, stream)
    616     return (
    617         self._interpret_response_line(
    618             line, result.status_code, result.headers, stream=True
    619         )
    620         for line in parse_stream(result.iter_lines())
    621     ), True
    622 else:
    623     return (
--> 624         self._interpret_response_line(
    625             result.content.decode("utf-8"),
    626             result.status_code,
    627             result.headers,
    628             stream=False,
    629         ),
    630         False,
    631     )

File ~\AppData\Roaming\Python\Python311\site-packages\openai\api_requestor.py:687, in APIRequestor._interpret_response_line(self, rbody, rcode, rheaders, stream)
    685 stream_error = stream and "error" in resp.data
    686 if stream_error or not 200 <= rcode < 300:
--> 687     raise self.handle_error_response(
    688         rbody, rcode, resp.data, rheaders, stream_error=stream_error
    689     )
    690 return resp

AuthenticationError: 

I think this has to do with the openai key and its limit for prompt tokens!

What limit?
Can you reference any documentation? What “limit for prompt tokens”?

I have tried changing max_tokens limit and request_timeout (just in case…) - same result.

When I added traceback to get more info about the error, this is what I got:

  File "C:\Users\jozwi\AppData\Roaming\Python\Python311\site-packages\tenacity\__init__.py", line 314, in iter
    return fut.result()
           ^^^^^^^^^^^^
  File "c:\Program Files\Python311\Lib\concurrent\futures\_base.py", line 449, in result
    return self.__get_result()
           ^^^^^^^^^^^^^^^^^^^
  File "c:\Program Files\Python311\Lib\concurrent\futures\_base.py", line 401, in __get_result
    raise self._exception
  File "C:\Users\jozwi\AppData\Roaming\Python\Python311\site-packages\tenacity\__init__.py", line 382, in __call__
    result = fn(*args, **kwargs)
             ^^^^^^^^^^^^^^^^^^^
  File "C:\Users\jozwi\AppData\Roaming\Python\Python311\site-packages\langchain\chat_models\openai.py", line 282, in _completion_with_retry
    return self.client.create(**kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\jozwi\AppData\Roaming\Python\Python311\site-packages\openai\api_resources\chat_completion.py", line 25, in create
    return super().create(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\jozwi\AppData\Roaming\Python\Python311\site-packages\openai\api_resources\abstract\engine_api_resource.py", line 153, in create
    response, _, api_key = requestor.request(
                           ^^^^^^^^^^^^^^^^^^
  File "C:\Users\jozwi\AppData\Roaming\Python\Python311\site-packages\openai\api_requestor.py", line 230, in request
    resp, got_stream = self._interpret_response(result, stream)
                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "C:\Users\jozwi\AppData\Roaming\Python\Python311\site-packages\openai\api_requestor.py", line 624, in _interpret_response
    self._interpret_response_line(
  File "C:\Users\jozwi\AppData\Roaming\Python\Python311\site-packages\openai\api_requestor.py", line 687, in _interpret_response_line
    raise self.handle_error_response(
openai.error.AuthenticationError: <empty message>

No, I don't know exactly why you are getting the error, but I think it has to do with the key that ChatGPT uses to get it to work, and I think ChatGPT is saying you have hit the limit for that key :neutral_face:

According to the documentation, I can change this with max_tokens variable:

https://python.langchain.com/en/latest/reference/modules/chat_models.html?highlight=max_tokens#langchain.chat_models.ChatOpenAI.max_tokens

By default, it’s set to “None”, which should allow the maximum possible limit (I think I saw it somewhere as 2024, but I’m not sure). I have tried different limits as well as no limits - same result.

I have an unmodified Jupyter file so nothing should be going over default limits here, especially since the prompt is a short sentence…

I’m out of ideas :confused:

I don't think it has to do with the prompt size but with the key used by DLAI. Give it another try tomorrow — it might be reset in some way!