From 6b6e94da0822f8aebc4d95bffd0b536ac2152579 Mon Sep 17 00:00:00 2001
From: "Charlie.Wei"
Date: Thu, 10 Oct 2024 15:26:38 +0800
Subject: [PATCH] Fix code indentation errors (#9164)

---
 .../model_providers/azure_openai/llm/llm.py   | 56 +++++++++----------
 1 file changed, 28 insertions(+), 28 deletions(-)

diff --git a/api/core/model_runtime/model_providers/azure_openai/llm/llm.py b/api/core/model_runtime/model_providers/azure_openai/llm/llm.py
index f63c80e58e..b9cc3bb672 100644
--- a/api/core/model_runtime/model_providers/azure_openai/llm/llm.py
+++ b/api/core/model_runtime/model_providers/azure_openai/llm/llm.py
@@ -312,39 +312,39 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
         if user:
             extra_model_kwargs["user"] = user
 
-            # clear illegal prompt messages
-            prompt_messages = self._clear_illegal_prompt_messages(model, prompt_messages)
-
-            block_as_stream = False
-            if model.startswith("o1"):
-                if stream:
-                    block_as_stream = True
-                    stream = False
-
-                if "stream_options" in extra_model_kwargs:
-                    del extra_model_kwargs["stream_options"]
-
-                if "stop" in extra_model_kwargs:
-                    del extra_model_kwargs["stop"]
-
-            # chat model
-            response = client.chat.completions.create(
-                messages=[self._convert_prompt_message_to_dict(m) for m in prompt_messages],
-                model=model,
-                stream=stream,
-                **model_parameters,
-                **extra_model_kwargs,
-            )
+        # clear illegal prompt messages
+        prompt_messages = self._clear_illegal_prompt_messages(model, prompt_messages)
 
+        block_as_stream = False
+        if model.startswith("o1"):
             if stream:
-                return self._handle_chat_generate_stream_response(model, credentials, response, prompt_messages, tools)
+                block_as_stream = True
+                stream = False
 
-            block_result = self._handle_chat_generate_response(model, credentials, response, prompt_messages, tools)
+            if "stream_options" in extra_model_kwargs:
+                del extra_model_kwargs["stream_options"]
 
-            if block_as_stream:
-                return self._handle_chat_block_as_stream_response(block_result, prompt_messages, stop)
+            if "stop" in extra_model_kwargs:
+                del extra_model_kwargs["stop"]
 
-            return block_result
+        # chat model
+        response = client.chat.completions.create(
+            messages=[self._convert_prompt_message_to_dict(m) for m in prompt_messages],
+            model=model,
+            stream=stream,
+            **model_parameters,
+            **extra_model_kwargs,
+        )
+
+        if stream:
+            return self._handle_chat_generate_stream_response(model, credentials, response, prompt_messages, tools)
+
+        block_result = self._handle_chat_generate_response(model, credentials, response, prompt_messages, tools)
+
+        if block_as_stream:
+            return self._handle_chat_block_as_stream_response(block_result, prompt_messages, stop)
+
+        return block_result
 
     def _handle_chat_block_as_stream_response(
         self,