fix: PR comment changes
parent 4e880eca84
commit 204f9c2e4a
3 changed files with 12 additions and 9 deletions
@@ -21,7 +21,10 @@ LLM_PROVIDER="openai"
 LLM_ENDPOINT=""
 LLM_API_VERSION=""
 LLM_MAX_TOKENS="16384"
-LLM_INSTRUCTOR_MODE="json_schema_mode" # this mode is used for gpt-5 models
+# Instructor's modes determine how structured data is requested from and extracted from LLM responses
+# You can change this type (i.e. mode) via this env variable
+# Each LLM has its own default value, e.g. gpt-5 models have "json_schema_mode"
+LLM_INSTRUCTOR_MODE=""
 
 EMBEDDING_PROVIDER="openai"
 EMBEDDING_MODEL="openai/text-embedding-3-large"
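The replacement turns an opaque one-liner into a self-documenting setting: an empty LLM_INSTRUCTOR_MODE now means "use the provider's default mode". As a minimal sketch of how this variable reaches the field changed in the next hunk, assuming the project loads LLMConfig through pydantic-settings (suggested, but not shown, by the BaseSettings base class below):

import os
from pydantic_settings import BaseSettings

class LLMConfig(BaseSettings):
    # pydantic-settings matches env vars to field names case-insensitively,
    # so LLM_INSTRUCTOR_MODE fills this field
    llm_instructor_mode: str = ""  # "" means: defer to the provider's default

os.environ["LLM_INSTRUCTOR_MODE"] = "JSON_SCHEMA_MODE"
print(LLMConfig().llm_instructor_mode.lower())  # -> "json_schema_mode"
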
@@ -38,7 +38,7 @@ class LLMConfig(BaseSettings):
     """
 
     structured_output_framework: str = "instructor"
-    llm_instructor_mode: Optional[str] = None
+    llm_instructor_mode: str = ""
     llm_provider: str = "openai"
     llm_model: str = "openai/gpt-5-mini"
     llm_endpoint: str = ""
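This type change is what makes the unconditional lowercasing in the rest of the commit safe: with the old Optional[str] = None default, every .lower() call site would have needed a None guard. A short illustration:

mode = None
# mode.lower()  # AttributeError: 'NoneType' object has no attribute 'lower'

mode = ""                  # the new default
assert mode.lower() == ""  # always safe, and still falsy, so "unset" stays detectable
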
@@ -182,7 +182,7 @@ class LLMConfig(BaseSettings):
         instance.
         """
         return {
-            "llm_instructor_mode": self.llm_instructor_mode,
+            "llm_instructor_mode": self.llm_instructor_mode.lower(),
             "provider": self.llm_provider,
             "model": self.llm_model,
             "endpoint": self.llm_endpoint,
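All remaining hunks apply this same normalization at the adapter call sites, so every adapter receives a canonical lowercase mode string. How adapters consume it is outside this diff; one plausible resolver, assuming a recent instructor release where Mode enum values are lowercase strings such as "json_mode" and "json_schema_mode" (resolve_mode itself is a hypothetical helper, not the repository's code):

import instructor

def resolve_mode(name: str) -> instructor.Mode | None:
    # "" means "no override"; anything else is looked up by enum *value*,
    # which is exactly why the canonical lowercase form matters
    return instructor.Mode(name) if name else None

assert resolve_mode("json_schema_mode") is instructor.Mode.JSON_SCHEMA
assert resolve_mode("") is None
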
@@ -81,7 +81,7 @@ def get_llm_client(raise_api_key_error: bool = True):
             model=llm_config.llm_model,
             transcription_model=llm_config.transcription_model,
             max_completion_tokens=max_completion_tokens,
-            instructor_mode=llm_config.llm_instructor_mode,
+            instructor_mode=llm_config.llm_instructor_mode.lower(),
             streaming=llm_config.llm_streaming,
             fallback_api_key=llm_config.fallback_api_key,
             fallback_endpoint=llm_config.fallback_endpoint,
@@ -102,7 +102,7 @@ def get_llm_client(raise_api_key_error: bool = True):
             llm_config.llm_model,
             "Ollama",
             max_completion_tokens=max_completion_tokens,
-            instructor_mode=llm_config.llm_instructor_mode,
+            instructor_mode=llm_config.llm_instructor_mode.lower(),
         )
 
     elif provider == LLMProvider.ANTHROPIC:
@@ -113,7 +113,7 @@ def get_llm_client(raise_api_key_error: bool = True):
         return AnthropicAdapter(
             max_completion_tokens=max_completion_tokens,
             model=llm_config.llm_model,
-            instructor_mode=llm_config.llm_instructor_mode,
+            instructor_mode=llm_config.llm_instructor_mode.lower(),
         )
 
     elif provider == LLMProvider.CUSTOM:
@@ -130,7 +130,7 @@ def get_llm_client(raise_api_key_error: bool = True):
             llm_config.llm_model,
             "Custom",
             max_completion_tokens=max_completion_tokens,
-            instructor_mode=llm_config.llm_instructor_mode,
+            instructor_mode=llm_config.llm_instructor_mode.lower(),
             fallback_api_key=llm_config.fallback_api_key,
             fallback_endpoint=llm_config.fallback_endpoint,
             fallback_model=llm_config.fallback_model,
@@ -150,7 +150,7 @@ def get_llm_client(raise_api_key_error: bool = True):
             max_completion_tokens=max_completion_tokens,
             endpoint=llm_config.llm_endpoint,
             api_version=llm_config.llm_api_version,
-            instructor_mode=llm_config.llm_instructor_mode,
+            instructor_mode=llm_config.llm_instructor_mode.lower(),
         )
 
     elif provider == LLMProvider.MISTRAL:
@@ -166,7 +166,7 @@ def get_llm_client(raise_api_key_error: bool = True):
             model=llm_config.llm_model,
             max_completion_tokens=max_completion_tokens,
             endpoint=llm_config.llm_endpoint,
-            instructor_mode=llm_config.llm_instructor_mode,
+            instructor_mode=llm_config.llm_instructor_mode.lower(),
         )
 
     elif provider == LLMProvider.MISTRAL:
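Net effect for users, with values from the template in the first hunk (the value casing below is deliberately arbitrary, since the new .lower() calls canonicalize it before any adapter sees it):

# .env: leave empty to accept each provider's default instructor mode
LLM_INSTRUCTOR_MODE=""

# or pin one explicitly; any casing of the value now works
LLM_INSTRUCTOR_MODE="Json_Schema_Mode"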