refactor: use gpt-5-mini by default (#1505)
<!-- .github/pull_request_template.md --> ## Description Set gpt-5-mini back to default LLM as issues with it have been resolved ## Type of Change <!-- Please check the relevant option --> - [ ] Bug fix (non-breaking change that fixes an issue) - [ ] New feature (non-breaking change that adds functionality) - [ ] Breaking change (fix or feature that would cause existing functionality to change) - [ ] Documentation update - [ ] Code refactoring - [ ] Performance improvement - [ ] Other (please specify): ## Pre-submission Checklist <!-- Please check all boxes that apply before submitting your PR --> - [ ] **I have tested my changes thoroughly before submitting this PR** - [ ] **This PR contains minimal changes necessary to address the issue/feature** - [ ] My code follows the project's coding standards and style guidelines - [ ] I have added tests that prove my fix is effective or that my feature works - [ ] I have added necessary documentation (if applicable) - [ ] All new and existing tests pass - [ ] I have searched existing PRs to ensure this change hasn't been submitted already - [ ] I have linked any relevant issues in the description - [ ] My commits have clear and descriptive messages ## DCO Affirmation I affirm that all code in every commit of this pull request conforms to the terms of the Topoteretes Developer Certificate of Origin.
This commit is contained in:
parent
3b8b839057
commit
0ef2623ac6
2 changed files with 4 additions and 4 deletions
|
|
@@ -16,7 +16,7 @@
|
||||||
STRUCTURED_OUTPUT_FRAMEWORK="instructor"
|
STRUCTURED_OUTPUT_FRAMEWORK="instructor"
|
||||||
|
|
||||||
LLM_API_KEY="your_api_key"
|
LLM_API_KEY="your_api_key"
|
||||||
LLM_MODEL="openai/gpt-4o-mini"
|
LLM_MODEL="openai/gpt-5-mini"
|
||||||
LLM_PROVIDER="openai"
|
LLM_PROVIDER="openai"
|
||||||
LLM_ENDPOINT=""
|
LLM_ENDPOINT=""
|
||||||
LLM_API_VERSION=""
|
LLM_API_VERSION=""
|
||||||
|
|
@@ -36,7 +36,7 @@ EMBEDDING_MAX_TOKENS=8191
|
||||||
|
|
||||||
# If using BAML structured output these env variables will be used
|
# If using BAML structured output these env variables will be used
|
||||||
BAML_LLM_PROVIDER=openai
|
BAML_LLM_PROVIDER=openai
|
||||||
BAML_LLM_MODEL="gpt-4o-mini"
|
BAML_LLM_MODEL="gpt-5-mini"
|
||||||
BAML_LLM_ENDPOINT=""
|
BAML_LLM_ENDPOINT=""
|
||||||
BAML_LLM_API_KEY="your_api_key"
|
BAML_LLM_API_KEY="your_api_key"
|
||||||
BAML_LLM_API_VERSION=""
|
BAML_LLM_API_VERSION=""
|
||||||
|
|
|
||||||
|
|
@@ -39,7 +39,7 @@ class LLMConfig(BaseSettings):
|
||||||
|
|
||||||
structured_output_framework: str = "instructor"
|
structured_output_framework: str = "instructor"
|
||||||
llm_provider: str = "openai"
|
llm_provider: str = "openai"
|
||||||
llm_model: str = "openai/gpt-4o-mini"
|
llm_model: str = "openai/gpt-5-mini"
|
||||||
llm_endpoint: str = ""
|
llm_endpoint: str = ""
|
||||||
llm_api_key: Optional[str] = None
|
llm_api_key: Optional[str] = None
|
||||||
llm_api_version: Optional[str] = None
|
llm_api_version: Optional[str] = None
|
||||||
|
|
@@ -48,7 +48,7 @@ class LLMConfig(BaseSettings):
|
||||||
llm_max_completion_tokens: int = 16384
|
llm_max_completion_tokens: int = 16384
|
||||||
|
|
||||||
baml_llm_provider: str = "openai"
|
baml_llm_provider: str = "openai"
|
||||||
baml_llm_model: str = "gpt-4o-mini"
|
baml_llm_model: str = "gpt-5-mini"
|
||||||
baml_llm_endpoint: str = ""
|
baml_llm_endpoint: str = ""
|
||||||
baml_llm_api_key: Optional[str] = None
|
baml_llm_api_key: Optional[str] = None
|
||||||
baml_llm_temperature: float = 0.0
|
baml_llm_temperature: float = 0.0
|
||||||
|
|
|
||||||
Loading…
Add table
Reference in a new issue