chore: Update default OpenAI model to gpt-4o-mini

Changed the default LLM model from gpt-4o to gpt-4o-mini across all
configuration files for better cost efficiency while maintaining quality.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Daniel Chalef 2025-10-26 17:36:09 -07:00
parent 39e3bc0098
commit 38ea97e90c
4 changed files with 4 additions and 4 deletions

View file

@@ -8,7 +8,7 @@ server:
llm:
provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
-model: "gpt-4o"
+model: "gpt-4o-mini"
temperature: 0.0
max_tokens: 4096

View file

@@ -9,7 +9,7 @@ server:
llm:
provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
-model: "gpt-4o"
+model: "gpt-4o-mini"
temperature: 0.0
max_tokens: 4096

View file

@@ -8,7 +8,7 @@ server:
llm:
provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
-model: "gpt-4o"
+model: "gpt-4o-mini"
temperature: 0.0
max_tokens: 4096

View file

@@ -8,7 +8,7 @@ server:
llm:
provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
-model: "gpt-4o"
+model: "gpt-4o-mini"
temperature: 0.0
max_tokens: 4096