修改配置文件

This commit is contained in:
Lucas 2025-11-23 03:01:12 +08:00
parent ae78828f9c
commit c0d93b35d7
4 changed files with 9 additions and 9 deletions

View file

@@ -8,7 +8,7 @@ server:
 llm:
   provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
-  model: "gpt-5-mini"
+  model: ${MODEL_NAME:"gpt-4o"}
   max_tokens: 4096
 providers:

View file

@@ -8,7 +8,7 @@ server:
 llm:
   provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
-  model: "gpt-5-mini"
+  model: ${MODEL_NAME:"gpt-4o"}
   max_tokens: 4096
 providers:
@@ -98,4 +98,4 @@ graphiti:
   - name: "Topic"
     description: "Subject of conversation, interest, or knowledge domain (use as last resort)"
   - name: "Object"
-    description: "Physical items, tools, devices, or possessions (use as last resort)"
\ No newline at end of file
+    description: "Physical items, tools, devices, or possessions (use as last resort)"

View file

@@ -8,7 +8,7 @@ server:
 llm:
   provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
-  model: "gpt-5-mini"
+  model: ${MODEL_NAME:"gpt-4o"}
   max_tokens: 4096
 providers:
@@ -100,4 +100,4 @@ graphiti:
   - name: "Topic"
     description: "Subject of conversation, interest, or knowledge domain (use as last resort)"
   - name: "Object"
-    description: "Physical items, tools, devices, or possessions (use as last resort)"
\ No newline at end of file
+    description: "Physical items, tools, devices, or possessions (use as last resort)"

View file

@@ -6,13 +6,13 @@
 # See README.md "Concurrency and LLM Provider 429 Rate Limit Errors" section for tuning guidance
 server:
-  transport: "http" # Options: stdio, sse (deprecated), http
+  transport: "sse" # Options: stdio, sse (deprecated), http
   host: "0.0.0.0"
   port: 8000
 llm:
   provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
-  model: "gpt-5-mini"
+  model: ${MODEL_NAME:"gpt-4o"}
   max_tokens: 4096
 providers:
@@ -71,7 +71,7 @@ embedder:
     model: "voyage-3"
 database:
-  provider: "falkordb" # Default: falkordb. Options: neo4j, falkordb
+  provider: "neo4j" # Default: falkordb. Options: neo4j, falkordb
 providers:
   falkordb:
@@ -108,4 +108,4 @@ graphiti:
   - name: "Topic"
     description: "Subject of conversation, interest, or knowledge domain (use as last resort)"
   - name: "Object"
-    description: "Physical items, tools, devices, or possessions (use as last resort)"
\ No newline at end of file
+    description: "Physical items, tools, devices, or possessions (use as last resort)"