# Integration Test Environment Configuration
# This file is used for integration testing with mock OpenAI server

###########################
### Server Configuration
###########################
HOST=0.0.0.0
PORT=9621
WEBUI_TITLE='Integration Test KB'
WEBUI_DESCRIPTION="Integration Test for LightRAG"
WORKERS=1

### Directory Configuration
INPUT_DIR=./test_inputs
WORKING_DIR=./test_rag_storage

### Use offline tokenizer (no internet required)
LIGHTRAG_OFFLINE_TOKENIZER=true

### Logging level
LOG_LEVEL=INFO
VERBOSE=False

#####################################
### Authentication (Disabled for tests)
#####################################
# No authentication required for testing

######################################################################################
### Query Configuration
######################################################################################
ENABLE_LLM_CACHE=true
TOP_K=20
CHUNK_TOP_K=10
MAX_ENTITY_TOKENS=4000
MAX_RELATION_TOKENS=4000
MAX_TOTAL_TOKENS=16000

########################################
### Document processing configuration
########################################
ENABLE_LLM_CACHE_FOR_EXTRACT=true
SUMMARY_LANGUAGE=English

### Entity types for code analysis
ENTITY_TYPES='["Class","Function","Variable","Module","Namespace","Struct","Enum","Method"]'

### Chunk size for document splitting
CHUNK_SIZE=800
CHUNK_OVERLAP_SIZE=100

###############################
### Concurrency Configuration
###############################
MAX_ASYNC=2
MAX_PARALLEL_INSERT=1
EMBEDDING_FUNC_MAX_ASYNC=4
EMBEDDING_BATCH_NUM=5

###########################################################################
### LLM Configuration (Mock OpenAI Server)
###########################################################################
LLM_BINDING=openai
LLM_MODEL=gpt-5
LLM_BINDING_HOST=http://127.0.0.1:8000
LLM_BINDING_API_KEY=mock-api-key-for-testing
LLM_TIMEOUT=60

### OpenAI Specific Parameters (for mock server)
OPENAI_LLM_REASONING_EFFORT=medium
OPENAI_LLM_MAX_COMPLETION_TOKENS=8000
OPENAI_LLM_TEMPERATURE=0.7

#######################################################################################
### Embedding Configuration (Mock OpenAI Server)
#######################################################################################
EMBEDDING_BINDING=openai
EMBEDDING_MODEL=text-embedding-3-large
EMBEDDING_DIM=3072
EMBEDDING_BINDING_HOST=http://127.0.0.1:8000
EMBEDDING_BINDING_API_KEY=mock-api-key-for-testing
EMBEDDING_TIMEOUT=30
EMBEDDING_SEND_DIM=false

####################################################################
### WORKSPACE
####################################################################
WORKSPACE=integration_test

############################
### Data storage selection
############################
### Redis Storage
LIGHTRAG_KV_STORAGE=RedisKVStorage
LIGHTRAG_DOC_STATUS_STORAGE=RedisDocStatusStorage

### Milvus Vector Storage
LIGHTRAG_VECTOR_STORAGE=MilvusVectorDBStorage

### Neo4j Graph Storage
LIGHTRAG_GRAPH_STORAGE=Neo4JStorage

### Redis Configuration
REDIS_URI=redis://localhost:6379
REDIS_SOCKET_TIMEOUT=30
REDIS_CONNECT_TIMEOUT=10
REDIS_MAX_CONNECTIONS=50
REDIS_RETRY_ATTEMPTS=3

### Neo4j Configuration
NEO4J_URI=neo4j://localhost:7687
NEO4J_USERNAME=neo4j
NEO4J_PASSWORD=testpassword123
NEO4J_DATABASE=neo4j
NEO4J_MAX_CONNECTION_POOL_SIZE=50
NEO4J_CONNECTION_TIMEOUT=30

### Milvus Configuration
MILVUS_URI=http://localhost:19530
MILVUS_DB_NAME=default