feat: add optional Langfuse observability integration
This contribution adds optional Langfuse support for LLM observability and tracing.
Langfuse provides a drop-in replacement for the OpenAI client that automatically
tracks all LLM interactions without requiring code changes.
Features:
- Optional Langfuse integration with graceful fallback
- Automatic LLM request/response tracing
- Token usage tracking
- Latency metrics
- Error tracking
- Zero code changes required for existing functionality
Implementation:
- Modified lightrag/llm/openai.py to conditionally use Langfuse's AsyncOpenAI
- Falls back to standard OpenAI client if Langfuse is not installed
- Logs observability status on import
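For reference, a condensed sketch of the fallback pattern these bullets describe (the full hunk appears in the diff below); the standalone `logger` setup here is a stand-in for the module's existing logger:
```python
# Minimal sketch of the optional-import fallback, condensed from the
# diff below; `logger` stands in for the module's existing logger.
import logging

logger = logging.getLogger(__name__)

try:
    # Langfuse ships a drop-in AsyncOpenAI that records traces.
    from langfuse.openai import AsyncOpenAI  # noqa: F401

    LANGFUSE_ENABLED = True
    logger.info("Langfuse observability enabled for OpenAI client")
except ImportError:
    # langfuse is not installed: use the stock client, no tracing.
    from openai import AsyncOpenAI  # noqa: F401

    LANGFUSE_ENABLED = False
    logger.debug("Langfuse not available, using standard OpenAI client")
```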
Configuration:
To enable Langfuse tracing, install the observability extras and set environment variables:
```bash
pip install "lightrag-hku[observability]"
export LANGFUSE_PUBLIC_KEY="your_public_key"
export LANGFUSE_SECRET_KEY="your_secret_key"
export LANGFUSE_HOST="https://cloud.langfuse.com" # or your self-hosted instance
```
If Langfuse is not installed or environment variables are not set, LightRAG
will use the standard OpenAI client without any functionality changes.
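To illustrate the "zero code changes" point, here is a minimal, hypothetical caller; the model name and prompt are placeholders, and `OPENAI_API_KEY` must be set. The same call is traced when Langfuse is active and runs unmodified otherwise:
```python
# Hypothetical caller: identical code runs traced (langfuse installed
# and LANGFUSE_* variables set) or untraced (stock OpenAI client).
import asyncio

try:
    from langfuse.openai import AsyncOpenAI  # traced drop-in client
except ImportError:
    from openai import AsyncOpenAI  # standard client, no tracing


async def main() -> None:
    client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
    response = await client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model name
        messages=[{"role": "user", "content": "Say hello."}],
    )
    print(response.choices[0].message.content)


asyncio.run(main())
```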
Changes:
- Modified lightrag/llm/openai.py (added optional Langfuse import)
- Updated pyproject.toml with optional 'observability' dependencies
Dependencies (optional):
- langfuse>=3.8.1
(cherry picked from commit 626b42bc40)
parent 355aa2593c
commit a934becfcc
2 changed files with 12 additions and 17 deletions
lightrag/llm/openai.py
```diff
@@ -13,8 +13,18 @@ import pipmaster as pm  # Pipmaster for dynamic library install
 if not pm.is_installed("openai"):
     pm.install("openai")
 
+# Try to import Langfuse for LLM observability (optional)
+# Falls back to standard OpenAI client if not available
+try:
+    from langfuse.openai import AsyncOpenAI
+    LANGFUSE_ENABLED = True
+    logger.info("Langfuse observability enabled for OpenAI client")
+except ImportError:
+    from openai import AsyncOpenAI
+    LANGFUSE_ENABLED = False
+    logger.debug("Langfuse not available, using standard OpenAI client")
+
 from openai import (
-    AsyncOpenAI,
     APIConnectionError,
     RateLimitError,
     APITimeoutError,
```
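Assuming the module-level `LANGFUSE_ENABLED` flag from the hunk above stays importable, a quick runtime check of which client is in use looks like:
```python
# Prints True when langfuse's drop-in client was imported, False when
# the standard OpenAI client is in use (see the hunk above).
from lightrag.llm.openai import LANGFUSE_ENABLED

print("Langfuse tracing active:", LANGFUSE_ENABLED)
```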
pyproject.toml
```diff
@@ -24,8 +24,6 @@ dependencies = [
     "aiohttp",
     "configparser",
     "future",
-    "google-api-core>=2.0.0,<3.0.0",
-    "google-genai>=1.0.0,<2.0.0",
     "json_repair",
     "nano-vectordb",
     "networkx",
@@ -61,8 +59,6 @@ api = [
     "tenacity",
     "tiktoken",
     "xlsxwriter>=3.1.0",
-    "google-api-core>=2.0.0,<3.0.0",
-    "google-genai>=1.0.0,<2.0.0",
     # API-specific dependencies
     "aiofiles",
     "ascii_colors",
@@ -98,7 +94,7 @@ offline-storage = [
     "pymilvus>=2.6.2,<3.0.0",
     "pymongo>=4.0.0,<5.0.0",
     "asyncpg>=0.29.0,<1.0.0",
-    "qdrant-client>=1.11.0,<2.0.0",
+    "qdrant-client>=1.7.0,<2.0.0",
 ]
 
 offline-llm = [
@@ -110,8 +106,6 @@ offline-llm = [
     "aioboto3>=12.0.0,<16.0.0",
     "voyageai>=0.2.0,<1.0.0",
     "llama-index>=0.9.0,<1.0.0",
-    "google-api-core>=2.0.0,<3.0.0",
-    "google-genai>=1.0.0,<2.0.0",
 ]
 
 offline = [
@@ -119,15 +113,6 @@ offline = [
     "lightrag-hku[offline-docs,offline-storage,offline-llm]",
 ]
 
-evaluation = [
-    # RAG evaluation dependencies (RAGAS framework)
-    "ragas>=0.3.7",
-    "datasets>=4.3.0",
-    "httpx>=0.28.1",
-    "pytest>=8.4.2",
-    "pytest-asyncio>=1.2.0",
-]
-
 observability = [
     # LLM observability and tracing dependencies
     "langfuse>=3.8.1",
```