diff --git a/lightrag/llm/openai.py b/lightrag/llm/openai.py
index d367abc7..07942101 100644
--- a/lightrag/llm/openai.py
+++ b/lightrag/llm/openai.py
@@ -10,8 +10,24 @@ import pipmaster as pm
 
 if not pm.is_installed("openai"):
     pm.install("openai")
+# Try to import Langfuse for LLM observability (optional).
+# Falls back to the standard OpenAI client if not available.
+# NOTE(review): the project `logger` is imported further down this file,
+# so log through the stdlib here to avoid a NameError at import time.
+import logging
+
+try:
+    from langfuse.openai import AsyncOpenAI
+
+    LANGFUSE_ENABLED = True
+    logging.getLogger("lightrag").info("Langfuse observability enabled for OpenAI client")
+except ImportError:
+    from openai import AsyncOpenAI
+
+    LANGFUSE_ENABLED = False
+    logging.getLogger("lightrag").debug("Langfuse not available, using standard OpenAI client")
+
 from openai import (
-    AsyncOpenAI,
     APIConnectionError,
     RateLimitError,
     APITimeoutError,
diff --git a/pyproject.toml b/pyproject.toml
index 57e1b765..8eabb37c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -113,6 +113,11 @@ offline = [
     "lightrag-hku[offline-docs,offline-storage,offline-llm]",
 ]
 
+observability = [
+    # LLM observability and tracing dependencies
+    "langfuse>=3.8.1",
+]
+
 [project.scripts]
 lightrag-server = "lightrag.api.lightrag_server:main"
 lightrag-gunicorn = "lightrag.api.run_with_gunicorn:main"