Add Chain of Thought support to Gemini LLM integration

- Extract thoughts from response parts
- Add COT enable/disable parameter
yangdx 2025-11-07 15:22:14 +08:00
parent 6686edfd35
commit 3cb4eae492
2 changed files with 148 additions and 41 deletions


@@ -114,28 +114,44 @@ def _format_history_messages(history_messages: list[dict[str, Any]] | None) -> str:
     return "\n".join(history_lines)
 
 
-def _extract_response_text(response: Any) -> str:
+def _extract_response_text(
+    response: Any, extract_thoughts: bool = False
+) -> tuple[str, str]:
     """
-    Extract text content from Gemini response, avoiding warnings about non-text parts.
+    Extract text content from Gemini response, separating regular content from thoughts.
 
-    Always extracts text manually from parts to avoid triggering warnings when
-    non-text parts (like 'thought_signature') are present in the response.
+    Args:
+        response: Gemini API response object
+        extract_thoughts: Whether to extract thought content separately
+
+    Returns:
+        Tuple of (regular_text, thought_text)
     """
     candidates = getattr(response, "candidates", None)
     if not candidates:
-        return ""
+        return ("", "")
+
+    regular_parts: list[str] = []
+    thought_parts: list[str] = []
 
-    parts: list[str] = []
     for candidate in candidates:
         if not getattr(candidate, "content", None):
             continue
-        for part in getattr(candidate.content, "parts", []):
-            # Only extract text parts to avoid non-text content like thought_signature
+        # Use 'or []' to handle None values from parts attribute
+        for part in getattr(candidate.content, "parts", None) or []:
            text = getattr(part, "text", None)
-            if text:
-                parts.append(text)
-    return "\n".join(parts)
+            if not text:
+                continue
+            # Check if this part is thought content using the 'thought' attribute
+            is_thought = getattr(part, "thought", False)
+            if is_thought and extract_thoughts:
+                thought_parts.append(text)
+            elif not is_thought:
+                regular_parts.append(text)
+
+    return ("\n".join(regular_parts), "\n".join(thought_parts))
 
 
 async def gemini_complete_if_cache(
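For review, a minimal sketch of the new helper's contract on stub objects (`SimpleNamespace` stand-ins for the SDK response types; the real google-genai classes are not needed to see the split):

```python
from types import SimpleNamespace

# Stubs exposing only the attributes the helper reads:
# response.candidates[i].content.parts[j].text / .thought
thought = SimpleNamespace(text="Consider scattering first...", thought=True)
answer = SimpleNamespace(text="Rayleigh scattering.", thought=False)
response = SimpleNamespace(
    candidates=[SimpleNamespace(content=SimpleNamespace(parts=[thought, answer]))]
)

regular, thoughts = _extract_response_text(response, extract_thoughts=True)
assert (regular, thoughts) == ("Rayleigh scattering.", "Consider scattering first...")

# The default call drops thought parts entirely and returns an empty thought string.
regular, thoughts = _extract_response_text(response)
assert (regular, thoughts) == ("Rayleigh scattering.", "")
```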
@@ -143,18 +159,51 @@ async def gemini_complete_if_cache(
     prompt: str,
     system_prompt: str | None = None,
     history_messages: list[dict[str, Any]] | None = None,
-    *,
-    api_key: str | None = None,
+    enable_cot: bool = False,
     base_url: str | None = None,
-    generation_config: dict[str, Any] | None = None,
-    keyword_extraction: bool = False,
+    api_key: str | None = None,
     token_tracker: Any | None = None,
-    hashing_kv: Any | None = None,  # noqa: ARG001 - present for interface parity
     stream: bool | None = None,
-    enable_cot: bool = False,  # noqa: ARG001 - not supported by Gemini currently
-    timeout: float | None = None,  # noqa: ARG001 - handled by caller if needed
+    keyword_extraction: bool = False,
+    generation_config: dict[str, Any] | None = None,
     **_: Any,
 ) -> str | AsyncIterator[str]:
+    """
+    Complete a prompt using Gemini's API with Chain of Thought (COT) support.
+
+    This function supports automatic integration of reasoning content from Gemini models
+    that provide Chain of Thought capabilities via the thinking_config API feature.
+
+    COT Integration:
+    - When enable_cot=True: Thought content is wrapped in <think>...</think> tags
+    - When enable_cot=False: Thought content is filtered out; only regular content is returned
+    - Thought content is identified by the 'thought' attribute on response parts
+    - Requires thinking_config to be enabled in generation_config for the API to return thoughts
+
+    Args:
+        model: The Gemini model to use.
+        prompt: The prompt to complete.
+        system_prompt: Optional system prompt to include.
+        history_messages: Optional list of previous messages in the conversation.
+        api_key: Optional Gemini API key. If None, uses environment variable.
+        base_url: Optional custom API endpoint.
+        generation_config: Optional generation configuration dict.
+        keyword_extraction: Whether to use JSON response format.
+        token_tracker: Optional token usage tracker for monitoring API usage.
+        hashing_kv: Storage interface (for interface parity with other bindings).
+        stream: Whether to stream the response.
+        enable_cot: Whether to include Chain of Thought content in the response.
+        timeout: Request timeout (handled by caller if needed).
+        **_: Additional keyword arguments (ignored).
+
+    Returns:
+        The completed text (with COT content if enable_cot=True) or an async iterator
+        of text chunks if streaming. COT content is wrapped in <think>...</think> tags.
+
+    Raises:
+        RuntimeError: If the response from Gemini is empty.
+        ValueError: If the API key is not provided or configured.
+    """
     loop = asyncio.get_running_loop()
     key = _ensure_api_key(api_key)
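A hedged usage sketch of the new `enable_cot` parameter. The model id and the dict shape of `thinking_config` below are illustrative assumptions; google-genai's `ThinkingConfig(include_thoughts=True)` is the underlying feature, but how this module's `generation_config` dict maps onto it is not shown in this diff:

```python
import asyncio

async def demo() -> None:
    # Hypothetical call; parameters marked "assumed" are not taken from the diff.
    text = await gemini_complete_if_cache(
        "gemini-2.5-flash",  # assumed model id
        "Why is the sky blue?",
        enable_cot=True,
        generation_config={
            # thinking_config must be enabled or the API returns no thought parts.
            "thinking_config": {"include_thoughts": True},  # assumed dict shape
        },
    )
    # With enable_cot=True the reasoning is wrapped in <think>...</think> and
    # precedes the regular answer text.
    print(text)

asyncio.run(demo())
```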
@@ -188,6 +237,11 @@ async def gemini_complete_if_cache(
     usage_container: dict[str, Any] = {}
 
     def _stream_model() -> None:
+        # COT state tracking for streaming
+        cot_active = False
+        cot_started = False
+        initial_content_seen = False
+
         try:
             stream_kwargs = dict(request_kwargs)
             stream_iterator = client.models.generate_content_stream(**stream_kwargs)
@@ -195,19 +249,59 @@ async def gemini_complete_if_cache(
                 usage = getattr(chunk, "usage_metadata", None)
                 if usage is not None:
                     usage_container["usage"] = usage
-                # Always use manual extraction to avoid warnings about non-text parts
-                text_piece = _extract_response_text(chunk)
-                if text_piece:
-                    loop.call_soon_threadsafe(queue.put_nowait, text_piece)
+
+                # Extract both regular and thought content
+                regular_text, thought_text = _extract_response_text(
+                    chunk, extract_thoughts=True
+                )
+
+                if enable_cot:
+                    # Process regular content
+                    if regular_text:
+                        if not initial_content_seen:
+                            initial_content_seen = True
+                            # Close COT section if it was active
+                            if cot_active:
+                                loop.call_soon_threadsafe(queue.put_nowait, "</think>")
+                                cot_active = False
+                        # Send regular content
+                        loop.call_soon_threadsafe(queue.put_nowait, regular_text)
+
+                    # Process thought content
+                    if thought_text:
+                        if not initial_content_seen and not cot_started:
+                            # Start COT section
+                            loop.call_soon_threadsafe(queue.put_nowait, "<think>")
+                            cot_active = True
+                            cot_started = True
+                        # Send thought content if COT is active
+                        if cot_active:
+                            loop.call_soon_threadsafe(queue.put_nowait, thought_text)
+                else:
+                    # COT disabled - only send regular content
+                    if regular_text:
+                        loop.call_soon_threadsafe(queue.put_nowait, regular_text)
+
+            # Ensure COT is properly closed if still active
+            if cot_active:
+                loop.call_soon_threadsafe(queue.put_nowait, "</think>")
             loop.call_soon_threadsafe(queue.put_nowait, None)
         except Exception as exc:  # pragma: no cover - surface runtime issues
+            # Try to close COT tag before reporting error
+            if cot_active:
+                try:
+                    loop.call_soon_threadsafe(queue.put_nowait, "</think>")
+                except Exception:
+                    pass
             loop.call_soon_threadsafe(queue.put_nowait, exc)
 
     loop.run_in_executor(None, _stream_model)
 
     async def _async_stream() -> AsyncIterator[str]:
-        accumulated = ""
-        emitted = ""
         try:
             while True:
                 item = await queue.get()
@@ -220,16 +314,9 @@ async def gemini_complete_if_cache(
                 if "\\u" in chunk_text:
                     chunk_text = safe_unicode_decode(chunk_text.encode("utf-8"))
-                accumulated += chunk_text
-                sanitized = remove_think_tags(accumulated)
-                if sanitized.startswith(emitted):
-                    delta = sanitized[len(emitted) :]
-                else:
-                    delta = sanitized
-                emitted = sanitized
-                if delta:
-                    yield delta
+                # Yield the chunk directly without filtering
+                # COT filtering is already handled in _stream_model()
+                yield chunk_text
         finally:
             usage = usage_container.get("usage")
             if token_tracker and usage:
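The streaming COT behavior is easiest to audit in isolation. Below is the same tag-emission state machine distilled into a synchronous generator over pre-split (regular, thought) chunk pairs; this is an illustrative mirror of the logic in `_stream_model()`, not code from the commit:

```python
from collections.abc import Iterable, Iterator

def cot_tagged_chunks(
    chunks: Iterable[tuple[str, str]], enable_cot: bool = True
) -> Iterator[str]:
    """Open <think> while only thoughts have streamed; close it when the
    first regular chunk arrives; drop thoughts that come after that."""
    cot_active = False
    cot_started = False
    initial_content_seen = False
    for regular_text, thought_text in chunks:
        if not enable_cot:
            if regular_text:
                yield regular_text
            continue
        if regular_text:
            if not initial_content_seen:
                initial_content_seen = True
                if cot_active:
                    yield "</think>"
                    cot_active = False
            yield regular_text
        if thought_text:
            if not initial_content_seen and not cot_started:
                yield "<think>"
                cot_active = True
                cot_started = True
            if cot_active:
                yield thought_text
    if cot_active:
        yield "</think>"

# Thoughts stream first, then the answer:
stream = [("", "hmm..."), ("", "ok, so..."), ("Blue light scatters more.", "")]
print("".join(cot_tagged_chunks(stream)))
# -> <think>hmm...ok, so...</think>Blue light scatters more.
```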
@@ -247,14 +334,33 @@ async def gemini_complete_if_cache(
     response = await asyncio.to_thread(_call_model)
 
-    text = _extract_response_text(response)
-    if not text:
+    # Extract both regular text and thought text
+    regular_text, thought_text = _extract_response_text(response, extract_thoughts=True)
+
+    # Apply COT filtering logic based on enable_cot parameter
+    if enable_cot:
+        # Include thought content wrapped in <think> tags
+        if thought_text and thought_text.strip():
+            if not regular_text or regular_text.strip() == "":
+                # Only thought content available
+                final_text = f"<think>{thought_text}</think>"
+            else:
+                # Both content types present: prepend thought to regular content
+                final_text = f"<think>{thought_text}</think>{regular_text}"
+        else:
+            # No thought content, use regular content only
+            final_text = regular_text or ""
+    else:
+        # Filter out thought content, return only regular content
+        final_text = regular_text or ""
+
+    if not final_text:
         raise RuntimeError("Gemini response did not contain any text content.")
 
-    if "\\u" in text:
-        text = safe_unicode_decode(text.encode("utf-8"))
-    text = remove_think_tags(text)
+    if "\\u" in final_text:
+        final_text = safe_unicode_decode(final_text.encode("utf-8"))
+    final_text = remove_think_tags(final_text)
 
     usage = getattr(response, "usage_metadata", None)
     if token_tracker and usage:
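For the non-streaming path, the branches above reduce to a small assembly rule. An illustrative mirror (not code from the commit; note that the real path then passes the result through `remove_think_tags`, so the effective output also depends on that helper's behavior):

```python
def assemble_final_text(regular_text: str, thought_text: str, enable_cot: bool) -> str:
    # Mirrors only the branch logic above.
    if enable_cot and thought_text.strip():
        if not regular_text.strip():
            return f"<think>{thought_text}</think>"  # only thought content
        return f"<think>{thought_text}</think>{regular_text}"
    return regular_text or ""

assert assemble_final_text("Answer.", "hmm", enable_cot=True) == "<think>hmm</think>Answer."
assert assemble_final_text("Answer.", "hmm", enable_cot=False) == "Answer."
assert assemble_final_text("", "hmm", enable_cot=True) == "<think>hmm</think>"
```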
@@ -266,8 +372,8 @@ async def gemini_complete_if_cache(
             }
         )
 
-    logger.debug("Gemini response length: %s", len(text))
-    return text
+    logger.debug("Gemini response length: %s", len(final_text))
+    return final_text
 
 
 async def gemini_model_complete(


@@ -174,6 +174,7 @@ async def openai_complete_if_cache(
           explicit parameters (api_key, base_url).
         - hashing_kv: Will be removed from kwargs before passing to OpenAI.
         - keyword_extraction: Will be removed from kwargs before passing to OpenAI.
+        - stream: Whether to stream the response. Default is False.
 
     Returns:
         The completed text (with integrated COT content if available) or an async iterator