feat: feedback enrichment preparation
This commit is contained in:
parent
055042af0f
commit
44ec814256
5 changed files with 109 additions and 0 deletions
|
|
@ -0,0 +1,14 @@
|
|||
A question was previously answered, but the answer received negative feedback.
|
||||
Please reconsider and improve the response.
|
||||
|
||||
Question: {question}
|
||||
Context originally used: {context}
|
||||
Previous answer: {wrong_answer}
|
||||
Feedback on that answer: {negative_feedback}
|
||||
|
||||
Task: Provide a better response. The new answer should be short and direct.
|
||||
Then explain briefly why this answer is better.
|
||||
|
||||
Format your reply as:
|
||||
Answer: <improved answer>
|
||||
Explanation: <short explanation>
|
||||
13
cognee/infrastructure/llm/prompts/feedback_report_prompt.txt
Normal file
13
cognee/infrastructure/llm/prompts/feedback_report_prompt.txt
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
Write a concise, stand-alone paragraph that explains the correct answer to the question below.
|
||||
The paragraph should read naturally on its own, providing all necessary context and reasoning
|
||||
so the answer is clear and well-supported.
|
||||
|
||||
Question: {question}
|
||||
Correct answer: {improved_answer}
|
||||
Supporting context: {new_context}
|
||||
|
||||
Your paragraph should:
|
||||
- First sentence clearly states the correct answer as a full sentence
|
||||
- Remainder flows from first sentence and provides explanation based on context
|
||||
- Use simple, direct language that is easy to follow
|
||||
- Use shorter sentences, no long-winded explanations
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
Question: {question}
|
||||
Context: {context}
|
||||
|
||||
Provide a one-paragraph, human-readable summary of this interaction context,
|
||||
listing all the relevant facts and information in a simple and direct way.
|
||||
20
cognee/tasks/feedback/models.py
Normal file
20
cognee/tasks/feedback/models.py
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
from typing import List, Optional, Union
|
||||
from uuid import UUID
|
||||
|
||||
from cognee.infrastructure.engine import DataPoint
|
||||
from cognee.modules.engine.models import Entity
|
||||
from cognee.tasks.temporal_graph.models import Event
|
||||
|
||||
|
||||
class FeedbackEnrichment(DataPoint):
    """Minimal DataPoint for feedback enrichment that works with extract_graph_from_data."""

    # Stand-alone enrichment paragraph; this is the content other components index/embed
    # (see `metadata` below, which marks it as the index field).
    text: str
    # Entities/events associated with `text` — presumably filled in by
    # extract_graph_from_data; starts as None. TODO(review): confirm producer.
    contains: Optional[List[Union[Entity, Event]]] = None
    # DataPoint metadata declaring which field(s) are indexable/embeddable.
    metadata: dict = {"index_fields": ["text"]}

    # The original question and the answer that received negative feedback.
    question: str
    original_answer: str
    # The regenerated, improved answer produced in response to that feedback.
    improved_answer: str
    # Back-references to the feedback node and the original interaction node.
    feedback_id: UUID
    interaction_id: UUID
|
||||
57
cognee/tasks/feedback/utils.py
Normal file
57
cognee/tasks/feedback/utils.py
Normal file
|
|
@ -0,0 +1,57 @@
|
|||
from cognee.modules.retrieval.graph_completion_retriever import GraphCompletionRetriever
|
||||
from cognee.modules.retrieval.graph_completion_cot_retriever import GraphCompletionCotRetriever
|
||||
from cognee.modules.retrieval.graph_completion_context_extension_retriever import (
|
||||
GraphCompletionContextExtensionRetriever,
|
||||
)
|
||||
from cognee.shared.logging_utils import get_logger
|
||||
|
||||
|
||||
logger = get_logger("feedback_utils")
|
||||
|
||||
|
||||
def create_retriever(
    retriever_name: str = "graph_completion_cot",
    top_k: int = 20,
    user_prompt_path: str = "graph_context_for_question.txt",
    system_prompt_path: str = "answer_simple_question.txt",
):
    """Factory for retriever instances with configurable top_k and prompt paths.

    Args:
        retriever_name: One of "graph_completion", "graph_completion_cot", or
            "graph_completion_context_extension". Any other value logs a warning
            and falls back to the chain-of-thought retriever.
        top_k: Number of results the retriever should fetch.
        user_prompt_path: Prompt template used to render the user message.
        system_prompt_path: Prompt template used to render the system message.

    Returns:
        A retriever instance constructed with save_interaction disabled.
    """
    # All retrievers take the exact same constructor arguments, so a dispatch
    # table replaces the previous duplicated if-chain of identical calls.
    retriever_classes = {
        "graph_completion": GraphCompletionRetriever,
        "graph_completion_cot": GraphCompletionCotRetriever,
        "graph_completion_context_extension": GraphCompletionContextExtensionRetriever,
    }

    retriever_class = retriever_classes.get(retriever_name)
    if retriever_class is None:
        # NOTE(review): the keyword-style `retriever=` argument assumes a
        # structlog-like logger from get_logger — confirm it is not stdlib logging.
        logger.warning(
            "Unknown retriever, defaulting to graph_completion_cot", retriever=retriever_name
        )
        retriever_class = GraphCompletionCotRetriever

    return retriever_class(
        top_k=top_k,
        save_interaction=False,
        user_prompt_path=user_prompt_path,
        system_prompt_path=system_prompt_path,
    )
|
||||
|
||||
|
||||
def filter_negative_feedback(feedback_nodes):
    """Return only the (node_id, properties) pairs carrying negative feedback.

    A node counts as negative when its "sentiment" property equals "negative"
    (case-insensitively) or its "score" property is below zero.
    """

    def _is_negative(attributes):
        # Either an explicit negative sentiment label or a below-zero score.
        label = attributes.get("sentiment", "")
        return label.casefold() == "negative" or attributes.get("score", 0) < 0

    return [
        (identifier, attributes)
        for identifier, attributes in feedback_nodes
        if _is_negative(attributes)
    ]
|
||||
Loading…
Add table
Reference in a new issue