Feat: adds Cursor developer rules demo (#1021)

<!-- .github/pull_request_template.md -->

## Description
Adds a Cursor developer rules demo and the supporting functionality to the MCP server.
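
A rough usage sketch of the two new tools from an MCP client. This is illustrative only: the server launch command and the interaction text are placeholders, and it assumes the standard `mcp` Python client package.

```python
import asyncio
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

# Placeholder launch command for the Cognee MCP server; adjust to your setup.
server = StdioServerParameters(command="python", args=["src/server.py"])

async def main():
    async with stdio_client(server) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            # Log a user-agent interaction; processing continues in the background.
            await session.call_tool(
                "save_interaction", arguments={"data": "User: ...\nAgent: ..."}
            )
            # Later, fetch the developer rules derived from past interactions.
            rules = await session.call_tool("get_developer_rules", arguments={})
            print(rules)

asyncio.run(main())
```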

## DCO Affirmation
I affirm that all code in every commit of this pull request conforms to
the terms of the Topoteretes Developer Certificate of Origin.

---------

Co-authored-by: Boris <boris@topoteretes.com>
hajdul88 2025-06-30 11:57:22 +02:00 committed by GitHub
parent 14be2a5f5d
commit c18dc39f28
5 changed files with 230 additions and 0 deletions


@@ -4,6 +4,7 @@ import sys
import argparse
import cognee
import asyncio
from cognee.shared.logging_utils import get_logger, setup_logging, get_log_file_location
import importlib.util
from contextlib import redirect_stdout
@@ -16,6 +17,11 @@ from cognee.api.v1.cognify.code_graph_pipeline import run_code_graph_pipeline
from cognee.modules.search.types import SearchType
from cognee.shared.data_models import KnowledgeGraph
from cognee.modules.storage.utils import JSONEncoder
from cognee.modules.codingagents.coding_rule_associations import (
    add_rule_associations,
    get_existing_rules,
)
mcp = FastMCP("Cognee")
@@ -188,6 +194,64 @@ async def cognify(data: str, graph_model_file: str = None, graph_model_name: str
    ]


@mcp.tool(
    name="save_interaction", description="Logs user-agent interactions and query-answer pairs"
)
async def save_interaction(data: str) -> list:
    """
    Transform and save a user-agent interaction into structured knowledge.

    Parameters
    ----------
    data : str
        The input string containing user queries and the corresponding agent answers.

    Returns
    -------
    list
        A list containing a single TextContent object with information about the background task launch.
    """

    async def save_user_agent_interaction(data: str) -> None:
        """Build a knowledge graph from the interaction data."""
        with redirect_stdout(sys.stderr):
            logger.info("Save interaction process starting.")

            await cognee.add(data, node_set=["user_agent_interaction"])

            try:
                await cognee.cognify()
                logger.info("Save interaction process finished.")

                logger.info("Generating associated rules from interaction data.")
                await add_rule_associations(data=data, rules_nodeset_name="coding_agent_rules")
                logger.info("Associated rules generated from interaction data.")
            except Exception as e:
                logger.error("Save interaction process failed.")
                raise ValueError(f"Failed to save interaction: {str(e)}")

    asyncio.create_task(
        save_user_agent_interaction(
            data=data,
        )
    )

    log_file = get_log_file_location()
    text = (
        f"Background process launched to process the user-agent interaction.\n"
        f"To check the current status, use the cognify_status tool or check the log file at: {log_file}"
    )

    return [
        types.TextContent(
            type="text",
            text=text,
        )
    ]
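
# The interaction above is processed fire-and-forget: asyncio.create_task returns
# immediately, so the tool responds right away while cognify and rule association
# continue in the background (check the cognify_status tool or the reported log file).
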
@mcp.tool()
async def codify(repo_path: str) -> list:
"""
@@ -319,6 +383,41 @@ async def search(search_query: str, search_type: str) -> list:
    return [types.TextContent(type="text", text=search_results)]


@mcp.tool()
async def get_developer_rules() -> list:
    """
    Retrieve all developer rules that were generated based on previous interactions.

    This tool queries the Cognee knowledge graph and returns a list of developer
    rules.

    Parameters
    ----------
    None

    Returns
    -------
    list
        A list containing a single TextContent object with the retrieved developer rules.
        The format is plain text containing the developer rules as bullet points.

    Notes
    -----
    - The specific logic for fetching rules is handled internally.
    - This tool does not accept any parameters and is intended for simple rule inspection use cases.
    """

    async def fetch_rules_from_cognee() -> str:
        """Collect all developer rules from Cognee."""
        with redirect_stdout(sys.stderr):
            developer_rules = await get_existing_rules(rules_nodeset_name="coding_agent_rules")

        return developer_rules

    rules_text = await fetch_rules_from_cognee()

    return [types.TextContent(type="text", text=rules_text)]
@mcp.tool()
async def prune():
"""


@@ -0,0 +1,6 @@
You are an association agent tasked with suggesting structured developer rules from user-agent interactions for Cursor.
You will receive the actual user-agent interaction and the list of already existing developer rules.
Each rule represents a single best practice or guideline the agent should follow in the future.
Suggest rules that are general rather than specific to the current text, strictly technical, and that add value and improve future Cursor agent behavior.
Do not suggest rules similar to the existing ones, or rules that are not general or do not add value.
It is acceptable to return an empty rule list.
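
Purely for illustration, the structured output this prompt asks for corresponds to the RuleSet model added later in this PR; a response might look like the following (the rule texts are made up):

```python
# Hypothetical example of the structured output the association agent returns,
# matching the RuleSet/Rule models in coding_rule_associations.py.
example_ruleset = {
    "rules": [
        {"text": "Prefer async/await for all I/O-bound operations in agent code."},
        {"text": "Keep MCP tool docstrings in NumPy style with Parameters/Returns sections."},
    ]
}
```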


@@ -0,0 +1,5 @@
**User-agent interaction text:**
`{{chat}}`
**Already existing rules:**
`{{rules}}`
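
A small sketch of how this template is filled by `render_prompt`, mirroring the `user_context` built in `add_rule_associations` below; the chat text and the existing rule are placeholders.

```python
from cognee.infrastructure.llm.prompts import render_prompt

# Placeholder interaction text and existing-rules bullet list.
context = {
    "chat": "User: How should I structure new MCP tools?\nAgent: Keep them async and ...",
    "rules": "- Prefer async/await for all I/O-bound operations in agent code.",
}

# Substitutes {{chat}} and {{rules}} in the template above.
user_prompt = render_prompt("coding_rule_association_agent_user.txt", context=context)
```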

@@ -0,0 +1,120 @@
from uuid import NAMESPACE_OID, uuid5

from cognee.infrastructure.databases.graph import get_graph_engine
from cognee.infrastructure.databases.vector import get_vector_engine
from cognee.infrastructure.llm.prompts import render_prompt
from cognee.low_level import DataPoint
from cognee.infrastructure.llm.get_llm_client import get_llm_client
from cognee.shared.logging_utils import get_logger
from cognee.modules.engine.models import NodeSet
from cognee.tasks.storage import add_data_points, index_graph_edges
from typing import Optional, List, Any
from pydantic import Field

logger = get_logger("coding_rule_association")


class Rule(DataPoint):
    """A single developer rule extracted from text."""

    text: str = Field(..., description="The coding rule associated with the conversation")
    belongs_to_set: Optional[NodeSet] = None
    metadata: dict = {"index_fields": ["rule"]}


class RuleSet(DataPoint):
    """Collection of parsed rules."""

    rules: List[Rule] = Field(
        ...,
        description="List of developer rules extracted from the input text. Each rule represents a coding best practice or guideline.",
    )


async def get_existing_rules(rules_nodeset_name: str) -> str:
    graph_engine = await get_graph_engine()

    nodes_data, _ = await graph_engine.get_nodeset_subgraph(
        node_type=NodeSet, node_name=[rules_nodeset_name]
    )

    existing_rules = [
        item[1]["text"]
        for item in nodes_data
        if isinstance(item, tuple)
        and len(item) == 2
        and isinstance(item[1], dict)
        and "text" in item[1]
    ]

    existing_rules = "\n".join(f"- {rule}" for rule in existing_rules)

    return existing_rules
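
# Example of the returned string (the rule texts here are illustrative):
#   - Prefer async/await for all I/O-bound operations in agent code.
#   - Keep MCP tool docstrings in NumPy style.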


async def get_origin_edges(data: str, rules: List[Rule]) -> List[Any]:
    vector_engine = get_vector_engine()

    origin_chunk = await vector_engine.search("DocumentChunk_text", data, limit=1)

    try:
        origin_id = origin_chunk[0].id
    except (AttributeError, KeyError, TypeError, IndexError):
        origin_id = None

    relationships = []

    if origin_id and isinstance(rules, (list, tuple)) and len(rules) > 0:
        for rule in rules:
            try:
                rule_id = getattr(rule, "id", None)
                if rule_id is not None:
                    rel_name = "rule_associated_from"
                    relationships.append(
                        (
                            rule_id,
                            origin_id,
                            rel_name,
                            {
                                "relationship_name": rel_name,
                                "source_node_id": rule_id,
                                "target_node_id": origin_id,
                                "ontology_valid": False,
                            },
                        )
                    )
            except Exception as e:
                logger.info(f"Warning: Skipping invalid rule due to error: {e}")
    else:
        logger.info("No valid origin_id or rules provided.")

    return relationships


async def add_rule_associations(data: str, rules_nodeset_name: str):
    llm_client = get_llm_client()
    graph_engine = await get_graph_engine()

    existing_rules = await get_existing_rules(rules_nodeset_name=rules_nodeset_name)

    user_context = {"chat": data, "rules": existing_rules}
    user_prompt = render_prompt("coding_rule_association_agent_user.txt", context=user_context)
    system_prompt = render_prompt("coding_rule_association_agent_system.txt", context={})

    rule_list = await llm_client.acreate_structured_output(
        text_input=user_prompt, system_prompt=system_prompt, response_model=RuleSet
    )

    rules_nodeset = NodeSet(
        id=uuid5(NAMESPACE_OID, name=rules_nodeset_name), name=rules_nodeset_name
    )

    for rule in rule_list.rules:
        rule.belongs_to_set = rules_nodeset

    edges_to_save = await get_origin_edges(data=data, rules=rule_list.rules)

    await add_data_points(data_points=rule_list.rules)

    if len(edges_to_save) > 0:
        await graph_engine.add_edges(edges_to_save)
        await index_graph_edges()
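
An end-to-end sketch of the flow these files add, run outside the MCP server. The interaction string is a placeholder; the calls simply mirror what the `save_interaction` and `get_developer_rules` tools do.

```python
import asyncio

import cognee
from cognee.modules.codingagents.coding_rule_associations import (
    add_rule_associations,
    get_existing_rules,
)

async def main():
    interaction = "User: ...\nAgent: ..."  # placeholder chat transcript

    # Ingest and cognify the interaction, tagged with the same node set the MCP tool uses.
    await cognee.add(interaction, node_set=["user_agent_interaction"])
    await cognee.cognify()

    # Derive new developer rules and link them back to the originating chunk.
    await add_rule_associations(data=interaction, rules_nodeset_name="coding_agent_rules")

    # Print the accumulated rules as a bullet list.
    print(await get_existing_rules(rules_nodeset_name="coding_agent_rules"))

asyncio.run(main())
```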