Merge branch 'main' into feature/websocket-streaming-api
Commit 70200e430c
11 changed files with 42 additions and 32 deletions
@@ -39,7 +39,7 @@ async def upload(tenant_id):
     Upload a file to the system.
     ---
     tags:
-      - File Management
+      - File
     security:
       - ApiKeyAuth: []
     parameters:
@@ -155,7 +155,7 @@ async def create(tenant_id):
     Create a new file or folder.
     ---
    tags:
-      - File Management
+      - File
     security:
       - ApiKeyAuth: []
     parameters:
@@ -233,7 +233,7 @@ async def list_files(tenant_id):
     List files under a specific folder.
     ---
     tags:
-      - File Management
+      - File
     security:
       - ApiKeyAuth: []
     parameters:
@@ -325,7 +325,7 @@ async def get_root_folder(tenant_id):
     Get user's root folder.
     ---
     tags:
-      - File Management
+      - File
     security:
       - ApiKeyAuth: []
     responses:
@@ -361,7 +361,7 @@ async def get_parent_folder():
     Get parent folder info of a file.
     ---
     tags:
-      - File Management
+      - File
     security:
       - ApiKeyAuth: []
     parameters:
@@ -406,7 +406,7 @@ async def get_all_parent_folders(tenant_id):
     Get all parent folders of a file.
     ---
     tags:
-      - File Management
+      - File
     security:
       - ApiKeyAuth: []
     parameters:
@@ -454,7 +454,7 @@ async def rm(tenant_id):
     Delete one or multiple files/folders.
     ---
     tags:
-      - File Management
+      - File
     security:
       - ApiKeyAuth: []
     parameters:
@@ -528,7 +528,7 @@ async def rename(tenant_id):
     Rename a file.
     ---
     tags:
-      - File Management
+      - File
     security:
       - ApiKeyAuth: []
     parameters:
@@ -589,7 +589,7 @@ async def get(tenant_id, file_id):
     Download a file.
     ---
     tags:
-      - File Management
+      - File
     security:
       - ApiKeyAuth: []
     produces:
@@ -637,7 +637,7 @@ async def move(tenant_id):
     Move one or multiple files to another folder.
     ---
     tags:
-      - File Management
+      - File
     security:
       - ApiKeyAuth: []
     parameters:
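All ten hunks above make the same edit: the file endpoints' Swagger docstrings drop the `File Management` tag in favour of `File`, so the generated OpenAPI spec groups them under one tag. As a rough illustration of calling one of these endpoints, here is a minimal sketch; the base URL, the route path `/v1/file/upload`, the `Authorization` header format, and the form field names are assumptions, not taken from this diff.

```python
# Hypothetical client call against the upload endpoint documented above.
# Base URL, route path, auth header, and field names are assumptions.
import requests

RAGFLOW_BASE = "http://localhost:9380"  # assumed host/port
API_KEY = "YOUR_API_KEY"                # ApiKeyAuth per the docstring's security section


def upload_file(path: str, parent_id: str | None = None) -> dict:
    """Upload a local file and return the parsed JSON response."""
    with open(path, "rb") as fh:
        resp = requests.post(
            f"{RAGFLOW_BASE}/v1/file/upload",  # assumed route
            headers={"Authorization": f"Bearer {API_KEY}"},
            files={"file": fh},
            data={"parent_id": parent_id} if parent_id else {},
        )
    resp.raise_for_status()
    return resp.json()


if __name__ == "__main__":
    print(upload_file("report.pdf"))
```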
@@ -170,7 +170,7 @@ TZ=Asia/Shanghai
 # Uncomment the following line if your operating system is MacOS:
 # MACOS=1
 
-# The maximum file size limit (in bytes) for each upload to your knowledge base or File Management.
+# The maximum file size limit (in bytes) for each upload to your dataset or RAGFlow's File system.
 # To change the 1GB file size limit, uncomment the line below and update as needed.
 # MAX_CONTENT_LENGTH=1073741824
 # After updating, ensure `client_max_body_size` in nginx/nginx.conf is updated accordingly.
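The two limits mentioned in this comment must agree, or uploads accepted by the application will still be rejected by the nginx proxy. A small sanity-check sketch, assuming the file locations named in the comment and a plain `client_max_body_size <size>;` directive:

```python
# Sketch: check that MAX_CONTENT_LENGTH in docker/.env does not exceed
# client_max_body_size in docker/nginx/nginx.conf. Paths are assumptions.
import re
from pathlib import Path

UNITS = {"k": 1024, "m": 1024**2, "g": 1024**3}


def env_limit(env_path: Path) -> int | None:
    """Return MAX_CONTENT_LENGTH in bytes, or None if still commented out."""
    for line in env_path.read_text().splitlines():
        m = re.match(r"\s*MAX_CONTENT_LENGTH\s*=\s*(\d+)", line)
        if m:
            return int(m.group(1))
    return None


def nginx_limit(conf_path: Path) -> int | None:
    """Return client_max_body_size in bytes, or None if not set."""
    m = re.search(r"client_max_body_size\s+(\d+)([kKmMgG]?)", conf_path.read_text())
    if not m:
        return None
    return int(m.group(1)) * UNITS.get(m.group(2).lower(), 1)


if __name__ == "__main__":
    app = env_limit(Path("docker/.env"))
    proxy = nginx_limit(Path("docker/nginx/nginx.conf"))
    if app and proxy and proxy < app:
        print(f"nginx limit ({proxy} B) is below MAX_CONTENT_LENGTH ({app} B); "
              "large uploads will fail at the proxy")
```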
@@ -76,5 +76,5 @@ No. Files uploaded to an agent as input are not stored in a dataset and hence wi
 There is no _specific_ file size limit for a file uploaded to an agent. However, note that model providers typically have a default or explicit maximum token setting, which can range from 8196 to 128k: The plain text part of the uploaded file will be passed in as the key value, but if the file's token count exceeds this limit, the string will be truncated and incomplete.
 
 :::tip NOTE
-The variables `MAX_CONTENT_LENGTH` in `/docker/.env` and `client_max_body_size` in `/docker/nginx/nginx.conf` set the file size limit for each upload to a dataset or **File Management**. These settings DO NOT apply in this scenario.
+The variables `MAX_CONTENT_LENGTH` in `/docker/.env` and `client_max_body_size` in `/docker/nginx/nginx.conf` set the file size limit for each upload to a dataset or RAGFlow's File system. These settings DO NOT apply in this scenario.
 :::
@@ -9,7 +9,7 @@ Initiate an AI-powered chat with a configured chat assistant.
 
 ---
 
-Knowledge base, hallucination-free chat, and file management are the three pillars of RAGFlow. Chats in RAGFlow are based on a particular dataset or multiple datasets. Once you have created your dataset, finished file parsing, and [run a retrieval test](../dataset/run_retrieval_test.md), you can go ahead and start an AI conversation.
+Chats in RAGFlow are based on a particular dataset or multiple datasets. Once you have created your dataset, finished file parsing, and [run a retrieval test](../dataset/run_retrieval_test.md), you can go ahead and start an AI conversation.
 
 ## Start an AI chat
 
@@ -5,7 +5,7 @@ slug: /configure_knowledge_base
 
 # Configure dataset
 
-Most of RAGFlow's chat assistants and Agents are based on datasets. Each of RAGFlow's datasets serves as a knowledge source, *parsing* files uploaded from your local machine and file references generated in **File Management** into the real 'knowledge' for future AI chats. This guide demonstrates some basic usages of the dataset feature, covering the following topics:
+Most of RAGFlow's chat assistants and Agents are based on datasets. Each of RAGFlow's datasets serves as a knowledge source, *parsing* files uploaded from your local machine and file references generated in RAGFlow's File system into the real 'knowledge' for future AI chats. This guide demonstrates some basic usages of the dataset feature, covering the following topics:
 
 - Create a dataset
 - Configure a dataset
@@ -82,10 +82,10 @@ Some embedding models are optimized for specific languages, so performance may b
 
 ### Upload file
 
-- RAGFlow's **File Management** allows you to link a file to multiple datasets, in which case each target dataset holds a reference to the file.
+- RAGFlow's File system allows you to link a file to multiple datasets, in which case each target dataset holds a reference to the file.
 - In **Knowledge Base**, you are also given the option of uploading a single file or a folder of files (bulk upload) from your local machine to a dataset, in which case the dataset holds file copies.
 
-While uploading files directly to a dataset seems more convenient, we *highly* recommend uploading files to **File Management** and then linking them to the target datasets. This way, you can avoid permanently deleting files uploaded to the dataset.
+While uploading files directly to a dataset seems more convenient, we *highly* recommend uploading files to RAGFlow's File system and then linking them to the target datasets. This way, you can avoid permanently deleting files uploaded to the dataset.
 
 ### Parse file
 
@@ -142,6 +142,6 @@ As of RAGFlow v0.22.1, the search feature is still in a rudimentary form, suppor
 You are allowed to delete a dataset. Hover your mouse over the three dot of the intended dataset card and the **Delete** option appears. Once you delete a dataset, the associated folder under **root/.knowledge** directory is AUTOMATICALLY REMOVED. The consequence is:
 
 - The files uploaded directly to the dataset are gone;
-- The file references, which you created from within **File Management**, are gone, but the associated files still exist in **File Management**.
+- The file references, which you created from within RAGFlow's File system, are gone, but the associated files still exist.
 
 
@@ -57,7 +57,7 @@ async def run_graphrag(
     start = trio.current_time()
     tenant_id, kb_id, doc_id = row["tenant_id"], str(row["kb_id"]), row["doc_id"]
     chunks = []
-    for d in settings.retriever.chunk_list(doc_id, tenant_id, [kb_id], fields=["content_with_weight", "doc_id"], sort_by_position=True):
+    for d in settings.retriever.chunk_list(doc_id, tenant_id, [kb_id], max_count=10000, fields=["content_with_weight", "doc_id"], sort_by_position=True):
         chunks.append(d["content_with_weight"])
 
     with trio.fail_after(max(120, len(chunks) * 60 * 10) if enable_timeout_assertion else 10000000000):
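The deadline in the context line scales with the number of chunks fetched: 600 seconds per chunk with a 120-second floor, so raising `max_count` to 10000 also lengthens the timeout for large documents. A quick worked check of that expression:

```python
# Worked check of the fail_after deadline visible in the hunk above:
# max(120, len(chunks) * 60 * 10) seconds when the timeout assertion is enabled.
def graphrag_deadline(num_chunks: int, enable_timeout_assertion: bool = True) -> int:
    return max(120, num_chunks * 60 * 10) if enable_timeout_assertion else 10_000_000_000


assert graphrag_deadline(0) == 120        # floor for an empty document
assert graphrag_deadline(1) == 600        # 10 minutes for a single chunk
assert graphrag_deadline(100) == 60_000   # roughly 16.7 hours for 100 chunks
```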
@@ -174,13 +174,19 @@ async def run_graphrag_for_kb(
     chunks = []
     current_chunk = ""
 
-    for d in settings.retriever.chunk_list(
+    # DEBUG: fetch all chunks first
+    raw_chunks = list(settings.retriever.chunk_list(
         doc_id,
         tenant_id,
         [kb_id],
+        max_count=10000,  # FIX: raise the limit so all chunks are processed
         fields=fields_for_chunks,
         sort_by_position=True,
-    ):
+    ))
+
+    callback(msg=f"[DEBUG] chunk_list() returned {len(raw_chunks)} raw chunks for doc {doc_id}")
+
+    for d in raw_chunks:
         content = d["content_with_weight"]
         if num_tokens_from_string(current_chunk + content) < 1024:
             current_chunk += content
@@ -537,7 +537,8 @@ class Dealer:
                 doc["id"] = id
             if dict_chunks:
                 res.extend(dict_chunks.values())
-            if len(dict_chunks.values()) < bs:
+            # FIX: only stop when no chunks are returned, not when fewer than bs
+            if len(dict_chunks.values()) == 0:
                 break
         return res
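The Dealer change replaces the `< bs` early exit with an explicit empty-batch check: when a page collapses below the batch size (presumably because duplicate ids are merged in `dict_chunks`), the old condition stopped paging even though more chunks remained. A minimal sketch of the two termination rules, using a hypothetical paged source:

```python
# Sketch contrasting the two stop conditions with a hypothetical paged fetcher.
# PAGES and fetch_page() stand in for the real retriever and are assumptions.
PAGES = [["a", "b", "c", "c"], ["d", "e", "f", "g"], []]  # page 0 dedups to 3 < bs
BS = 4


def fetch_page(i: int) -> list[str]:
    return PAGES[i] if i < len(PAGES) else []


def collect(stop_when_short: bool) -> list[str]:
    res, page = [], 0
    while True:
        dict_chunks = {c: c for c in fetch_page(page)}  # dedup by id, like dict_chunks
        res.extend(dict_chunks.values())
        if stop_when_short and len(dict_chunks) < BS:
            break  # old rule: a deduplicated short page ends paging early
        if len(dict_chunks) == 0:
            break  # new rule: only an empty page ends paging
        page += 1
    return res


assert collect(stop_when_short=True) == ["a", "b", "c"]                       # truncated
assert collect(stop_when_short=False) == ["a", "b", "c", "d", "e", "f", "g"]  # complete
```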
@@ -714,6 +714,8 @@ This auto-tagging feature enhances retrieval by adding another layer of domain-s
       'Check if this is a Confluence Cloud instance, uncheck for Confluence Server/Data Center',
     confluenceWikiBaseUrlTip:
       'The base URL of your Confluence instance (e.g., https://your-domain.atlassian.net/wiki)',
+    confluenceSpaceKeyTip:
+      'Optional: Specify a space key to limit syncing to a specific space. Leave empty to sync all accessible spaces. For multiple spaces, separate with commas (e.g., DEV,DOCS,HR)',
     s3PrefixTip: `Specify the folder path within your S3 bucket to fetch files from.
 Example: general/v2/`,
     S3CompatibleEndpointUrlTip: `Required for S3 compatible Storage Box. Specify the S3-compatible endpoint URL.
@@ -711,6 +711,8 @@ export default {
       'Отметьте, если это экземпляр Confluence Cloud, снимите для Confluence Server/Data Center',
     confluenceWikiBaseUrlTip:
       'Базовый URL вашего экземпляра Confluence (например, https://your-domain.atlassian.net/wiki)',
+    confluenceSpaceKeyTip:
+      'Необязательно: Укажите ключ пространства для синхронизации только определенного пространства. Оставьте пустым для синхронизации всех доступных пространств. Для нескольких пространств разделите запятыми (например, DEV,DOCS,HR)',
     s3PrefixTip: `Укажите путь к папке в вашем S3 бакете для получения файлов.
 Пример: general/v2/`,
     S3CompatibleEndpointUrlTip: `Требуется для S3 совместимого Storage Box. Укажите URL конечной точки, совместимой с S3.
@@ -701,6 +701,8 @@ General:实体和关系提取提示来自 GitHub - microsoft/graphrag:基于
       '检查这是否是 Confluence Cloud 实例,如果是 Confluence 服务/数据中心,则取消选中。',
     confluenceWikiBaseUrlTip:
       'Confluence Wiki 的基础 URL(例如 https://your-domain.atlassian.net/wiki)',
+    confluenceSpaceKeyTip:
+      '可选:指定空间键以限制同步到特定空间。留空则同步所有可访问的空间。多个空间请用逗号分隔(例如:DEV,DOCS,HR)',
     s3PrefixTip: `指定 S3 存储桶内的文件夹路径,用于读取文件。
 示例:general/v2/`,
     addDataSourceModalTital: '创建你的 {{name}} 链接',
@@ -1903,16 +1905,5 @@ Tokenizer 会根据所选方式将内容存储为对应的数据结构。`,
       searchTitle: '尚未创建搜索应用',
       addNow: '立即添加',
     },
-
-    deleteModal: {
-      delAgent: '删除智能体',
-      delDataset: '删除知识库',
-      delSearch: '删除搜索',
-      delFile: '删除文件',
-      delFiles: '删除文件',
-      delFilesContent: '已选择 {{count}} 个文件',
-      delChat: '删除聊天',
-      delMember: '删除成员',
-    },
   },
 };
@@ -230,6 +230,13 @@ export const DataSourceFormFields = {
       required: false,
       tooltip: t('setting.confluenceIsCloudTip'),
     },
+    {
+      label: 'Space Key',
+      name: 'config.space',
+      type: FormFieldType.Text,
+      required: false,
+      tooltip: t('setting.confluenceSpaceKeyTip'),
+    },
   ],
   [DataSourceKey.GOOGLE_DRIVE]: [
     {
@@ -563,6 +570,7 @@ export const DataSourceFormDefaultValues = {
     config: {
       wiki_base: '',
       is_cloud: true,
+      space: '',
       credentials: {
         confluence_username: '',
         confluence_access_token: '',
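The new `Space Key` field feeds a comma-separated value (per the confluenceSpaceKeyTip strings added above) into `config.space`, defaulting to an empty string. How the backend consumes it is not part of this diff; a plausible parsing sketch, with the helper name and behaviour as assumptions:

```python
# Hypothetical helper for the comma-separated space key value stored in config.space.
# An empty value means "sync all accessible spaces", per the tooltip text.
def parse_space_keys(space: str | None) -> list[str]:
    if not space or not space.strip():
        return []  # empty -> no restriction, sync every accessible space
    return [key.strip() for key in space.split(",") if key.strip()]


assert parse_space_keys("") == []
assert parse_space_keys("DEV") == ["DEV"]
assert parse_space_keys("DEV, DOCS ,HR") == ["DEV", "DOCS", "HR"]
```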