openrag/assets/js/0ba6a408.efa6fc46.js
2025-12-18 23:00:04 +00:00

1 line
No EOL
21 KiB
JavaScript

"use strict";(globalThis.webpackChunkopenrag_docs=globalThis.webpackChunkopenrag_docs||[]).push([[571],{3227:(e,n,s)=>{s.r(n),s.d(n,{assets:()=>c,contentTitle:()=>l,default:()=>p,frontMatter:()=>d,metadata:()=>t,toc:()=>h});const t=JSON.parse('{"id":"core-components/knowledge","title":"Configure knowledge","description":"OpenRAG includes a built-in OpenSearch instance that serves as the underlying datastore for your knowledge (documents).","source":"@site/docs/core-components/knowledge.mdx","sourceDirName":"core-components","slug":"/knowledge","permalink":"/knowledge","draft":false,"unlisted":false,"editUrl":"https://github.com/openrag/openrag/tree/main/docs/docs/core-components/knowledge.mdx","tags":[],"version":"current","frontMatter":{"title":"Configure knowledge","slug":"/knowledge"},"sidebar":"tutorialSidebar","previous":{"title":"Flows","permalink":"/agents"},"next":{"title":"Ingest knowledge","permalink":"/ingestion"}}');var i=s(4848),o=s(8453),r=s(9179),a=s(7637);const d={title:"Configure knowledge",slug:"/knowledge"},l=void 0,c={},h=[{value:"Browse knowledge",id:"browse-knowledge",level:2},{value:"Default documents",id:"default-documents",level:3},{value:"OpenSearch authentication and document access",id:"auth",level:2},...a.RM,{value:"OpenSearch indexes",id:"opensearch-indexes",level:2},{value:"Knowledge ingestion settings",id:"knowledge-ingestion-settings",level:2},{value:"Set the embedding model and dimensions",id:"set-the-embedding-model-and-dimensions",level:3},{value:"Set Docling parameters",id:"set-docling-parameters",level:3},{value:"Set the local documents path",id:"set-the-local-documents-path",level:3},{value:"Delete knowledge",id:"delete-knowledge",level:2},{value:"See also",id:"see-also",level:2}];function u(e){const n={a:"a",admonition:"admonition",code:"code",em:"em",h2:"h2",h3:"h3",li:"li",p:"p",strong:"strong",ul:"ul",...(0,o.R)(),...e.components};return(0,i.jsxs)(i.Fragment,{children:[(0,i.jsxs)(n.p,{children:["OpenRAG includes a built-in ",(0,i.jsx)(n.a,{href:"https://docs.opensearch.org/latest/",children:"OpenSearch"})," instance that serves as the underlying datastore for your ",(0,i.jsx)(n.em,{children:"knowledge"})," (documents).\nThis specialized database is used to store and retrieve your documents and the associated vector data (embeddings)."]}),"\n",(0,i.jsxs)(n.p,{children:["The documents in your OpenSearch knowledge base provide specialized context in addition to the general knowledge available to the language model that you select when you ",(0,i.jsx)(n.a,{href:"/install-options",children:"install OpenRAG"})," or ",(0,i.jsx)(n.a,{href:"/agents",children:"edit a flow"}),"."]}),"\n",(0,i.jsxs)(n.p,{children:["You can ",(0,i.jsx)(n.a,{href:"/ingestion",children:"upload documents"})," from a variety of sources to populate your knowledge base with unique content, such as your own company documents, research papers, or websites.\nDocuments are processed through OpenRAG's knowledge ingestion flows with Docling."]}),"\n",(0,i.jsxs)(n.p,{children:["Then, the ",(0,i.jsxs)(n.a,{href:"/chat",children:["OpenRAG ",(0,i.jsx)(n.strong,{children:"Chat"})]})," can run ",(0,i.jsx)(n.a,{href:"https://www.ibm.com/think/topics/vector-search",children:"similarity searches"})," against your OpenSearch database to retrieve relevant information and generate context-aware responses."]}),"\n",(0,i.jsxs)(n.p,{children:["You can configure how documents are ingested and how the ",(0,i.jsx)(n.strong,{children:"Chat"})," interacts with your knowledge base."]}),"\n",(0,i.jsx)(n.h2,{id:"browse-knowledge",children:"Browse knowledge"}),"\n",(0,i.jsxs)(n.p,{children:["The ",(0,i.jsx)(n.strong,{children:"Knowledge"})," page lists the documents OpenRAG has ingested into your OpenSearch database, specifically in an ",(0,i.jsx)(n.a,{href:"https://docs.opensearch.org/latest/getting-started/intro/#index",children:"OpenSearch index"})," named ",(0,i.jsx)(n.code,{children:"documents"}),"."]}),"\n",(0,i.jsxs)(n.p,{children:["To explore the raw contents of your knowledge base, click ",(0,i.jsx)(r.A,{name:"Library","aria-hidden":"true"})," ",(0,i.jsx)(n.strong,{children:"Knowledge"})," to get a list of all ingested documents.\nClick a document to view the chunks produced from splitting the document during ingestion."]}),"\n",(0,i.jsx)(n.h3,{id:"default-documents",children:"Default documents"}),"\n",(0,i.jsxs)(n.p,{children:["By default, OpenRAG includes some initial documents about OpenRAG.\nThese documents are ingested automatically during the ",(0,i.jsx)(n.a,{href:"/install#application-onboarding",children:"application onboarding process"}),"."]}),"\n",(0,i.jsxs)(n.p,{children:["You can use these documents to ask OpenRAG about itself, and to test the ",(0,i.jsx)(n.a,{href:"/chat",children:(0,i.jsx)(n.strong,{children:"Chat"})})," feature before uploading your own documents."]}),"\n",(0,i.jsxs)(n.p,{children:["If you ",(0,i.jsx)(n.a,{href:"#delete-knowledge",children:"delete"})," these documents, you won't be able to ask OpenRAG about itself and it's own functionality.\nIt is recommended that you keep these documents, and use ",(0,i.jsx)(n.a,{href:"/knowledge-filters",children:"filters"})," to separate them from your other knowledge."]}),"\n",(0,i.jsx)(n.h2,{id:"auth",children:"OpenSearch authentication and document access"}),"\n",(0,i.jsxs)(n.p,{children:["When you ",(0,i.jsx)(n.a,{href:"/install-options",children:"install OpenRAG"}),", you provide the initial configuration values for your OpenRAG services, including authentication credentials for OpenSearch and OAuth connectors.\nThis configuration determines how OpenRAG authenticates with your deployment's OpenSearch instance, and it controls user access to documents in your knowledge base:"]}),"\n",(0,i.jsx)(a.Ay,{}),"\n",(0,i.jsx)(n.h2,{id:"opensearch-indexes",children:"OpenSearch indexes"}),"\n",(0,i.jsxs)(n.p,{children:["An ",(0,i.jsx)(n.a,{href:"https://docs.opensearch.org/latest/getting-started/intro/#index",children:"OpenSearch index"})," is a collection of documents in an OpenSearch database."]}),"\n",(0,i.jsxs)(n.p,{children:["By default, all documents you upload to your OpenRAG knowledge base are stored in an index named ",(0,i.jsx)(n.code,{children:"documents"}),"."]}),"\n",(0,i.jsxs)(n.p,{children:["It is possible to change the index name by ",(0,i.jsx)(n.a,{href:"/agents#inspect-and-modify-flows",children:"editing the ingestion flow"}),".\nHowever, this can impact dependent processes, such as the ",(0,i.jsx)(n.a,{href:"/knowledge-filters",children:"filters"})," and ",(0,i.jsx)(n.a,{href:"/chat",children:(0,i.jsx)(n.strong,{children:"Chat"})})," flow, that reference the ",(0,i.jsx)(n.code,{children:"documents"})," index by default.\nMake sure you edit other flows as needed to ensure all processes use the same index name."]}),"\n",(0,i.jsxs)(n.p,{children:["If you encounter errors or unexpected behavior after changing the index name, you can ",(0,i.jsx)(n.a,{href:"/agents#revert-a-built-in-flow-to-its-original-configuration",children:"revert the flows to their original configuration"}),", or ",(0,i.jsx)(n.a,{href:"/knowledge#delete-knowledge",children:"delete knowledge"})," to clear the existing documents from your knowledge base."]}),"\n",(0,i.jsx)(n.h2,{id:"knowledge-ingestion-settings",children:"Knowledge ingestion settings"}),"\n",(0,i.jsx)(n.admonition,{type:"warning",children:(0,i.jsx)(n.p,{children:"Knowledge ingestion settings apply to documents you upload after making the changes.\nDocuments uploaded before changing these settings aren't reprocessed."})}),"\n",(0,i.jsx)(n.p,{children:"After changing knowledge ingestion settings, you must determine if you need to reupload any documents to be consistent with the new settings."}),"\n",(0,i.jsx)(n.p,{children:"It isn't always necessary to reupload documents after changing knowledge ingestion settings.\nFor example, it is typical to upload some documents with OCR enabled and others without OCR enabled."}),"\n",(0,i.jsxs)(n.p,{children:["If needed, you can use ",(0,i.jsx)(n.a,{href:"/knowledge-filters",children:"filters"})," to separate documents that you uploaded with different settings, such as different embedding models."]}),"\n",(0,i.jsx)(n.h3,{id:"set-the-embedding-model-and-dimensions",children:"Set the embedding model and dimensions"}),"\n",(0,i.jsxs)(n.p,{children:["When you ",(0,i.jsx)(n.a,{href:"/install-options",children:"install OpenRAG"}),", you select at least one embedding model during the ",(0,i.jsx)(n.a,{href:"/install#application-onboarding",children:"application onboarding process"}),".\nOpenRAG automatically detects and configures the appropriate vector dimensions for your selected embedding model, ensuring optimal search performance and compatibility."]}),"\n",(0,i.jsxs)(n.p,{children:["In the OpenRAG repository, you can find the complete list of supported models in ",(0,i.jsx)(n.a,{href:"https://github.com/langflow-ai/openrag/blob/main/src/services/models_service.py",children:(0,i.jsx)(n.code,{children:"models_service.py"})})," and the corresponding vector dimensions in ",(0,i.jsx)(n.a,{href:"https://github.com/langflow-ai/openrag/blob/main/src/config/settings.py",children:(0,i.jsx)(n.code,{children:"settings.py"})}),"."]}),"\n",(0,i.jsxs)(n.p,{children:["During the application onboarding process, you can select from the supported models.\nThe default embedding dimension is ",(0,i.jsx)(n.code,{children:"1536"}),", and the default model is the OpenAI ",(0,i.jsx)(n.code,{children:"text-embedding-3-small"}),"."]}),"\n",(0,i.jsxs)(n.p,{children:["If you want to use an unsupported model, you must manually set the model in your ",(0,i.jsxs)(n.a,{href:"/reference/configuration",children:["OpenRAG ",(0,i.jsx)(n.code,{children:".env"})," file"]}),".\nIf you use an unsupported embedding model that doesn't have defined dimensions in ",(0,i.jsx)(n.code,{children:"settings.py"}),", then OpenRAG falls back to the default dimensions (1536) and logs a warning. OpenRAG's OpenSearch instance and flows continue to work, but ",(0,i.jsx)(n.a,{href:"https://www.ibm.com/think/topics/vector-search",children:"similarity search"})," quality can be affected if the actual model dimensions aren't 1536."]}),"\n",(0,i.jsxs)(n.p,{children:["To change the embedding model after onboarding, it is recommended that you modify the embedding model setting in the OpenRAG ",(0,i.jsx)(n.strong,{children:"Settings"})," page or in your ",(0,i.jsxs)(n.a,{href:"/reference/configuration",children:["OpenRAG ",(0,i.jsx)(n.code,{children:".env"})," file"]}),".\nThis will automatically update all relevant ",(0,i.jsx)(n.a,{href:"/agents",children:"OpenRAG flows"})," to use the new embedding model configuration."]}),"\n",(0,i.jsx)(n.h3,{id:"set-docling-parameters",children:"Set Docling parameters"}),"\n",(0,i.jsxs)(n.p,{children:["OpenRAG uses ",(0,i.jsx)(n.a,{href:"https://docling-project.github.io/docling/",children:"Docling"})," for document ingestion because it supports many file formats, processes tables and images well, and performs efficiently."]}),"\n",(0,i.jsxs)(n.p,{children:["When you ",(0,i.jsx)(n.a,{href:"/ingestion",children:"upload documents"}),", Docling processes the files, splits them into chunks, and stores them as separate, structured documents in your OpenSearch knowledge base."]}),"\n",(0,i.jsx)(n.p,{children:"You can use either Docling Serve or OpenRAG's built-in Docling ingestion pipeline to process documents."}),"\n",(0,i.jsxs)(n.ul,{children:["\n",(0,i.jsxs)(n.li,{children:["\n",(0,i.jsxs)(n.p,{children:[(0,i.jsx)(n.strong,{children:"Docling Serve ingestion"}),": By default, OpenRAG uses ",(0,i.jsx)(n.a,{href:"https://github.com/docling-project/docling-serve",children:"Docling Serve"}),".\nThis means that OpenRAG starts a ",(0,i.jsx)(n.code,{children:"docling serve"})," process on your local machine and runs Docling ingestion through an API service."]}),"\n"]}),"\n",(0,i.jsxs)(n.li,{children:["\n",(0,i.jsxs)(n.p,{children:[(0,i.jsx)(n.strong,{children:"Built-in Docling ingestion"}),": If you want to use OpenRAG's built-in Docling ingestion pipeline instead of the separate Docling Serve service, set ",(0,i.jsx)(n.code,{children:"DISABLE_INGEST_WITH_LANGFLOW=true"})," in your ",(0,i.jsx)(n.a,{href:"/reference/configuration#document-processing-settings",children:"OpenRAG environment variables"}),"."]}),"\n",(0,i.jsx)(n.p,{children:"The built-in pipeline uses the Docling processor directly instead of through the Docling Serve API."}),"\n",(0,i.jsxs)(n.p,{children:["For the underlying functionality, see ",(0,i.jsx)(n.a,{href:"https://github.com/langflow-ai/openrag/blob/main/src/models/processors.py#L58",children:(0,i.jsx)(n.code,{children:"processors.py"})})," in the OpenRAG repository."]}),"\n"]}),"\n"]}),"\n",(0,i.jsxs)(n.p,{children:["To modify the Docling ingestion and embedding parameters, click ",(0,i.jsx)(r.A,{name:"Settings2","aria-hidden":"true"})," ",(0,i.jsx)(n.strong,{children:"Settings"})," in the OpenRAG user interface."]}),"\n",(0,i.jsx)(n.admonition,{type:"tip",children:(0,i.jsxs)(n.p,{children:["OpenRAG warns you if ",(0,i.jsx)(n.code,{children:"docling serve"})," isn't running.\nFor information about starting and stopping OpenRAG native services, like Docling, see ",(0,i.jsx)(n.a,{href:"/manage-services",children:"Manage OpenRAG services"}),"."]})}),"\n",(0,i.jsxs)(n.ul,{children:["\n",(0,i.jsxs)(n.li,{children:["\n",(0,i.jsxs)(n.p,{children:[(0,i.jsx)(n.strong,{children:"Embedding model"}),": Select the model to use to generate vector embeddings for your documents."]}),"\n",(0,i.jsxs)(n.p,{children:["This is initially set during installation.\nThe recommended way to change this setting is in the OpenRAG ",(0,i.jsx)(n.strong,{children:"Settings"})," or your ",(0,i.jsxs)(n.a,{href:"/reference/configuration",children:["OpenRAG ",(0,i.jsx)(n.code,{children:".env"})," file"]}),".\nThis will automatically update all relevant ",(0,i.jsx)(n.a,{href:"/agents",children:"OpenRAG flows"})," to use the new embedding model configuration."]}),"\n",(0,i.jsxs)(n.p,{children:["If you uploaded documents prior to changing the embedding model, you can ",(0,i.jsx)(n.a,{href:"/knowledge-filters",children:"create filters"})," to separate documents embedded with different models, or you can reupload all documents to regenerate embeddings with the new model.\nIf you want to use multiple embeddings models, similarity search (in the ",(0,i.jsx)(n.strong,{children:"Chat"}),") can take longer as it searching each model's embeddings separately."]}),"\n"]}),"\n",(0,i.jsxs)(n.li,{children:["\n",(0,i.jsxs)(n.p,{children:[(0,i.jsx)(n.strong,{children:"Chunk size"}),": Set the number of characters for each text chunk when breaking down a file.\nLarger chunks yield more context per chunk, but can include irrelevant information. Smaller chunks yield more precise semantic search, but can lack context.\nThe default value is 1000 characters, which is usually a good balance between context and precision."]}),"\n"]}),"\n",(0,i.jsxs)(n.li,{children:["\n",(0,i.jsxs)(n.p,{children:[(0,i.jsx)(n.strong,{children:"Chunk overlap"}),": Set the number of characters to overlap over chunk boundaries.\nUse larger overlap values for documents where context is most important. Use smaller overlap values for simpler documents or when optimization is most important.\nThe default value is 200 characters, which represents an overlap of 20 percent if the ",(0,i.jsx)(n.strong,{children:"Chunk size"})," is 1000. This is suitable for general use. For faster processing, decrease the overlap to approximately 10 percent. For more complex documents where you need to preserve context across chunks, increase it to approximately 40 percent."]}),"\n"]}),"\n",(0,i.jsxs)(n.li,{children:["\n",(0,i.jsxs)(n.p,{children:[(0,i.jsx)(n.strong,{children:"Table Structure"}),": Enables Docling's ",(0,i.jsx)(n.a,{href:"https://docling-project.github.io/docling/reference/document_converter/",children:(0,i.jsx)(n.code,{children:"DocumentConverter"})})," tool for parsing tables. Instead of treating tables as plain text, tables are output as structured table data with preserved relationships and metadata. This option is enabled by default."]}),"\n"]}),"\n",(0,i.jsxs)(n.li,{children:["\n",(0,i.jsxs)(n.p,{children:[(0,i.jsx)(n.strong,{children:"OCR"}),": Enables Optical Character Recognition (OCR) processing when extracting text from images and ingesting scanned documents. This setting is best suited for processing text-based documents faster with Docling's ",(0,i.jsx)(n.a,{href:"https://docling-project.github.io/docling/reference/document_converter/",children:(0,i.jsx)(n.code,{children:"DocumentConverter"})}),". Images are ignored and not processed."]}),"\n",(0,i.jsx)(n.p,{children:"This option is disabled by default. Enabling OCR can slow ingestion performance."}),"\n",(0,i.jsxs)(n.p,{children:["If OpenRAG detects that the local machine is running on macOS, OpenRAG uses the ",(0,i.jsx)(n.a,{href:"https://www.piwheels.org/project/ocrmac/",children:"ocrmac"})," OCR engine. Other platforms use ",(0,i.jsx)(n.a,{href:"https://www.jaided.ai/easyocr/",children:"easyocr"}),"."]}),"\n"]}),"\n",(0,i.jsxs)(n.li,{children:["\n",(0,i.jsxs)(n.p,{children:[(0,i.jsx)(n.strong,{children:"Picture descriptions"}),": Only applicable if ",(0,i.jsx)(n.strong,{children:"OCR"})," is enabled. Adds image descriptions generated by the ",(0,i.jsx)(n.a,{href:"https://huggingface.co/HuggingFaceTB/SmolVLM-Instruct",children:(0,i.jsx)(n.code,{children:"SmolVLM-256M-Instruct"})})," model. Enabling picture descriptions can slow ingestion performance."]}),"\n"]}),"\n"]}),"\n",(0,i.jsx)(n.h3,{id:"set-the-local-documents-path",children:"Set the local documents path"}),"\n",(0,i.jsxs)(n.p,{children:["The default path for local uploads is ",(0,i.jsx)(n.code,{children:"~/.openrag/documents"}),". This is mounted to the ",(0,i.jsx)(n.code,{children:"/app/openrag-documents/"})," directory inside the OpenRAG container. Files added to the host or container directory are visible in both locations."]}),"\n",(0,i.jsxs)(n.p,{children:["To change this location, modify the ",(0,i.jsx)(n.strong,{children:"Documents Paths"})," variable in either the ",(0,i.jsxs)(n.a,{href:"/install#setup",children:[(0,i.jsx)(n.strong,{children:"Advanced Setup"})," menu"]})," or in your ",(0,i.jsxs)(n.a,{href:"/reference/configuration",children:["OpenRAG ",(0,i.jsx)(n.code,{children:".env"})," file"]}),"."]}),"\n",(0,i.jsx)(n.h2,{id:"delete-knowledge",children:"Delete knowledge"}),"\n",(0,i.jsx)(n.admonition,{type:"warning",children:(0,i.jsx)(n.p,{children:"This is a destructive operation that cannot be undone."})}),"\n",(0,i.jsxs)(n.p,{children:["To clear your entire knowledge base, ",(0,i.jsx)(n.a,{href:"/manage-services#reset-containers",children:"reset your OpenRAG containers"})," or ",(0,i.jsx)(n.a,{href:"/reinstall",children:"reinstall OpenRAG"}),"."]}),"\n",(0,i.jsx)(n.h2,{id:"see-also",children:"See also"}),"\n",(0,i.jsxs)(n.ul,{children:["\n",(0,i.jsx)(n.li,{children:(0,i.jsx)(n.a,{href:"/ingestion",children:"Ingest knowledge"})}),"\n",(0,i.jsx)(n.li,{children:(0,i.jsx)(n.a,{href:"/knowledge-filters",children:"Filter knowledge"})}),"\n",(0,i.jsx)(n.li,{children:(0,i.jsx)(n.a,{href:"/chat",children:"Chat with knowledge"})}),"\n",(0,i.jsx)(n.li,{children:(0,i.jsx)(n.a,{href:"/agents#inspect-and-modify-flows",children:"Inspect and modify flows"})}),"\n"]})]})}function p(e={}){const{wrapper:n}={...(0,o.R)(),...e.components};return n?(0,i.jsx)(n,{...e,children:(0,i.jsx)(u,{...e})}):u(e)}},7637:(e,n,s)=>{s.d(n,{Ay:()=>a,RM:()=>o});var t=s(4848),i=s(8453);const o=[];function r(e){const n={a:"a",code:"code",li:"li",p:"p",strong:"strong",ul:"ul",...(0,i.R)(),...e.components};return(0,t.jsxs)(n.ul,{children:["\n",(0,t.jsxs)(n.li,{children:["\n",(0,t.jsxs)(n.p,{children:[(0,t.jsx)(n.strong,{children:"No-auth mode"}),": If you select ",(0,t.jsx)(n.strong,{children:"Basic Setup"})," in the ",(0,t.jsx)(n.a,{href:"/tui",children:"TUI"}),", or your ",(0,t.jsxs)(n.a,{href:"/reference/configuration",children:["OpenRAG ",(0,t.jsx)(n.code,{children:".env"})," file"]})," doesn't include OAuth credentials, then the OpenRAG OpenSearch instance runs in no-auth mode."]}),"\n",(0,t.jsx)(n.p,{children:"This mode uses one anonymous JWT token for OpenSearch authentication.\nThere is no differentiation between users; all users that access your OpenRAG instance can access all documents uploaded to your knowledge base."}),"\n"]}),"\n",(0,t.jsxs)(n.li,{children:["\n",(0,t.jsxs)(n.p,{children:[(0,t.jsx)(n.strong,{children:"OAuth mode"}),": If you select ",(0,t.jsx)(n.strong,{children:"Advanced Setup"})," in the ",(0,t.jsx)(n.a,{href:"/tui",children:"TUI"}),", or your ",(0,t.jsxs)(n.a,{href:"/reference/configuration",children:["OpenRAG ",(0,t.jsx)(n.code,{children:".env"})," file"]})," includes OAuth credentials, then the OpenRAG OpenSearch instance runs in OAuth mode."]}),"\n",(0,t.jsx)(n.p,{children:"This mode uses a unique JWT token for each OpenRAG user, and each document is tagged with user ownership.\nDocuments are filtered by user owner; users see only the documents that they uploaded or have access to through their cloud storage accounts."}),"\n",(0,t.jsxs)(n.p,{children:["To enable OAuth mode after initial setup, see ",(0,t.jsx)(n.a,{href:"/ingestion#oauth-ingestion",children:"Ingest files with OAuth connectors"}),"."]}),"\n"]}),"\n"]})}function a(e={}){const{wrapper:n}={...(0,i.R)(),...e.components};return n?(0,t.jsx)(n,{...e,children:(0,t.jsx)(r,{...e})}):r(e)}},9179:(e,n,s)=>{s.d(n,{A:()=>o});s(6540);var t=s(7856),i=s(4848);function o({name:e,...n}){const s=t[e];return s?(0,i.jsx)(s,{...n}):null}}}]);