nothingworry committed on
Commit 73fd1fc · 1 Parent(s): 90160c5

feat: Add knowledge base with document ingestion and file upload support

5_day_meal_plan.docx ADDED
Binary file (37 kB).
 
5_day_meal_plan.xlsx ADDED
Binary file (5.4 kB).
 
backend/api/ingestion/document_ingestion.py ADDED
@@ -0,0 +1,186 @@
+ """
+ Document Ingestion Service
+
+ Handles ingestion of various document types (PDF, DOCX, TXT, URL, raw_text)
+ with metadata support and automatic type detection.
+ """
+
+ import os
+ import re
+ import logging
+ from typing import Dict, Any, Optional
+ from urllib.parse import urlparse
+ import httpx
+
+ logger = logging.getLogger("document_ingestion")
+
+
+ def detect_source_type(content: str, filename: Optional[str] = None, url: Optional[str] = None) -> str:
+     """
+     Detect the source type from content, filename, or URL.
+     Returns: 'pdf', 'docx', 'txt', 'url', 'raw_text', 'markdown'
+     """
+     if url:
+         return "url"
+
+     if filename:
+         ext = filename.lower().split('.')[-1] if '.' in filename else ''
+         if ext in ['pdf']:
+             return 'pdf'
+         elif ext in ['docx', 'doc']:
+             return 'docx'
+         elif ext in ['txt', 'text']:
+             return 'txt'
+         elif ext in ['md', 'markdown']:
+             return 'markdown'
+
+     # Heuristic detection from content
+     content_lower = content.lower()
+     if 'http://' in content_lower or 'https://' in content_lower or 'www.' in content_lower:
+         return 'url'
+
+     return 'raw_text'
+
+
+ async def extract_text_from_url(url: str, timeout: int = 30) -> str:
+     """
+     Fetch and extract text content from a URL (async).
+     """
+     try:
+         async with httpx.AsyncClient(timeout=timeout, follow_redirects=True) as client:
+             response = await client.get(url)
+             response.raise_for_status()
+
+             # Basic HTML stripping (for simple pages)
+             text = response.text
+             # Remove script and style tags
+             text = re.sub(r'<script[^>]*>.*?</script>', '', text, flags=re.DOTALL | re.IGNORECASE)
+             text = re.sub(r'<style[^>]*>.*?</style>', '', text, flags=re.DOTALL | re.IGNORECASE)
+             # Remove HTML tags
+             text = re.sub(r'<[^>]+>', ' ', text)
+             # Normalize whitespace
+             text = re.sub(r'\s+', ' ', text).strip()
+
+             return text
+     except Exception as e:
+         logger.error(f"Failed to fetch URL {url}: {e}")
+         raise ValueError(f"Failed to fetch URL: {str(e)}")
+
+
+ def normalize_text(text: str) -> str:
+     """
+     Sanitize and normalize text before ingestion.
+     """
+     # Remove excessive whitespace
+     text = re.sub(r'\s+', ' ', text)
+     # Remove control characters except newlines and tabs
+     text = re.sub(r'[\x00-\x08\x0B-\x0C\x0E-\x1F\x7F]', '', text)
+     # Strip leading/trailing whitespace
+     text = text.strip()
+     return text
+
+
+ async def prepare_ingestion_payload(
+     tenant_id: str,
+     content: str,
+     source_type: Optional[str] = None,
+     filename: Optional[str] = None,
+     url: Optional[str] = None,
+     doc_id: Optional[str] = None,
+     metadata: Optional[Dict[str, Any]] = None
+ ) -> Dict[str, Any]:
+     """
+     Prepare ingestion payload according to the system prompt specification.
+
+     Returns:
+         {
+             "action": "ingest_document",
+             "tenant_id": "...",
+             "source_type": "pdf | docx | txt | url | raw_text",
+             "content": "...",
+             "metadata": {
+                 "filename": "...",
+                 "url": "...",
+                 "doc_id": "..."
+             }
+         }
+     """
+     # Auto-detect source type if not provided
+     if not source_type:
+         source_type = detect_source_type(content, filename, url)
+
+     # Handle URL: fetch content (async)
+     if source_type == "url" and url:
+         try:
+             content = await extract_text_from_url(url)
+         except Exception as e:
+             logger.warning(f"URL fetch failed, using provided content: {e}")
+
+     # Normalize content
+     content = normalize_text(content)
+
+     if not content:
+         raise ValueError("Content is empty after normalization")
+
+     # Generate doc_id if not provided
+     if not doc_id:
+         if filename:
+             doc_id = filename
+         elif url:
+             parsed = urlparse(url)
+             doc_id = f"{parsed.netloc}{parsed.path}".replace('/', '_')[:100]
+         else:
+             import hashlib
+             doc_id = hashlib.md5(content.encode()).hexdigest()[:16]
+
+     # Build metadata
+     ingestion_metadata = {
+         "doc_id": doc_id,
+         **(metadata or {})
+     }
+
+     if filename:
+         ingestion_metadata["filename"] = filename
+     if url:
+         ingestion_metadata["url"] = url
+
+     return {
+         "action": "ingest_document",
+         "tenant_id": tenant_id,
+         "source_type": source_type,
+         "content": content,
+         "metadata": ingestion_metadata
+     }
+
+
+ async def process_ingestion(
+     payload: Dict[str, Any],
+     rag_client
+ ) -> Dict[str, Any]:
+     """
+     Process the ingestion payload by sending it to the RAG MCP server.
+
+     Args:
+         payload: The ingestion payload from prepare_ingestion_payload
+         rag_client: RAGClient instance
+
+     Returns:
+         Result from RAG ingestion
+     """
+     tenant_id = payload["tenant_id"]
+     content = payload["content"]
+
+     # Send to RAG MCP server
+     result = await rag_client.ingest(content, tenant_id)
+
+     # Enhance result with metadata
+     return {
+         "status": "ok",
+         "tenant_id": tenant_id,
+         "source_type": payload["source_type"],
+         "doc_id": payload["metadata"].get("doc_id"),
+         "chunks_stored": result.get("chunks_stored", 0),
+         "metadata": payload["metadata"],
+         **result
+     }
backend/api/ingestion/pdf.py DELETED
File without changes
backend/api/routes/rag.py CHANGED
@@ -1,15 +1,33 @@
- from fastapi import APIRouter, Header, HTTPException
+ from fastapi import APIRouter, Header, HTTPException, UploadFile, File, Form
  from pydantic import BaseModel
+ from typing import Optional, Dict, Any
  from api.mcp_clients.rag_client import RAGClient
+ from api.services.document_ingestion import (
+     prepare_ingestion_payload,
+     process_ingestion,
+     detect_source_type,
+     normalize_text,
+     extract_text_from_file_bytes
+ )

  router = APIRouter()
  rag_client = RAGClient()


  class IngestRequest(BaseModel):
+     """Legacy simple ingestion request"""
      content: str


+ class DocumentIngestRequest(BaseModel):
+     """Enhanced ingestion request matching the system prompt specification"""
+     action: str = "ingest_document"
+     tenant_id: Optional[str] = None  # Can come from header
+     source_type: Optional[str] = None  # pdf | docx | txt | url | raw_text | markdown
+     content: str
+     metadata: Optional[Dict[str, Any]] = None
+
+
  class SearchRequest(BaseModel):
      query: str

@@ -43,6 +61,7 @@ async def rag_ingest(
      x_tenant_id: str = Header(None)
  ):
      """
+     Legacy ingestion endpoint - simple content ingestion.
      Ingest content into tenant knowledge base using the RAG MCP server.
      """

@@ -60,6 +79,129 @@ async def rag_ingest(
          raise HTTPException(status_code=500, detail=str(e))


+ @router.post("/ingest-document")
+ async def rag_ingest_document(
+     req: DocumentIngestRequest,
+     x_tenant_id: Optional[str] = Header(None)
+ ):
+     """
+     Enhanced document ingestion endpoint matching the system prompt specification.
+
+     Supports:
+     - PDF, DOCX, TXT, Markdown files
+     - URLs (fetches content automatically)
+     - Raw text
+     - Metadata (filename, url, doc_id)
+
+     Expected payload:
+     {
+         "action": "ingest_document",
+         "tenant_id": "...",
+         "source_type": "pdf | docx | txt | url | raw_text",
+         "content": "...",
+         "metadata": {
+             "filename": "...",
+             "url": "...",
+             "doc_id": "..."
+         }
+     }
+     """
+     # Use tenant_id from header if not in body (for backward compatibility)
+     tenant_id = req.tenant_id or x_tenant_id
+     if not tenant_id:
+         raise HTTPException(status_code=400, detail="Missing tenant ID")
+
+     try:
+         # Prepare ingestion payload (async for URL fetching)
+         payload = await prepare_ingestion_payload(
+             tenant_id=tenant_id,
+             content=req.content,
+             source_type=req.source_type,
+             filename=req.metadata.get("filename") if req.metadata else None,
+             url=req.metadata.get("url") if req.metadata else None,
+             doc_id=req.metadata.get("doc_id") if req.metadata else None,
+             metadata=req.metadata
+         )
+
+         # Process ingestion
+         result = await process_ingestion(payload, rag_client)
+
+         return {
+             "status": "ok",
+             "message": f"Document ingested successfully. {result.get('chunks_stored', 0)} chunk(s) stored.",
+             **result
+         }
+     except ValueError as e:
+         raise HTTPException(status_code=400, detail=str(e))
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=str(e))
+
+
+ @router.post("/ingest-file")
+ async def rag_ingest_file(
+     file: UploadFile = File(...),
+     x_tenant_id: Optional[str] = Header(None),
+     tenant_id: Optional[str] = Form(None)
+ ):
+     """
+     File upload endpoint for binary files (PDF, DOCX, TXT, MD).
+     Extracts text server-side and ingests into knowledge base.
+
+     Usage:
+         POST /rag/ingest-file
+         Headers:
+             x-tenant-id: <tenant_id>
+         Form Data:
+             file: <binary file>
+             tenant_id: <optional, can use header instead>
+     """
+     # Use tenant_id from form or header
+     tenant_id_value = tenant_id or x_tenant_id
+     if not tenant_id_value:
+         raise HTTPException(status_code=400, detail="Missing tenant ID")
+
+     try:
+         # Read file bytes
+         file_bytes = await file.read()
+         if not file_bytes:
+             raise HTTPException(status_code=400, detail="File is empty")
+
+         # Extract text from binary file
+         try:
+             extracted_text = extract_text_from_file_bytes(file_bytes, file.filename or "unknown")
+         except ValueError as e:
+             raise HTTPException(status_code=400, detail=str(e))
+
+         if not extracted_text or not extracted_text.strip():
+             raise HTTPException(status_code=400, detail="No text could be extracted from file")
+
+         # Prepare ingestion payload
+         payload = await prepare_ingestion_payload(
+             tenant_id=tenant_id_value,
+             content=extracted_text,
+             source_type=None,  # Auto-detect from filename
+             filename=file.filename,
+             url=None,
+             doc_id=None,
+             metadata=None
+         )
+
+         # Process ingestion
+         result = await process_ingestion(payload, rag_client)
+
+         return {
+             "status": "ok",
+             "message": f"File '{file.filename}' ingested successfully. {result.get('chunks_stored', 0)} chunk(s) stored.",
+             **result
+         }
+     except HTTPException:
+         raise
+     except ValueError as e:
+         raise HTTPException(status_code=400, detail=str(e))
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=str(e))
+
+
  @router.get("/list")
  async def rag_list(
      limit: int = 1000,
backend/api/services/INGESTION_SYSTEM.md ADDED
@@ -0,0 +1,154 @@
+ # Document Ingestion System
+
+ ## Overview
+
+ The backend now supports a comprehensive document ingestion system that matches the system prompt specification. This allows AI agents to automatically detect and ingest various document types (PDF, DOCX, TXT, URLs, raw text) with full metadata support.
+
+ ## Endpoints
+
+ ### 1. Legacy Endpoint (Backward Compatible)
+ ```
+ POST /rag/ingest
+ Headers:
+     x-tenant-id: <tenant_id>
+ Body:
+     {
+         "content": "text content to ingest"
+     }
+ ```
+
+ ### 2. Enhanced Document Ingestion Endpoint
+ ```
+ POST /rag/ingest-document
+ Headers:
+     x-tenant-id: <tenant_id>   (optional if in body)
+ Body:
+     {
+         "action": "ingest_document",
+         "tenant_id": "<tenant_id>",              // Optional if in header
+         "source_type": "pdf | docx | txt | url | raw_text | markdown",  // Auto-detected if not provided
+         "content": "text content or URL",
+         "metadata": {
+             "filename": "document.pdf",
+             "url": "https://example.com/doc",
+             "doc_id": "unique-document-id"
+         }
+     }
+ ```
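+
+ ### 3. File Upload Endpoint
+
+ Also added in this commit (`POST /rag/ingest-file` in `backend/api/routes/rag.py`): upload a binary file as multipart form data and let the backend extract the text server-side.
+ ```
+ POST /rag/ingest-file
+ Headers:
+     x-tenant-id: <tenant_id>
+ Form Data:
+     file: <binary file (PDF, DOCX, TXT, MD)>
+     tenant_id: <optional, can use header instead>
+ ```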
+
+ ## Features
+
+ ### Automatic Source Type Detection
+ - **PDF**: Detected from the `.pdf` filename extension
+ - **DOCX**: Detected from the `.docx` or `.doc` extension
+ - **TXT**: Detected from the `.txt` or `.text` extension
+ - **Markdown**: Detected from the `.md` or `.markdown` extension
+ - **URL**: Detected from a URL in the content or metadata
+ - **Raw Text**: Default fallback for plain text
+
+ ### URL Processing
+ - Automatically fetches content from URLs
+ - Strips HTML tags and scripts
+ - Normalizes whitespace
+ - Handles redirects and timeouts
+
+ ### Text Normalization
+ - Removes excessive whitespace
+ - Strips control characters
+ - Sanitizes input before ingestion
+
+ ### Metadata Support
+ - `filename`: Original filename
+ - `url`: Source URL
+ - `doc_id`: Unique document identifier (auto-generated if not provided)
+ - Custom keys can be added to the metadata object
+
+ ## Usage Examples
+
+ ### Example 1: Ingest Raw Text
+ ```json
+ {
+     "action": "ingest_document",
+     "tenant_id": "tenant123",
+     "source_type": "raw_text",
+     "content": "This is a company policy document...",
+     "metadata": {
+         "filename": "policy.txt",
+         "doc_id": "policy-2024-01"
+     }
+ }
+ ```
+
+ ### Example 2: Ingest from URL
+ ```json
+ {
+     "action": "ingest_document",
+     "tenant_id": "tenant123",
+     "source_type": "url",
+     "content": "https://example.com/documentation",
+     "metadata": {
+         "url": "https://example.com/documentation",
+         "doc_id": "docs-example-com"
+     }
+ }
+ ```
+
+ ### Example 3: Ingest PDF (with extracted text)
+ ```json
+ {
+     "action": "ingest_document",
+     "tenant_id": "tenant123",
+     "source_type": "pdf",
+     "content": "<extracted PDF text>",
+     "metadata": {
+         "filename": "manual.pdf",
+         "doc_id": "manual-2024"
+     }
+ }
+ ```
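+
+ ### Example 4: End-to-End Call (Python)
+
+ A minimal client sketch using `httpx` (already a backend dependency). It assumes the API is reachable at `http://localhost:8000` (the frontend's default) and that the RAG MCP server is running:
+
+ ```python
+ import httpx
+
+ payload = {
+     "action": "ingest_document",
+     "source_type": "raw_text",
+     "content": "Refunds are processed within 14 days of purchase.",
+     "metadata": {"doc_id": "refund-policy"},
+ }
+
+ # tenant_id is supplied via the x-tenant-id header, which the endpoint accepts
+ resp = httpx.post(
+     "http://localhost:8000/rag/ingest-document",
+     json=payload,
+     headers={"x-tenant-id": "tenant123"},
+ )
+ resp.raise_for_status()
+ print(resp.json()["message"])  # e.g. "Document ingested successfully. 3 chunk(s) stored."
+ ```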
+
+ ## Response Format
+
+ ```json
+ {
+     "status": "ok",
+     "message": "Document ingested successfully. 5 chunk(s) stored.",
+     "tenant_id": "tenant123",
+     "source_type": "raw_text",
+     "doc_id": "policy-2024-01",
+     "chunks_stored": 5,
+     "metadata": {
+         "filename": "policy.txt",
+         "doc_id": "policy-2024-01"
+     }
+ }
+ ```
+
+ ## Integration with AI Agents
+
+ The system is designed to work with AI agents that follow the system prompt specification:
+
+ 1. **Agent detects** document/URL/pasted content
+ 2. **Agent prepares** the ingestion payload with the proper structure
+ 3. **Agent sends** it to `POST /rag/ingest-document`
+ 4. **Backend processes** the request (see the sketch after this list):
+    - Detects/validates the source type
+    - Fetches URL content if needed
+    - Normalizes the text
+    - Sends it to the RAG MCP server for chunking/embedding
+    - Stores it in pgvector
+ 5. **Agent confirms** ingestion to the user
+
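+ At the service level, step 4 reduces to two calls into `api.services.document_ingestion`; a minimal sketch (illustrative only — `RAGClient` must point at a running RAG MCP server):
+
+ ```python
+ import asyncio
+
+ from api.mcp_clients.rag_client import RAGClient
+ from api.services.document_ingestion import (
+     prepare_ingestion_payload,
+     process_ingestion,
+ )
+
+ async def main():
+     # Builds the normalized payload; source_type is auto-detected from the filename
+     payload = await prepare_ingestion_payload(
+         tenant_id="tenant123",
+         content="Refunds are processed within 14 days of purchase.",
+         filename="policy.txt",
+     )
+     # Hands the payload to the RAG MCP server for chunking, embedding, and storage
+     result = await process_ingestion(payload, RAGClient())
+     print(f"{result['chunks_stored']} chunk(s) stored as {result['doc_id']}")
+
+ asyncio.run(main())
+ ```
+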
+ ## Error Handling
+
+ - **400 Bad Request**: Missing tenant_id, invalid payload, empty content
+ - **500 Internal Server Error**: RAG MCP server error, database error, URL fetch failure
+
+ ## Notes
+
+ - The legacy `/rag/ingest` endpoint remains for backward compatibility
+ - Source type is auto-detected if not provided
+ - URL fetching is async and handles timeouts gracefully
+ - All content is normalized before ingestion
+ - Metadata is preserved and stored with chunks
backend/api/services/agent_orchestrator.py CHANGED
@@ -202,7 +202,18 @@ class AgentOrchestrator:
              try:
                  fallback = await self.llm.simple_call(req.message, temperature=req.temperature)
              except Exception as llm_error:
-                 fallback = f"I encountered an error while processing your request: {str(e)}. Additionally, the AI service is unavailable: {str(llm_error)}"
+                 error_msg = str(llm_error)
+                 if "Cannot connect" in error_msg or "Ollama" in error_msg:
+                     fallback = (
+                         f"I encountered an error while processing your request: {str(e)}\n\n"
+                         f"Additionally, the AI service (Ollama) is unavailable: {error_msg}\n\n"
+                         f"To fix:\n"
+                         f"1. Install Ollama from https://ollama.ai\n"
+                         f"2. Start: `ollama serve`\n"
+                         f"3. Pull model: `ollama pull {os.getenv('OLLAMA_MODEL', 'llama3.1:latest')}`"
+                     )
+                 else:
+                     fallback = f"I encountered an error while processing your request: {str(e)}. Additionally, the AI service is unavailable: {error_msg}"
              return AgentResponse(
                  text=fallback,
                  decision=AgentDecision(action="respond", tool=None, tool_input=None, reason=f"tool_error_fallback: {e}"),
@@ -219,7 +230,19 @@ class AgentOrchestrator:
              llm_out = await self.llm.simple_call(req.message, temperature=req.temperature)
          except Exception as e:
              # If LLM fails, return a helpful error message
-             llm_out = f"I apologize, but I'm unable to process your request right now. The AI service is unavailable: {str(e)}"
+             error_msg = str(e)
+             if "Cannot connect" in error_msg or "Ollama" in error_msg:
+                 llm_out = (
+                     f"I couldn't connect to the AI service (Ollama). "
+                     f"Error: {error_msg}\n\n"
+                     f"To fix this:\n"
+                     f"1. Install Ollama from https://ollama.ai\n"
+                     f"2. Start Ollama: `ollama serve`\n"
+                     f"3. Pull the model: `ollama pull {os.getenv('OLLAMA_MODEL', 'llama3.1:latest')}`\n"
+                     f"4. Or set OLLAMA_URL and OLLAMA_MODEL in your .env file"
+                 )
+             else:
+                 llm_out = f"I apologize, but I'm unable to process your request right now. The AI service is unavailable: {error_msg}"
              reasoning_trace.append({
                  "step": "error",
                  "tool": "llm",
@@ -377,7 +400,20 @@ class AgentOrchestrator:
              )
          except Exception as e:
              tool_traces.append({"tool": "llm", "error": str(e)})
-             fallback = f"I encountered an error while synthesizing the response: {str(e)}"
+             error_msg = str(e)
+             # Provide helpful error message
+             if "Cannot connect" in error_msg or "Ollama" in error_msg:
+                 fallback = (
+                     f"I couldn't connect to the AI service (Ollama). "
+                     f"Error: {error_msg}\n\n"
+                     f"To fix this:\n"
+                     f"1. Install Ollama from https://ollama.ai\n"
+                     f"2. Start Ollama: `ollama serve`\n"
+                     f"3. Pull the model: `ollama pull {os.getenv('OLLAMA_MODEL', 'llama3.1:latest')}`\n"
+                     f"4. Or set OLLAMA_URL and OLLAMA_MODEL in your .env file"
+                 )
+             else:
+                 fallback = f"I encountered an error while synthesizing the response: {error_msg}"
             return AgentResponse(
                 text=fallback,
                 decision=AgentDecision(
backend/api/services/document_ingestion.py ADDED
@@ -0,0 +1,247 @@
+ """
+ Document Ingestion Service
+
+ Handles ingestion of various document types (PDF, DOCX, TXT, URL, raw_text)
+ with metadata support and automatic type detection.
+ """
+
+ import os
+ import re
+ import logging
+ from typing import Dict, Any, Optional
+ from urllib.parse import urlparse
+ import httpx
+ from io import BytesIO
+
+ logger = logging.getLogger("document_ingestion")
+
+
+ def detect_source_type(content: str, filename: Optional[str] = None, url: Optional[str] = None) -> str:
+     """
+     Detect the source type from content, filename, or URL.
+     Returns: 'pdf', 'docx', 'txt', 'url', 'raw_text', 'markdown'
+     """
+     if url:
+         return "url"
+
+     if filename:
+         ext = filename.lower().split('.')[-1] if '.' in filename else ''
+         if ext in ['pdf']:
+             return 'pdf'
+         elif ext in ['docx', 'doc']:
+             return 'docx'
+         elif ext in ['txt', 'text']:
+             return 'txt'
+         elif ext in ['md', 'markdown']:
+             return 'markdown'
+
+     # Heuristic detection from content
+     content_lower = content.lower()
+     if 'http://' in content_lower or 'https://' in content_lower or 'www.' in content_lower:
+         return 'url'
+
+     return 'raw_text'
+
+
+ async def extract_text_from_url(url: str, timeout: int = 30) -> str:
+     """
+     Fetch and extract text content from a URL (async).
+     """
+     try:
+         async with httpx.AsyncClient(timeout=timeout, follow_redirects=True) as client:
+             response = await client.get(url)
+             response.raise_for_status()
+
+             # Basic HTML stripping (for simple pages)
+             text = response.text
+             # Remove script and style tags
+             text = re.sub(r'<script[^>]*>.*?</script>', '', text, flags=re.DOTALL | re.IGNORECASE)
+             text = re.sub(r'<style[^>]*>.*?</style>', '', text, flags=re.DOTALL | re.IGNORECASE)
+             # Remove HTML tags
+             text = re.sub(r'<[^>]+>', ' ', text)
+             # Normalize whitespace
+             text = re.sub(r'\s+', ' ', text).strip()
+
+             return text
+     except Exception as e:
+         logger.error(f"Failed to fetch URL {url}: {e}")
+         raise ValueError(f"Failed to fetch URL: {str(e)}")
+
+
+ def extract_text_from_file_bytes(file_bytes: bytes, filename: str) -> str:
+     """
+     Extract text from binary file data (PDF, DOCX, etc.).
+
+     Args:
+         file_bytes: Binary file content
+         filename: Original filename (for type detection)
+
+     Returns:
+         Extracted text content
+     """
+     ext = filename.lower().split('.')[-1] if '.' in filename else ''
+
+     # PDF extraction
+     if ext == 'pdf':
+         try:
+             import PyPDF2
+             pdf_file = BytesIO(file_bytes)
+             pdf_reader = PyPDF2.PdfReader(pdf_file)
+             text_parts = []
+             for page in pdf_reader.pages:
+                 text_parts.append(page.extract_text())
+             return '\n'.join(text_parts)
+         except ImportError:
+             logger.warning("PyPDF2 not installed, cannot extract PDF text")
+             raise ValueError("PDF extraction requires PyPDF2. Install with: pip install PyPDF2")
+         except Exception as e:
+             logger.error(f"PDF extraction failed: {e}")
+             raise ValueError(f"Failed to extract text from PDF: {str(e)}")
+
+     # DOCX extraction
+     elif ext in ['docx', 'doc']:
+         try:
+             from docx import Document
+             doc_file = BytesIO(file_bytes)
+             doc = Document(doc_file)
+             return '\n'.join(paragraph.text for paragraph in doc.paragraphs)
+         except ImportError:
+             logger.warning("python-docx not installed, cannot extract DOCX text")
+             raise ValueError("DOCX extraction requires python-docx. Install with: pip install python-docx")
+         except Exception as e:
+             logger.error(f"DOCX extraction failed: {e}")
+             raise ValueError(f"Failed to extract text from DOCX: {str(e)}")
+
+     # Text files (TXT, MD)
+     elif ext in ['txt', 'md', 'markdown', 'text']:
+         try:
+             return file_bytes.decode('utf-8', errors='ignore')
+         except Exception as e:
+             logger.error(f"Text file decoding failed: {e}")
+             raise ValueError(f"Failed to decode text file: {str(e)}")
+
+     else:
+         # Try to decode as UTF-8 text as fallback
+         try:
+             return file_bytes.decode('utf-8', errors='ignore')
+         except Exception:
+             raise ValueError(f"Unsupported file type: {ext}. Supported: pdf, docx, txt, md")
+
+
+ def normalize_text(text: str) -> str:
+     """
+     Sanitize and normalize text before ingestion.
+     """
+     # Remove excessive whitespace
+     text = re.sub(r'\s+', ' ', text)
+     # Remove control characters except newlines and tabs
+     text = re.sub(r'[\x00-\x08\x0B-\x0C\x0E-\x1F\x7F]', '', text)
+     # Strip leading/trailing whitespace
+     text = text.strip()
+     return text
+
+
+ async def prepare_ingestion_payload(
+     tenant_id: str,
+     content: str,
+     source_type: Optional[str] = None,
+     filename: Optional[str] = None,
+     url: Optional[str] = None,
+     doc_id: Optional[str] = None,
+     metadata: Optional[Dict[str, Any]] = None
+ ) -> Dict[str, Any]:
+     """
+     Prepare ingestion payload according to the system prompt specification.
+
+     Returns:
+         {
+             "action": "ingest_document",
+             "tenant_id": "...",
+             "source_type": "pdf | docx | txt | url | raw_text",
+             "content": "...",
+             "metadata": {
+                 "filename": "...",
+                 "url": "...",
+                 "doc_id": "..."
+             }
+         }
+     """
+     # Auto-detect source type if not provided
+     if not source_type:
+         source_type = detect_source_type(content, filename, url)
+
+     # Handle URL: fetch content (async)
+     if source_type == "url" and url:
+         try:
+             content = await extract_text_from_url(url)
+         except Exception as e:
+             logger.warning(f"URL fetch failed, using provided content: {e}")
+
+     # Normalize content
+     content = normalize_text(content)
+
+     if not content:
+         raise ValueError("Content is empty after normalization")
+
+     # Generate doc_id if not provided
+     if not doc_id:
+         if filename:
+             doc_id = filename
+         elif url:
+             parsed = urlparse(url)
+             doc_id = f"{parsed.netloc}{parsed.path}".replace('/', '_')[:100]
+         else:
+             import hashlib
+             doc_id = hashlib.md5(content.encode()).hexdigest()[:16]
+
+     # Build metadata
+     ingestion_metadata = {
+         "doc_id": doc_id,
+         **(metadata or {})
+     }
+
+     if filename:
+         ingestion_metadata["filename"] = filename
+     if url:
+         ingestion_metadata["url"] = url
+
+     return {
+         "action": "ingest_document",
+         "tenant_id": tenant_id,
+         "source_type": source_type,
+         "content": content,
+         "metadata": ingestion_metadata
+     }
+
+
+ async def process_ingestion(
+     payload: Dict[str, Any],
+     rag_client
+ ) -> Dict[str, Any]:
+     """
+     Process the ingestion payload by sending it to the RAG MCP server.
+
+     Args:
+         payload: The ingestion payload from prepare_ingestion_payload
+         rag_client: RAGClient instance
+
+     Returns:
+         Result from RAG ingestion
+     """
+     tenant_id = payload["tenant_id"]
+     content = payload["content"]
+
+     # Send to RAG MCP server
+     result = await rag_client.ingest(content, tenant_id)
+
+     # Enhance result with metadata
+     return {
+         "status": "ok",
+         "tenant_id": tenant_id,
+         "source_type": payload["source_type"],
+         "doc_id": payload["metadata"].get("doc_id"),
+         "chunks_stored": result.get("chunks_stored", 0),
+         "metadata": payload["metadata"],
+         **result
+     }
createingdummydata.py ADDED
@@ -0,0 +1,44 @@
+ from docx import Document
+
+ # Dummy data
+ data = {
+     "Day": ["Day 1", "Day 2", "Day 3", "Day 4", "Day 5"],
+     "Breakfast": [
+         "Oatmeal with sliced bananas and honey",
+         "Scrambled eggs with toast and orange juice",
+         "Greek yogurt with granola and berries",
+         "Pancakes with maple syrup and strawberries",
+         "Smoothie (spinach, banana, yogurt, almond milk)"
+     ],
+     "Lunch": [
+         "Grilled chicken salad with mixed greens and vinaigrette",
+         "Turkey sandwich with lettuce, tomato, and chips",
+         "Vegetable soup with whole-grain roll",
+         "Tuna salad wrap with carrot sticks",
+         "Caesar salad with grilled shrimp"
+     ],
+     "Dinner": [
+         "Spaghetti with marinara sauce and garlic bread",
+         "Baked salmon with steamed broccoli and rice",
+         "Beef stir-fry with mixed vegetables and noodles",
+         "Chicken curry with basmati rice",
+         "Veggie pizza with side salad"
+     ]
+ }
+
+ # Create DOCX document
+ doc = Document()
+ doc.add_heading("5-Day Meal Plan", level=1)
+
+ for i in range(5):
+     doc.add_heading(data["Day"][i], level=2)
+     doc.add_paragraph(f"Breakfast: {data['Breakfast'][i]}")
+     doc.add_paragraph(f"Lunch: {data['Lunch'][i]}")
+     doc.add_paragraph(f"Dinner: {data['Dinner'][i]}")
+     doc.add_paragraph("")
+
+ # Save file
+ path = "5_day_meal_plan.docx"
+ doc.save(path)
+
+ print(f"Saved DOCX file to: {path}")
frontend/components/knowledge-base-panel.tsx CHANGED
@@ -1,6 +1,6 @@
  "use client";

- import { useState } from "react";
+ import { useState, useRef } from "react";
  import Link from "next/link";

  type SearchResult = {
@@ -9,6 +9,8 @@ type SearchResult = {
    relevance?: number;
  };

+ type SourceType = "raw_text" | "url" | "pdf" | "docx" | "txt" | "markdown";
+
  const API_BASE =
    process.env.NEXT_PUBLIC_API_URL?.replace(/\/$/, "") || "http://localhost:8000";

@@ -18,9 +20,13 @@ export function KnowledgeBasePanel() {
    const [searchResults, setSearchResults] = useState<SearchResult[]>([]);
    const [isSearching, setIsSearching] = useState(false);
    const [ingestContent, setIngestContent] = useState("");
+   const [sourceType, setSourceType] = useState<SourceType>("raw_text");
+   const [filename, setFilename] = useState("");
+   const [url, setUrl] = useState("");
    const [isIngesting, setIsIngesting] = useState(false);
    const [ingestStatus, setIngestStatus] = useState<string | null>(null);
    const [searchError, setSearchError] = useState<string | null>(null);
+   const fileInputRef = useRef<HTMLInputElement>(null);

    async function handleSearch() {
      if (!searchQuery.trim() || isSearching) return;
@@ -56,30 +62,130 @@ export function KnowledgeBasePanel() {
      }
    }

+   async function handleFileUpload(event: React.ChangeEvent<HTMLInputElement>) {
+     const file = event.target.files?.[0];
+     if (!file) return;
+
+     // Detect file type from extension
+     const ext = file.name.split('.').pop()?.toLowerCase();
+     let detectedType: SourceType = "raw_text";
+     if (ext === "pdf") detectedType = "pdf";
+     else if (ext === "docx" || ext === "doc") detectedType = "docx";
+     else if (ext === "txt" || ext === "text") detectedType = "txt";
+     else if (ext === "md" || ext === "markdown") detectedType = "markdown";
+
+     setSourceType(detectedType);
+     setFilename(file.name);
+
+     // For binary files (PDF, DOCX), upload directly to server
+     if (detectedType === "pdf" || detectedType === "docx") {
+       await handleFileIngest(file);
+       return;
+     }
+
+     // For text files, read and show in textarea
+     const reader = new FileReader();
+     reader.onload = async (e) => {
+       const text = e.target?.result as string;
+       setIngestContent(text);
+     };
+     reader.readAsText(file);
+   }
+
+   async function handleFileIngest(file: File) {
+     setIsIngesting(true);
+     setIngestStatus(null);
+
+     try {
+       const formData = new FormData();
+       formData.append("file", file);
+
+       const response = await fetch(`${API_BASE}/rag/ingest-file`, {
+         method: "POST",
+         headers: {
+           "x-tenant-id": tenantId,
+         },
+         body: formData,
+       });
+
+       if (!response.ok) {
+         const errorData = await response.json().catch(() => ({}));
+         throw new Error(
+           errorData.detail || `File ingestion failed: ${response.status}`,
+         );
+       }
+
+       const data = await response.json();
+       setIngestStatus(
+         `✅ ${data.message || `Successfully ingested ${data.chunks_stored || 0} chunk(s)`}`,
+       );
+       setFilename("");
+       if (fileInputRef.current) {
+         fileInputRef.current.value = "";
+       }
+     } catch (err) {
+       console.error(err);
+       setIngestStatus(
+         err instanceof Error
+           ? `❌ Error: ${err.message}`
+           : "Failed to ingest file. Is the RAG MCP server running?",
+       );
+     } finally {
+       setIsIngesting(false);
+     }
+   }
+
    async function handleIngest() {
      if (!ingestContent.trim() || isIngesting) return;
      setIsIngesting(true);
      setIngestStatus(null);

      try {
+       // Prepare metadata
+       const metadata: Record<string, string> = {};
+       if (filename) metadata.filename = filename;
+       if (url || sourceType === "url") {
+         const ingestUrl = url || ingestContent.trim();
+         metadata.url = ingestUrl;
+       }
+       if (filename) {
+         // Generate doc_id from filename
+         metadata.doc_id = filename.replace(/[^a-zA-Z0-9]/g, "_").toLowerCase();
+       }
+
+       // Use the new enhanced endpoint
-       const response = await fetch(`${API_BASE}/rag/ingest`, {
+       const response = await fetch(`${API_BASE}/rag/ingest-document`, {
          method: "POST",
          headers: {
            "Content-Type": "application/json",
            "x-tenant-id": tenantId,
          },
-         body: JSON.stringify({ content: ingestContent }),
+         body: JSON.stringify({
+           action: "ingest_document",
+           tenant_id: tenantId,
+           source_type: sourceType,
+           content: sourceType === "url" ? (url || ingestContent.trim()) : ingestContent,
+           metadata: Object.keys(metadata).length > 0 ? metadata : undefined,
+         }),
        });

        if (!response.ok) {
-         throw new Error(`Ingestion failed: ${response.status}`);
+         const errorData = await response.json().catch(() => ({}));
+         throw new Error(
+           errorData.detail || `Ingestion failed: ${response.status}`,
+         );
        }

        const data = await response.json();
        setIngestStatus(
-         `✅ Successfully ingested ${data.chunks_stored || 0} chunk(s)`,
+         `✅ ${data.message || `Successfully ingested ${data.chunks_stored || 0} chunk(s)`}`,
        );
        setIngestContent("");
+       setFilename("");
+       setUrl("");
+       if (fileInputRef.current) {
+         fileInputRef.current.value = "";
+       }
      } catch (err) {
        console.error(err);
        setIngestStatus(
@@ -186,23 +292,95 @@ export function KnowledgeBasePanel() {
            Add to Knowledge Base
          </p>
          <p className="mt-2 text-sm text-slate-300">
-           Paste text content to ingest. It will be chunked, embedded, and
-           stored in your tenant's knowledge base.
+           Upload files (PDF, DOCX, TXT, MD), paste text, or provide URLs. Content will be chunked, embedded, and stored.
          </p>
+
+         {/* Source Type Selector */}
+         <div className="mt-4 flex flex-wrap gap-2">
+           {(["raw_text", "url", "pdf", "docx", "txt", "markdown"] as SourceType[]).map((type) => (
+             <button
+               key={type}
+               onClick={() => {
+                 setSourceType(type);
+                 if (type !== "url") setUrl("");
+               }}
+               className={`rounded-full px-4 py-2 text-xs font-semibold uppercase tracking-wider transition ${
+                 sourceType === type
+                   ? "bg-cyan-500 text-slate-950"
+                   : "bg-white/5 text-slate-300 hover:bg-white/10"
+               }`}
+             >
+               {type.replace("_", " ")}
+             </button>
+           ))}
+         </div>
+
+         {/* File Upload */}
+         <div className="mt-4">
+           <input
+             ref={fileInputRef}
+             type="file"
+             accept=".pdf,.docx,.doc,.txt,.md,.markdown"
+             onChange={handleFileUpload}
+             className="hidden"
+             id="file-upload"
+           />
+           <label
+             htmlFor="file-upload"
+             className="inline-flex cursor-pointer items-center gap-2 rounded-full border border-white/10 bg-white/5 px-4 py-2 text-sm text-slate-300 transition hover:bg-white/10"
+           >
+             📄 Upload File (PDF, DOCX, TXT, MD)
+           </label>
+           {filename && (
+             <span className="ml-3 text-sm text-cyan-300">{filename}</span>
+           )}
+         </div>
+
+         {/* URL Input (when source type is URL) */}
+         {sourceType === "url" && (
+           <input
+             type="url"
+             placeholder="Enter URL to fetch content from..."
+             value={url}
+             onChange={(e) => setUrl(e.target.value)}
+             className="mt-4 w-full rounded-2xl border border-white/10 bg-white/5 px-4 py-3 text-sm text-white outline-none focus:border-cyan-200/80"
+           />
+         )}
+
+         {/* Content Textarea */}
          <textarea
-           placeholder="Paste document content here (e.g., policy text, procedures, documentation)..."
+           placeholder={
+             sourceType === "url"
+               ? "Or paste URL here..."
+               : "Paste document content here (e.g., policy text, procedures, documentation, FAQs)..."
+           }
            value={ingestContent}
            onChange={(e) => setIngestContent(e.target.value)}
            className="mt-4 w-full rounded-2xl border border-white/10 bg-white/5 px-4 py-3 text-sm text-white outline-none focus:border-cyan-200/80"
            rows={6}
          />
+
+         {/* Filename Input (optional) */}
+         {sourceType !== "url" && (
+           <input
+             type="text"
+             placeholder="Filename (optional, e.g., policy.pdf)"
+             value={filename}
+             onChange={(e) => setFilename(e.target.value)}
+             className="mt-3 w-full rounded-2xl border border-white/10 bg-white/5 px-4 py-3 text-sm text-white outline-none focus:border-cyan-200/80"
+           />
+         )}
+
          <div className="mt-4 flex items-center gap-3">
            <button
              onClick={handleIngest}
-             disabled={isIngesting || !ingestContent.trim()}
+             disabled={
+               isIngesting ||
+               (!ingestContent.trim() && !url.trim() && sourceType === "url")
+             }
              className="rounded-2xl bg-gradient-to-r from-emerald-400 to-teal-500 px-6 py-2.5 font-semibold text-slate-950 shadow-lg shadow-emerald-500/30 transition hover:-translate-y-0.5 disabled:cursor-not-allowed disabled:opacity-60"
            >
-             {isIngesting ? "Ingesting…" : "Ingest Content"}
+             {isIngesting ? "Ingesting…" : `Ingest as ${sourceType.replace("_", " ")}`}
            </button>
            {ingestStatus && (
              <p className="text-sm text-slate-300">{ingestStatus}</p>
requirements.txt CHANGED
@@ -8,4 +8,7 @@ supabase
  sentence-transformers
  pytest
  pytest-asyncio
- duckduckgo-search
+ duckduckgo-search
+ PyPDF2
+ python-docx
+ python-multipart