black279 committed on
Commit
c57ca6b
Β·
verified Β·
1 Parent(s): a8e9fed

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +91 -44
app.py CHANGED
@@ -1,6 +1,4 @@
1
  import os
2
- import json
3
- from datetime import datetime
4
  from pathlib import Path
5
  import gradio as gr
6
  from huggingface_hub import InferenceClient
@@ -100,7 +98,6 @@ def process_files(files):
100
  content = "\n\n" + "="*50 + "\nπŸ“Ž UPLOADED DOCUMENTS\n" + "="*50 + "\n\n"
101
 
102
  for file_obj in files:
103
- # Handle both file paths and file objects
104
  file_path = file_obj if isinstance(file_obj, str) else file_obj.name
105
  file_name = Path(file_path).name
106
  file_ext = Path(file_path).suffix.lower()
@@ -122,24 +119,41 @@ def process_files(files):
122
 
123
  return content
124
 
125
- def respond(message, history, system_message, max_tokens, temperature, top_p, model_id, uploaded_files):
126
- """Main chat function - Works with ChatInterface"""
127
- token = os.getenv("HF_TOKEN")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
 
129
  if not token:
130
- yield "⚠️ HF_TOKEN not configured. Please set it in Space settings β†’ Repository secrets."
131
  return
132
 
133
  try:
134
  client = InferenceClient(token=token, model=model_id)
135
 
136
- # Build messages
137
  messages = [{"role": "system", "content": system_message}]
138
 
139
- # Add history
140
- for user_msg, assistant_msg in history:
141
- messages.append({"role": "user", "content": user_msg})
142
- messages.append({"role": "assistant", "content": assistant_msg})
143
 
144
  # Process uploaded files
145
  file_content = ""
@@ -164,34 +178,41 @@ def respond(message, history, system_message, max_tokens, temperature, top_p, mo
164
  yield response
165
 
166
  except Exception as e:
167
- yield f"❌ Error: {str(e)}\n\nTry a different model or check your token."
168
 
169
  def update_system_prompt(preset):
170
  return SYSTEM_PROMPTS.get(preset, SYSTEM_PROMPTS["Default"])
171
 
172
- # Build interface using ChatInterface
173
- with gr.Blocks(title="AI Document Assistant") as demo:
174
 
175
  gr.Markdown(
176
  """
177
- # πŸ€– AI Document Assistant
178
- Upload documents (PDF, DOCX, TXT, CSV) and chat with AI about their content
179
  """
180
  )
181
 
182
  with gr.Row():
183
  with gr.Column(scale=3):
184
- # File upload area
185
  file_upload = gr.File(
186
  label="πŸ“ Upload Documents",
187
  file_count="multiple",
188
  file_types=[".pdf", ".docx", ".txt", ".csv", ".md", ".py", ".json"],
189
  )
190
 
191
- # ChatInterface handles all the message formatting automatically
192
  chat = gr.ChatInterface(
193
  fn=respond,
194
- chatbot=gr.Chatbot(height=500, show_label=False),
 
 
 
 
 
 
 
195
  textbox=gr.Textbox(
196
  placeholder="πŸ’¬ Ask a question about your documents...",
197
  show_label=False,
@@ -199,15 +220,14 @@ with gr.Blocks(title="AI Document Assistant") as demo:
199
  additional_inputs=[
200
  gr.Textbox(
201
  value=SYSTEM_PROMPTS["Document Analyzer"],
202
- label="πŸ’¬ System Prompt",
203
- lines=3,
204
- visible=False, # Hidden in settings panel
205
  ),
206
- gr.Slider(128, 4096, 2048, step=128, label="Max Tokens", visible=False),
207
- gr.Slider(0.1, 2.0, 0.7, step=0.1, label="Temperature", visible=False),
208
- gr.Slider(0.1, 1.0, 0.95, step=0.05, label="Top-p", visible=False),
209
- gr.Dropdown(choices=MODELS, value=MODELS[0], label="Model", visible=False),
210
- file_upload, # Pass file upload to the function
211
  ],
212
  retry_btn="πŸ”„ Retry",
213
  undo_btn="↩️ Undo",
@@ -220,13 +240,13 @@ with gr.Blocks(title="AI Document Assistant") as demo:
220
  model_dropdown = gr.Dropdown(
221
  choices=MODELS,
222
  value=MODELS[0],
223
- label="πŸ€– Model",
224
  )
225
 
226
  preset_dropdown = gr.Dropdown(
227
  choices=list(SYSTEM_PROMPTS.keys()),
228
  value="Document Analyzer",
229
- label="πŸ“‹ Preset",
230
  )
231
 
232
  system_prompt = gr.Textbox(
@@ -235,17 +255,34 @@ with gr.Blocks(title="AI Document Assistant") as demo:
235
  lines=4,
236
  )
237
 
238
- gr.Markdown("### πŸŽ›οΈ Parameters")
239
 
240
- max_tokens = gr.Slider(128, 4096, 2048, step=128, label="Max Tokens")
241
- temperature = gr.Slider(0.1, 2.0, 0.7, step=0.1, label="Temperature")
242
- top_p = gr.Slider(0.1, 1.0, 0.95, step=0.05, label="Top-p")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
243
 
244
- # Link settings to ChatInterface hidden inputs
245
  model_dropdown.change(
246
  lambda x: x,
247
  inputs=[model_dropdown],
248
- outputs=[chat.additional_inputs[4]], # Update model in ChatInterface
249
  )
250
 
251
  preset_dropdown.change(
@@ -257,7 +294,7 @@ with gr.Blocks(title="AI Document Assistant") as demo:
257
  system_prompt.change(
258
  lambda x: x,
259
  inputs=[system_prompt],
260
- outputs=[chat.additional_inputs[0]], # Update system prompt in ChatInterface
261
  )
262
 
263
  max_tokens.change(
@@ -281,14 +318,24 @@ with gr.Blocks(title="AI Document Assistant") as demo:
281
  gr.Markdown(
282
  """
283
  ---
284
- **πŸ’‘ How to use:**
285
- 1. Upload your documents (PDF, DOCX, TXT, CSV) using the file uploader
286
- 2. Ask questions about the content in the chat
287
- 3. Adjust settings on the right panel
288
- 4. Lower temperature = focused responses, Higher = creative
 
 
 
 
 
 
 
 
289
 
290
- **πŸ“¦ Requires:** pypdf, python-docx, pandas
291
- **πŸ” Setup:** Add HF_TOKEN in Space settings β†’ Repository secrets
 
 
292
  """
293
  )
294
 
 
1
  import os
 
 
2
  from pathlib import Path
3
  import gradio as gr
4
  from huggingface_hub import InferenceClient
 
98
  content = "\n\n" + "="*50 + "\nπŸ“Ž UPLOADED DOCUMENTS\n" + "="*50 + "\n\n"
99
 
100
  for file_obj in files:
 
101
  file_path = file_obj if isinstance(file_obj, str) else file_obj.name
102
  file_name = Path(file_path).name
103
  file_ext = Path(file_path).suffix.lower()
 
119
 
120
  return content
121
 
122
+ def respond(
123
+ message,
124
+ history,
125
+ system_message,
126
+ max_tokens,
127
+ temperature,
128
+ top_p,
129
+ model_id,
130
+ uploaded_files,
131
+ request: gr.Request, # Gradio 6 OAuth token access
132
+ ):
133
+ """Main chat function - Gradio 6 format with OAuth"""
134
+
135
+ # Get OAuth token from request (Gradio 6 with hf_oauth)
136
+ token = None
137
+ if request:
138
+ token = request.headers.get("authorization", "").replace("Bearer ", "")
139
+
140
+ # Fallback to environment variable
141
+ if not token:
142
+ token = os.getenv("HF_TOKEN")
143
 
144
  if not token:
145
+ yield "⚠️ Please log in with your Hugging Face account to use this Space."
146
  return
147
 
148
  try:
149
  client = InferenceClient(token=token, model=model_id)
150
 
151
+ # Build messages in OpenAI format (required for Gradio 6)
152
  messages = [{"role": "system", "content": system_message}]
153
 
154
+ # Add history - Gradio 6 uses messages format
155
+ for msg in history:
156
+ messages.append(msg)
 
157
 
158
  # Process uploaded files
159
  file_content = ""
 
178
  yield response
179
 
180
  except Exception as e:
181
+ yield f"❌ Error: {str(e)}\n\nTry a different model or refresh the page."
182
 
183
  def update_system_prompt(preset):
184
  return SYSTEM_PROMPTS.get(preset, SYSTEM_PROMPTS["Default"])
185
 
186
+ # Gradio 6 Interface
187
+ with gr.Blocks(title="Jarvis - AI Document Assistant") as demo:
188
 
189
  gr.Markdown(
190
  """
191
+ # πŸ’¬ Jarvis - AI Assistant
192
+ Upload documents (PDF, DOCX, TXT, CSV) and chat with powerful AI models
193
  """
194
  )
195
 
196
  with gr.Row():
197
  with gr.Column(scale=3):
198
+ # File upload
199
  file_upload = gr.File(
200
  label="πŸ“ Upload Documents",
201
  file_count="multiple",
202
  file_types=[".pdf", ".docx", ".txt", ".csv", ".md", ".py", ".json"],
203
  )
204
 
205
+ # ChatInterface - Gradio 6 format
206
  chat = gr.ChatInterface(
207
  fn=respond,
208
+ chatbot=gr.Chatbot(
209
+ height=500,
210
+ show_label=False,
211
+ avatar_images=(
212
+ None,
213
+ "https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.png"
214
+ ),
215
+ ),
216
  textbox=gr.Textbox(
217
  placeholder="πŸ’¬ Ask a question about your documents...",
218
  show_label=False,
 
220
  additional_inputs=[
221
  gr.Textbox(
222
  value=SYSTEM_PROMPTS["Document Analyzer"],
223
+ label="System Prompt",
224
+ visible=False,
 
225
  ),
226
+ gr.Slider(128, 4096, 2048, step=128, visible=False),
227
+ gr.Slider(0.1, 2.0, 0.7, step=0.1, visible=False),
228
+ gr.Slider(0.1, 1.0, 0.95, step=0.05, visible=False),
229
+ gr.Dropdown(choices=MODELS, value=MODELS[0], visible=False),
230
+ file_upload,
231
  ],
232
  retry_btn="πŸ”„ Retry",
233
  undo_btn="↩️ Undo",
 
240
  model_dropdown = gr.Dropdown(
241
  choices=MODELS,
242
  value=MODELS[0],
243
+ label="πŸ€– AI Model",
244
  )
245
 
246
  preset_dropdown = gr.Dropdown(
247
  choices=list(SYSTEM_PROMPTS.keys()),
248
  value="Document Analyzer",
249
+ label="πŸ“‹ Behavior Preset",
250
  )
251
 
252
  system_prompt = gr.Textbox(
 
255
  lines=4,
256
  )
257
 
258
+ gr.Markdown("### πŸŽ›οΈ Generation")
259
 
260
+ max_tokens = gr.Slider(
261
+ 128, 4096, 2048,
262
+ step=128,
263
+ label="Max Tokens",
264
+ info="Maximum response length"
265
+ )
266
+
267
+ temperature = gr.Slider(
268
+ 0.1, 2.0, 0.7,
269
+ step=0.1,
270
+ label="Temperature",
271
+ info="Creativity (higher = more random)"
272
+ )
273
+
274
+ top_p = gr.Slider(
275
+ 0.1, 1.0, 0.95,
276
+ step=0.05,
277
+ label="Top-p",
278
+ info="Nucleus sampling"
279
+ )
280
 
281
+ # Connect settings to ChatInterface
282
  model_dropdown.change(
283
  lambda x: x,
284
  inputs=[model_dropdown],
285
+ outputs=[chat.additional_inputs[4]],
286
  )
287
 
288
  preset_dropdown.change(
 
294
  system_prompt.change(
295
  lambda x: x,
296
  inputs=[system_prompt],
297
+ outputs=[chat.additional_inputs[0]],
298
  )
299
 
300
  max_tokens.change(
 
318
  gr.Markdown(
319
  """
320
  ---
321
+ ### πŸ’‘ How to Use
322
+
323
+ 1. **Log in** with your Hugging Face account (OAuth enabled)
324
+ 2. **Upload documents** - PDF, DOCX, TXT, CSV supported
325
+ 3. **Ask questions** about the content
326
+ 4. **Adjust settings** for different response styles
327
+
328
+ ### πŸ“Š Supported Formats
329
+ - **PDF**: Text extraction from all pages
330
+ - **DOCX**: Microsoft Word documents
331
+ - **TXT/MD**: Plain text and Markdown
332
+ - **CSV**: Data files with statistics
333
+ - **Code**: Python, JavaScript, JSON, etc.
334
 
335
+ ### 🎯 Tips
336
+ - Lower temperature (0.1-0.5) = Focused, deterministic
337
+ - Higher temperature (0.8-2.0) = Creative, varied
338
+ - Try different models for different tasks
339
  """
340
  )
341