anishdabhane committed on
Commit 03e323a · 1 Parent(s): 44a1d37

feat: inspector v1 with light/dark mode, tools, prompts, resources and ping functionality

Files changed (9)
  1. .gitignore +23 -0
  2. README.md +37 -0
  3. app.py +492 -0
  4. handlers.py +247 -0
  5. mcp_client.py +206 -0
  6. pyproject.toml +11 -0
  7. requirements.txt +3 -0
  8. theme.py +72 -0
  9. uv.lock +0 -0
.gitignore ADDED
@@ -0,0 +1,23 @@
+ # Python
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # Virtual Environments
+ .venv/
+ venv/
+ env/
+ .env
+
+ # Distribution / Packaging
+ dist/
+ build/
+ *.egg-info/
+
+ # IDEs
+ .vscode/
+ .idea/
+
+ # Misc
+ .DS_Store
+ .python-version
README.md CHANGED
@@ -11,4 +11,41 @@ license: mit
  short_description: Debug and test remote MCP servers via Gradio.
  ---
 
+ # Gradio MCP Inspector
+
+ A GUI-based inspector for the [Model Context Protocol (MCP)](https://modelcontextprotocol.io/), built with [Gradio](https://gradio.app/). It lets developers interactively explore, debug, and test remote MCP servers.
+
+ ## Features
+
+ - **🔌 Flexible Connection**: Supports both **Streamable HTTP** and **SSE** (Server-Sent Events) transports.
+ - **📂 Resources**: Browse available resources, view templates, and inspect content.
+ - **💬 Prompts**: List prompts and execute them with dynamically generated input forms.
+ - **🛠️ Tools**: Discover tools and run them with schema-based argument inputs.
+ - **🕵️ Debugging**: View raw JSON-RPC requests and responses for every interaction.
+ - **⚙️ Configurable**: Adjustable timeouts and connection settings.
+
+ ## Installation
+
+ 1. Clone the repository:
+ ```bash
+ git clone https://github.com/yourusername/gradio-mcp-inspector.git
+ cd gradio-mcp-inspector
+ ```
+
+ 2. Install dependencies:
+ ```bash
+ pip install -r requirements.txt
+ ```
+
+ ## Usage
+
+ Run the application locally:
+
+ ```bash
+ python app.py
+ ```
+
+ Open your browser and navigate to the printed URL (typically `http://127.0.0.1:7860`).
+
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
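To exercise the inspector end to end, it helps to have a small MCP server to point it at. The sketch below uses FastMCP (already a dependency of this project); the server name, tool, resource, and port are illustrative, and the exact `transport` value accepted by `mcp.run()` may vary with the installed fastmcp version.

```python
# demo_server.py — a tiny MCP server to test the inspector against (illustrative only).
from fastmcp import FastMCP

mcp = FastMCP("Inspector Demo")

@mcp.tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

@mcp.resource("greeting://{name}")
def greeting(name: str) -> str:
    """A templated greeting resource."""
    return f"Hello, {name}!"

if __name__ == "__main__":
    # Serve over Streamable HTTP; the inspector then connects to http://127.0.0.1:8000/mcp
    mcp.run(transport="http", host="127.0.0.1", port=8000)
```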
app.py ADDED
@@ -0,0 +1,492 @@
1
+ from __future__ import annotations
2
+
3
+ import gradio as gr
4
+ import json
5
+
6
+ from handlers import (
7
+ connect,
8
+ disconnect,
9
+ list_resources_with_history,
10
+ list_resource_templates_with_history,
11
+ on_resource_select,
12
+ list_prompts_with_history,
13
+ invoke_prompt_with_history,
14
+ list_tools_with_history,
15
+ invoke_tool_with_history,
16
+ ping_with_history,
17
+ custom_request_with_history,
18
+ get_sampling_log,
19
+ )
20
+
21
+ with gr.Blocks(title="Gradio MCP Inspector") as app:
22
+
23
+ gr.Markdown("# Gradio MCP Inspector")
24
+ server_url_state = gr.State("")
25
+ history_state = gr.State([])
26
+ tools_state = gr.State([])
27
+ resources_state = gr.State([])
28
+ templates_state = gr.State([])
29
+ prompts_state = gr.State([])
30
+
31
+ with gr.Row(equal_height=True):
32
+ with gr.Column(scale=1, min_width=320):
33
+ transport_type = gr.Dropdown(
34
+ ["Streamable HTTP","SSE"],
35
+ label="Transport Type",
36
+ value="Streamable HTTP",
37
+ )
38
+ base_url_input = gr.Textbox(
39
+ label="URL",
40
+ placeholder="https://your-mcp-server.example/mcp",
41
+ )
42
+
43
+ with gr.Accordion("Authentication", open=False):
44
+ header_name = gr.Textbox(label="Header Name", placeholder="Authorization")
45
+ bearer_token = gr.Textbox(label="Bearer Token", placeholder="Bearer Token", type="password")
46
+
47
+ with gr.Accordion("Configuration", open=False):
48
+ request_timeout = gr.Number(label="Request Timeout", value=10000)
49
+ reset_timeout_on_progress = gr.Dropdown(
50
+ label="Reset Timeout on Progress",
51
+ choices=["True", "False"],
52
+ value="True"
53
+ )
54
+ max_total_timeout = gr.Number(label="Maximum Total Timeout", value=60000)
55
+
56
+ status_badge = gr.Markdown("**Status:** 🔴 Disconnected.")
57
+ initial_connect_btn = gr.Button("Connect", variant="primary")
58
+ reconnect_btn = gr.Button("Reconnect", variant="primary", visible=False)
59
+ disconnect_btn = gr.Button("Disconnect", variant="stop", visible=False)
60
+
61
+ initial_connect_btn.click(
62
+ connect,
63
+ inputs=[transport_type, base_url_input, header_name, bearer_token, request_timeout, reset_timeout_on_progress, max_total_timeout],
64
+ outputs=[server_url_state, status_badge, initial_connect_btn, reconnect_btn, disconnect_btn],
65
+ )
66
+ reconnect_btn.click(
67
+ connect,
68
+ inputs=[transport_type, base_url_input, header_name, bearer_token, request_timeout, reset_timeout_on_progress, max_total_timeout],
69
+ outputs=[server_url_state, status_badge, initial_connect_btn, reconnect_btn, disconnect_btn],
70
+ )
71
+ disconnect_btn.click(
72
+ disconnect,
73
+ outputs=[server_url_state, status_badge, initial_connect_btn, reconnect_btn, disconnect_btn],
74
+ )
75
+
76
+ # Add spacing
77
+ gr.HTML("<div style='margin-top: 13em;'></div>")
78
+
79
+
80
+ theme_selector = gr.Radio(
81
+ ["Light", "Dark"],
82
+ value="Dark",
83
+ label="Theme",
84
+ interactive=True,
85
+ )
86
+ gr.Markdown("Connected indicator updates once you reconnect.")
87
+
88
+ with gr.Column(scale=3, min_width=640):
89
+ with gr.Tabs():
90
+ with gr.Tab("Resources"):
91
+ with gr.Row():
92
+ with gr.Column():
93
+ gr.Markdown("### Resources")
94
+ list_resources_btn = gr.Button("List Resources")
95
+ clear_resources_btn = gr.Button("Clear")
96
+ resource_list = gr.Radio(label="Available Resources", choices=[], interactive=True)
97
+ resource_empty_msg = gr.Markdown("⚠️ No resources found.", visible=False)
98
+
99
+ with gr.Column():
100
+ gr.Markdown("### Resource Templates")
101
+ list_templates_btn = gr.Button("List Templates")
102
+ clear_templates_btn = gr.Button("Clear")
103
+ template_list = gr.Radio(label="Available Templates", choices=[], interactive=True)
104
+ template_empty_msg = gr.Markdown("⚠️ No templates found.", visible=False)
105
+
106
+ with gr.Column():
107
+ gr.Markdown("### Select a resource or template")
108
+ # resource_content_info = gr.Markdown("Select a resource or template from the list to view its contents")
109
+ # resource_content_view = gr.Code(label="Resource Content", language="json", visible=False)
110
+
111
+ with gr.Accordion("Debug Info", open=False):
112
+ resource_list_request = gr.Code(label="List Request", language="json")
113
+ resource_list_response = gr.Code(label="List Response", language="json")
114
+ with gr.Column(scale=2):
115
+ @gr.render(inputs=[resource_list, resources_state])
116
+ def render_resource_details(uri, resources):
117
+ if not uri or not resources:
118
+ return
119
+ res = next((r for r in resources if r["uri"] == uri), None)
120
+ if res:
121
+ gr.Markdown(f"### {res.get('name', 'Resource')}")
122
+ if res.get("description"):
123
+ gr.Markdown(res["description"])
124
+ if res.get("mimeType"):
125
+ gr.Markdown(f"**MIME Type:** `{res['mimeType']}`")
126
+
127
+ resource_content_view = gr.Code(label="Resource Content", language="json", visible=False)
128
+ resource_read_request = gr.Code(label="Read Request", language="json", visible=False)
129
+ resource_read_response = gr.Code(label="Read Response", language="json", visible=False)
130
+
131
+ with gr.Accordion("Resource Templates", open=False):
132
+ with gr.Row():
133
+ template_list = gr.Radio(label="Templates", choices=[], visible=False)
134
+ template_empty_msg = gr.Markdown("⚠️ No templates found.", visible=False)
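+ # Note: these assignments rebind template_list and template_empty_msg, shadowing the
+ # visible components created in the left column; the list_templates_btn wiring below
+ # therefore updates these hidden instances.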
135
+
136
+ @gr.render(inputs=[template_list, templates_state])
137
+ def render_template_details(uri_template, templates):
138
+ if not uri_template or not templates:
139
+ return
140
+ tmpl = next((t for t in templates if t["uriTemplate"] == uri_template), None)
141
+ if tmpl:
142
+ gr.Markdown(f"### {tmpl.get('name', 'Template')}")
143
+ if tmpl.get("description"):
144
+ gr.Markdown(tmpl["description"])
145
+ if tmpl.get("mimeType"):
146
+ gr.Markdown(f"**MIME Type:** `{tmpl['mimeType']}`")
147
+ gr.Markdown(f"**URI Template:** `{tmpl['uriTemplate']}`")
148
+
149
+ with gr.Tab("Prompts"):
150
+ with gr.Row():
151
+ with gr.Column(scale=1):
152
+ gr.Markdown("### Prompts")
153
+ list_prompts_btn = gr.Button("List Prompts")
154
+ clear_prompts_btn = gr.Button("Clear")
155
+ prompt_list = gr.Radio(label="Available Prompts", choices=[], interactive=True)
156
+ prompt_empty_msg = gr.Markdown("⚠️ No prompts found.", visible=False)
157
+
158
+ with gr.Column(scale=2):
159
+ @gr.render(inputs=[prompt_list, prompts_state])
160
+ def render_prompt_form(prompt_name, prompts):
161
+ if not prompt_name or not prompts:
162
+ gr.Markdown("### Select a prompt")
163
+ gr.Markdown("Select a prompt from the list to view its details and invoke it.")
164
+ return
165
+
166
+ prompt = next((p for p in prompts if p["name"] == prompt_name), None)
167
+ if not prompt:
168
+ return
169
+
170
+ gr.Markdown(f"### {prompt['name']}")
171
+ gr.Markdown(prompt.get("description", "No description provided."))
172
+
173
+ arguments = prompt.get("arguments", [])
174
+ inputs = {}
175
+
176
+ with gr.Group():
177
+ for arg in arguments:
178
+ arg_name = arg["name"]
179
+ label = arg_name + (" *" if arg.get("required") else "")
180
+ desc = arg.get("description", "")
181
+ # Prompts usually take string arguments
182
+ inputs[arg_name] = gr.Textbox(label=label, placeholder=desc)
183
+
184
+ run_btn = gr.Button("Get Prompt", variant="primary")
185
+
186
+ async def wrapper(base_url, timeout, history, *form_values):
187
+ args = {}
188
+ keys = list(inputs.keys())
189
+ for i, key in enumerate(keys):
190
+ val = form_values[i]
191
+ if val:
192
+ args[key] = val
193
+ # Convert timeout (ms) to seconds for the handler
194
+ timeout_sec = float(timeout) / 1000.0 if timeout else 30.0
195
+ request, response, history, rendered = await invoke_prompt_with_history(base_url, timeout_sec, prompt_name, json.dumps(args), history)
196
+ return (
197
+ gr.update(value=response, visible=True),
198
+ history,
199
+ rendered
200
+ )
201
+
202
+ run_btn.click(
203
+ wrapper,
204
+ inputs=[server_url_state, request_timeout, history_state] + list(inputs.values()),
205
+ outputs=[prompt_call_response, history_state, history_panel]
206
+ )
207
+
208
+ prompt_call_response = gr.Markdown(label="Result", visible=False)
209
+
210
+ with gr.Accordion("Debug Info", open=False):
211
+ prompt_list_request = gr.Code(label="List Request", language="json")
212
+ prompt_list_response = gr.Code(label="List Response", language="json")
213
+ prompt_call_request = gr.Code(label="Invocation Request", language="json")
214
+ with gr.Tab("Tools"):
215
+ with gr.Row():
216
+ with gr.Column(scale=1):
217
+ gr.Markdown("### Tools")
218
+ list_tools_btn = gr.Button("List Tools")
219
+ clear_tools_btn = gr.Button("Clear")
220
+ tool_list = gr.Radio(label="Available Tools", choices=[], interactive=True)
221
+ tool_empty_msg = gr.Markdown("⚠️ No tools found.", visible=False)
222
+
223
+ with gr.Column(scale=2):
224
+ @gr.render(inputs=[tool_list, tools_state])
225
+ def render_tool_form(tool_name, tools):
226
+ if not tool_name or not tools:
227
+ gr.Markdown("### Select a tool")
228
+ gr.Markdown("Select a tool from the list to view its details and run it")
229
+ return
230
+
231
+ tool = next((t for t in tools if t["name"] == tool_name), None)
232
+ if not tool:
233
+ return
234
+
235
+ gr.Markdown(f"### {tool['name']}")
236
+ gr.Markdown(tool.get("description", "No description provided."))
237
+
238
+ schema = tool.get("inputSchema", {})
239
+ properties = schema.get("properties", {})
240
+ required = schema.get("required", [])
241
+
242
+ inputs = {}
243
+
244
+ with gr.Group():
245
+ for prop_name, prop_schema in properties.items():
246
+ label = prop_name + (" *" if prop_name in required else "")
247
+ prop_type = prop_schema.get("type", "string")
248
+ desc = prop_schema.get("description", "")
249
+ default_val = prop_schema.get("default", None)
250
+ enum_vals = prop_schema.get("enum", None)
251
+
252
+ if enum_vals:
253
+ inputs[prop_name] = gr.Dropdown(
254
+ label=label,
255
+ choices=enum_vals,
256
+ value=default_val,
257
+ info=desc
258
+ )
259
+ elif prop_type == "string":
260
+ inputs[prop_name] = gr.Textbox(
261
+ label=label,
262
+ placeholder=desc,
263
+ value=default_val if default_val is not None else ""
264
+ )
265
+ elif prop_type == "integer" or prop_type == "number":
266
+ inputs[prop_name] = gr.Number(
267
+ label=label,
268
+ info=desc,
269
+ value=default_val
270
+ )
271
+ elif prop_type == "boolean":
272
+ inputs[prop_name] = gr.Checkbox(
273
+ label=label,
274
+ info=desc,
275
+ value=default_val if default_val is not None else False
276
+ )
277
+ else:
278
+ # For arrays/objects, try to show default as JSON if exists
279
+ def_json = json.dumps(default_val) if default_val is not None else "{}"
280
+ inputs[prop_name] = gr.Code(
281
+ label=label + " (JSON)",
282
+ language="json",
283
+ value=def_json
284
+ )
285
+
286
+ run_btn = gr.Button("Run Tool", variant="primary")
287
+
288
+ async def wrapper(base_url, timeout, history, *form_values):
289
+ args = {}
290
+ keys = list(inputs.keys())
291
+ for i, key in enumerate(keys):
292
+ val = form_values[i]
293
+ # Include only values that are neither None nor empty strings, so optional fields are not sent as blanks.
295
+ if val is not None and val != "":
296
+ args[key] = val
297
+
298
+ # Convert timeout (ms) to seconds
299
+ timeout_sec = float(timeout) / 1000.0 if timeout else 30.0
300
+ return await invoke_tool_with_history(base_url, timeout_sec, tool_name, json.dumps(args), history)
301
+
302
+ run_btn.click(
303
+ wrapper,
304
+ inputs=[server_url_state, request_timeout, history_state] + list(inputs.values()),
305
+ outputs=[tool_call_request, tool_call_response, history_state, history_panel]
306
+ )
307
+ tool_call_response = gr.Markdown(label="Tool Result")
308
+ with gr.Accordion("Debug Info", open=False):
309
+ tool_list_request = gr.Code(label="List Request", language="json")
310
+ tool_list_response = gr.Code(label="List Response", language="json")
311
+ tool_call_request = gr.Code(label="Invocation Request", language="json") # Moved outside render block
312
+
313
+ with gr.Tab("Ping"):
314
+ with gr.Row():
315
+ gr.Column(scale=1)
316
+ with gr.Column(scale=0, min_width=150):
317
+ ping_btn = gr.Button("Ping Server", variant="primary")
318
+ gr.Column(scale=1)
319
+ with gr.Tab("Sampling"):
320
+ gr.Markdown("When the server requests LLM sampling, requests will appear here for approval.")
321
+
322
+ gr.Markdown("### Recent Requests")
323
+
324
+ sampling_log_display = gr.Markdown("No pending requests")
325
+
326
+ with gr.Row():
327
+ refresh_log_btn = gr.Button("Refresh", variant="primary")
328
+
329
+ refresh_log_btn.click(
330
+ lambda: get_sampling_log(),
331
+ outputs=[sampling_log_display]
332
+ )
333
+
334
+ with gr.Tab("Roots"):
335
+ gr.Markdown("Display root resources reported by the server here.")
336
+
337
+ with gr.Tab("Auth"):
338
+ gr.Markdown("Use the Authentication accordion on the left to manage credentials.")
339
+
340
+ with gr.Row():
341
+ with gr.Column():
342
+ gr.Markdown("### History")
343
+ history_panel = gr.HTML(value="<p><em>No calls yet</em></p>")
344
+ with gr.Column():
345
+ gr.Markdown("### Server Notifications")
346
+ notifications_panel = gr.Markdown(value="_No notifications yet_")
347
+
348
+ # Wiring (done after layout so every component is defined)
349
+ list_resources_btn.click(
350
+ list_resources_with_history,
351
+ inputs=[server_url_state, request_timeout, history_state],
352
+ outputs=[
353
+ resource_list_request,
354
+ resource_list_response,
355
+ resource_list,
356
+ resource_empty_msg,
357
+ resources_state,
358
+ history_state,
359
+ history_panel,
360
+ ],
361
+ )
362
+
363
+ list_templates_btn.click(
364
+ list_resource_templates_with_history,
365
+ inputs=[server_url_state, request_timeout, history_state],
366
+ outputs=[
367
+ resource_list_request,
368
+ resource_list_response,
369
+ template_list,
370
+ template_empty_msg,
371
+ templates_state,
372
+ history_state,
373
+ history_panel,
374
+ ],
375
+ )
376
+
377
+ resource_list.change(
378
+ on_resource_select,
379
+ inputs=[server_url_state, request_timeout, resource_list, history_state],
380
+ outputs=[
381
+ resource_read_request,
382
+ resource_read_response,
383
+ resource_content_view,
384
+ history_state,
385
+ history_panel,
386
+ ],
387
+ )
388
+
389
+ clear_resources_btn.click(
390
+ lambda: (gr.update(choices=[], value=None, visible=True), gr.update(value="", visible=False), gr.update(visible=False)),
391
+ outputs=[resource_list, resource_content_view, resource_empty_msg]
392
+ )
393
+
394
+ clear_templates_btn.click(
395
+ lambda: (gr.update(choices=[], value=None, visible=True), gr.update(visible=False)),
396
+ outputs=[template_list, template_empty_msg]
397
+ )
398
+
399
+ list_prompts_btn.click(
400
+ list_prompts_with_history,
401
+ inputs=[server_url_state, request_timeout, history_state],
402
+ outputs=[
403
+ prompt_list_request,
404
+ prompt_list_response,
405
+ prompt_list,
406
+ prompt_empty_msg,
407
+ prompts_state,
408
+ history_state,
409
+ history_panel,
410
+ ],
411
+ )
412
+
413
+ # prompt_list.change(
414
+ # lambda: (gr.update(visible=False), gr.update(visible=True)),
415
+ # outputs=[prompt_instruction, prompt_details]
416
+ # )
417
+
418
+ # invoke_prompt_btn.click(
419
+ # invoke_prompt_with_history,
420
+ # inputs=[
421
+ # server_url_state,
422
+ # timeout_seconds,
423
+ # prompt_list,
424
+ # prompt_args,
425
+ # history_state,
426
+ # ],
427
+ # outputs=[
428
+ # prompt_call_request,
429
+ # prompt_call_response,
430
+ # history_state,
431
+ # history_panel,
432
+ # ],
433
+ # )
434
+
435
+ clear_prompts_btn.click(
436
+ lambda: (gr.update(choices=[], value=None), gr.update(visible=False)),
437
+ outputs=[prompt_list, prompt_empty_msg],
438
+ )
439
+
440
+ list_tools_btn.click(
441
+ list_tools_with_history,
442
+ inputs=[server_url_state, request_timeout, history_state],
443
+ outputs=[
444
+ tool_list_request,
445
+ tool_list_response,
446
+ tool_list,
447
+ tool_empty_msg,
448
+ tools_state,
449
+ history_state,
450
+ history_panel,
451
+ ],
452
+ )
453
+
454
+ clear_tools_btn.click(
455
+ lambda: (gr.update(choices=[], value=None), gr.update(visible=False)),
456
+ outputs=[tool_list, tool_empty_msg],
457
+ )
458
+
459
+ ping_btn.click(
460
+ ping_with_history,
461
+ inputs=[server_url_state, request_timeout, history_state],
462
+ outputs=[history_state, history_panel],
463
+ )
464
+
465
+ theme_selector.change(
466
+ None,
467
+ inputs=theme_selector,
468
+ js="""(theme) => {
469
+ if (theme === "Dark") {
470
+ document.body.classList.add('dark');
471
+ } else {
472
+ document.body.classList.remove('dark');
473
+ }
474
+ }"""
475
+ )
476
+
477
+ # send_btn.click(
478
+ # custom_request_with_history,
479
+ # inputs=[server_url_state, method, params, timeout_seconds, history_state],
480
+ # outputs=[
481
+ # custom_request_box,
482
+ # custom_response_box,
483
+ # history_state,
484
+ # history_panel,
485
+ # ],
486
+ # )
487
+
488
+ from theme import CustomLightTheme
489
+
490
+ if __name__ == "__main__":
491
+ # Note: Gradio applies themes via gr.Blocks(theme=...); launch() does not accept a theme kwarg.
+ app.launch()
+ # To use the custom theme, pass theme=CustomLightTheme() to gr.Blocks(...) above.
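The Tools tab above derives its input widgets from each tool's JSON Schema. The sketch below isolates that mapping into a standalone helper; the function name is ours, but the Gradio component calls mirror the ones used in the tab.

```python
# Sketch: map a JSON Schema property to a Gradio input component (mirrors the Tools tab logic).
import json
import gradio as gr

def component_for_property(prop_name: str, prop_schema: dict, required: list[str]) -> gr.components.Component:
    label = prop_name + (" *" if prop_name in required else "")
    desc = prop_schema.get("description", "")
    default = prop_schema.get("default")
    if prop_schema.get("enum"):
        # Enumerations become dropdowns regardless of their declared type.
        return gr.Dropdown(label=label, choices=prop_schema["enum"], value=default, info=desc)
    prop_type = prop_schema.get("type", "string")
    if prop_type == "string":
        return gr.Textbox(label=label, placeholder=desc, value=default if default is not None else "")
    if prop_type in ("integer", "number"):
        return gr.Number(label=label, info=desc, value=default)
    if prop_type == "boolean":
        return gr.Checkbox(label=label, info=desc, value=bool(default))
    # Arrays and objects fall back to a raw JSON editor.
    return gr.Code(label=f"{label} (JSON)", language="json",
                   value=json.dumps(default) if default is not None else "{}")
```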
handlers.py ADDED
@@ -0,0 +1,247 @@
1
+ from __future__ import annotations
2
+
3
+ import gradio as gr
4
+ import json
5
+ from fastmcp.client.sampling import SamplingMessage, SamplingParams, RequestContext
6
+
7
+ from mcp_client import (
8
+ connect as mcp_connect,
9
+ disconnect as mcp_disconnect,
10
+ invoke_prompt,
11
+ invoke_tool,
12
+ list_prompts,
13
+ list_resources,
14
+ list_resource_templates,
15
+ list_tools,
16
+ ping_server,
17
+ read_resource,
18
+ send_custom_request,
19
+ )
20
+
21
+
22
+ def _render_history(history: list[dict]) -> str:
23
+ if not history:
24
+ return "_No calls yet_"
25
+
26
+ html_parts = []
27
+ total_items = len(history)
28
+ for idx, entry in enumerate(history):
29
+ display_num = total_items - idx
30
+ method = entry["method"]
31
+ request = entry["request"]
32
+ response = entry["response"]
33
+
34
+ # Create an accordion-style HTML using details/summary
35
+ html = f"""
36
+ <details style="margin-bottom: 10px; border: 1px solid #ddd; border-radius: 4px; padding: 10px;">
37
+ <summary style="cursor: pointer; font-weight: bold; user-select: none;">
38
+ {display_num}. {method}
39
+ </summary>
40
+ <div style="margin-top: 10px;">
41
+ <div style="margin-bottom: 10px;">
42
+ <strong>Request:</strong>
43
+ <pre style="background: #333333; color: #ffffff; padding: 10px; border-radius: 4px; overflow-x: auto; max-height: 300px;">{request}</pre>
44
+ </div>
45
+ <div>
46
+ <strong>Response:</strong>
47
+ <pre style="background: #333333; color: #ffffff; padding: 10px; border-radius: 4px; overflow-x: auto; max-height: 300px;">{response}</pre>
48
+ </div>
49
+ </div>
50
+ </details>
51
+ """
52
+ html_parts.append(html)
53
+
54
+ return "\n".join(html_parts)
55
+
56
+
57
+ def _update_history(history: list[dict] | None, method: str, request: str, response: str):
58
+ history = history or []
59
+ history.insert(
60
+ 0,
61
+ {
62
+ "method": method,
63
+ "request": request,
64
+ "response": response,
65
+ },
66
+ )
67
+ return history, _render_history(history)
68
+
69
+
70
+ async def connect(transport: str, url: str, header_name: str, token: str, request_timeout: float, reset_timeout: str, max_timeout: float):
71
+ cleaned_url = url.strip()
72
+ if not cleaned_url:
73
+ return (
74
+ "",
75
+ "**Status:** ⚠️ Provide a server URL.",
76
+ gr.update(visible=True),
77
+ gr.update(visible=False),
78
+ gr.update(visible=False),
79
+ )
80
+
81
+ try:
82
+ # Convert ms to seconds for the client
83
+ timeout_sec = float(request_timeout) / 1000.0 if request_timeout else 10.0
84
+ # Establish persistent connection
85
+ await mcp_connect(cleaned_url, timeout_sec, transport, sampling_handler)
86
+ except Exception as e:
87
+ return (
88
+ cleaned_url,
89
+ f"**Status:** 🔴 Connection failed: {str(e)}",
90
+ gr.update(visible=True),
91
+ gr.update(visible=False),
92
+ gr.update(visible=False),
93
+ )
94
+
95
+ badge = f"**Status:** 🟢 Connected via {transport}."
96
+ if token.strip():
97
+ badge += " (bearer token applied)"
98
+ elif header_name.strip():
99
+ badge += f" ({header_name} header applied)"
100
+ return (
101
+ cleaned_url,
102
+ badge,
103
+ gr.update(visible=False),
104
+ gr.update(visible=True),
105
+ gr.update(visible=True),
106
+ )
107
+
108
+
109
+ async def disconnect():
110
+ await mcp_disconnect()
111
+ return (
112
+ "",
113
+ "**Status:** 🔴 Disconnected.",
114
+ gr.update(visible=True),
115
+ gr.update(visible=False),
116
+ gr.update(visible=False),
117
+ )
118
+
119
+
120
+ async def custom_request_with_history(base_url, method, params, timeout, history):
121
+ request, response = await send_custom_request(base_url, method, params, timeout)
122
+ history, rendered = _update_history(history, method or "custom", request, response)
123
+ return request, response, history, rendered
124
+
125
+
126
+ async def list_tools_with_history(base_url, timeout, history):
127
+ request, response, tools_data = await list_tools(base_url, timeout, sampling_handler)
128
+ names = [t["name"] for t in tools_data]
129
+ update = gr.update(choices=names, value=None, visible=bool(names))
130
+ empty_msg_update = gr.update(visible=not names)
131
+ history, rendered = _update_history(history, "tools/list", request, response)
132
+ return request, response, update, empty_msg_update, tools_data, history, rendered
133
+
134
+
135
+ async def invoke_tool_with_history(base_url, timeout, tool_name, args, history):
136
+ request, response = await invoke_tool(base_url, timeout, tool_name, args, sampling_handler)
137
+ history, rendered = _update_history(history, "tools/call", request, response)
138
+ return request, response, history, rendered
139
+
140
+
141
+ async def list_prompts_with_history(base_url, timeout, history):
142
+ request, response, prompts_data = await list_prompts(base_url, timeout, sampling_handler)
143
+ names = [p["name"] for p in prompts_data]
144
+ update = gr.update(choices=names, value=None, visible=bool(names))
145
+ empty_msg_update = gr.update(visible=not names)
146
+ history, rendered = _update_history(history, "prompts/list", request, response)
147
+ return request, response, update, empty_msg_update, prompts_data, history, rendered
148
+
149
+
150
+ async def invoke_prompt_with_history(base_url, timeout, prompt_name, args, history):
151
+ request, response = await invoke_prompt(base_url, timeout, prompt_name, args, sampling_handler)
152
+ history, rendered = _update_history(history, "prompts/invoke", request, response)
153
+ return request, response, history, rendered
154
+
155
+
156
+ async def list_resources_with_history(base_url, timeout, history):
157
+ request, response, resources_data = await list_resources(base_url, timeout, sampling_handler)
158
+ names = [r["uri"] for r in resources_data]
159
+ update = gr.update(choices=names, value=None, visible=bool(names))
160
+ empty_msg_update = gr.update(visible=not names)
161
+ history, rendered = _update_history(history, "resources/list", request, response)
162
+ return request, response, update, empty_msg_update, resources_data, history, rendered
163
+
164
+
165
+ async def list_resource_templates_with_history(base_url, timeout, history):
166
+ request, response, templates_data = await list_resource_templates(base_url, timeout, sampling_handler)
167
+ names = [t["uriTemplate"] for t in templates_data]
168
+ update = gr.update(choices=names, value=None, visible=bool(names))
169
+ empty_msg_update = gr.update(visible=not names)
170
+ history, rendered = _update_history(history, "resources/templates/list", request, response)
171
+ return request, response, update, empty_msg_update, templates_data, history, rendered
172
+
173
+
174
+ async def read_resource_with_history(base_url, timeout, resource_uri, history):
175
+ request, response = await read_resource(base_url, timeout, resource_uri, sampling_handler)
176
+ history, rendered = _update_history(history, "resources/read", request, response)
177
+ return request, response, history, rendered
178
+
179
+
180
+ async def ping_with_history(base_url, timeout, history):
181
+ request, response = await ping_server(base_url, timeout, sampling_handler)
182
+ history, rendered = _update_history(history, "ping", request, response)
183
+ return history, rendered
184
+
185
+
186
+ # In-memory log of sampling requests received from the server (newest first).
187
+ sampling_log = []
188
+
189
+
190
+ async def sampling_handler(
191
+ messages: list[SamplingMessage], params: SamplingParams, context: RequestContext
192
+ ) -> str:
193
+ """Simple sampling handler that logs requests and returns a mock response."""
194
+ # Extract message content
195
+ conversation = []
196
+ for message in messages:
197
+ content = (
198
+ message.content.text
199
+ if hasattr(message.content, "text")
200
+ else str(message.content)
201
+ )
202
+ conversation.append(f"{message.role}: {content}")
203
+
204
+ # Log the sampling request
205
+ log_entry = {
206
+ "messages": conversation,
207
+ "system_prompt": params.systemPrompt if hasattr(params, "systemPrompt") else None,
208
+ "temperature": params.temperature if hasattr(params, "temperature") else None,
209
+ "max_tokens": params.maxTokens if hasattr(params, "maxTokens") else None,
210
+ }
211
+ sampling_log.insert(0, log_entry)
212
+
213
+ # Return a simple response
214
+ return "This is a mock response from the sampling handler. In a real implementation, this would call an LLM API."
215
+
216
+
217
+ def get_sampling_log():
218
+ """Get the current sampling log."""
219
+ if not sampling_log:
220
+ return "No sampling requests yet."
221
+
222
+ log_text = []
223
+ for idx, entry in enumerate(sampling_log[:10], 1): # Show last 10 entries
224
+ log_text.append(f"### Request {idx}")
225
+ log_text.append(f"**Messages:**")
226
+ for msg in entry["messages"]:
227
+ log_text.append(f"- {msg}")
228
+ if entry["system_prompt"]:
229
+ log_text.append(f"**System Prompt:** {entry['system_prompt']}")
230
+ if entry["temperature"] is not None:
231
+ log_text.append(f"**Temperature:** {entry['temperature']}")
232
+ if entry["max_tokens"]:
233
+ log_text.append(f"**Max Tokens:** {entry['max_tokens']}")
234
+ log_text.append("---")
235
+
236
+ return "\n".join(log_text)
237
+
238
+ async def on_resource_select(base_url, timeout, resource_uri, history):
239
+ request, response = await read_resource(base_url, timeout, resource_uri, sampling_handler)
240
+ history, rendered = _update_history(history, "resources/read", request, response)
241
+ return (
242
+ gr.update(value=request, visible=True),
243
+ gr.update(value=response, visible=True),
244
+ gr.update(value=response, visible=True),
245
+ history,
246
+ rendered
247
+ )
mcp_client.py ADDED
@@ -0,0 +1,206 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from typing import Tuple
5
+ from contextlib import AsyncExitStack
6
+
7
+ from fastmcp import Client
8
+ from fastmcp.client import SSETransport, StreamableHttpTransport
9
+ import mcp.types as types
10
+ import httpx
11
+
12
+ JsonStrPair = Tuple[str, str]
13
+
14
+ # Global client state
15
+ _client_session: Client | None = None
16
+ _client_exit_stack: AsyncExitStack | None = None
17
+
18
+
19
+ async def connect(base_url: str, timeout_seconds: float, transport_type: str, sampling_handler=None):
20
+ global _client_session, _client_exit_stack
21
+ await disconnect()
22
+
23
+ _client_exit_stack = AsyncExitStack()
24
+
25
+ if transport_type == "Streamable HTTP":
26
+ transport = StreamableHttpTransport(base_url)
27
+ else:
28
+ # Default to SSE
29
+ transport = SSETransport(base_url)
30
+
31
+ client = Client(transport=transport, timeout=timeout_seconds, sampling_handler=sampling_handler)
32
+ # Enter the async context to establish connection
33
+ _client_session = await _client_exit_stack.enter_async_context(client)
34
+ return _client_session
35
+
36
+
37
+ async def disconnect():
38
+ global _client_session, _client_exit_stack
39
+ if _client_exit_stack:
40
+ await _client_exit_stack.aclose()
41
+ _client_session = None
42
+ _client_exit_stack = None
43
+
44
+
45
+ def _get_client() -> Client:
46
+ if _client_session is None:
47
+ raise RuntimeError("Client not connected. Please connect first.")
48
+ return _client_session
49
+
50
+
51
+ async def list_resources(base_url: str, timeout_seconds: float, sampling_handler=None) -> tuple[str, str, list[dict]]:
52
+ try:
53
+ client = _get_client()
54
+ resources = await client.list_resources()
55
+ # Convert Pydantic models to list of dicts then to formatted JSON
56
+ resources_data = [r.model_dump() for r in resources]
57
+ return "list_resources()", json.dumps(resources_data, indent=2), resources_data
58
+ except Exception as e:
59
+ return "Error", str(e), []
60
+
61
+
62
+ async def list_resource_templates(base_url: str, timeout_seconds: float, sampling_handler=None) -> tuple[str, str, list[dict]]:
63
+ try:
64
+ client = _get_client()
65
+ templates = await client.list_resource_templates()
66
+ templates_data = [t.model_dump() for t in templates]
67
+ return "list_resource_templates()", json.dumps(templates_data, indent=2), templates_data
68
+ except Exception as e:
69
+ return "Error", str(e), []
70
+
71
+
72
+ async def read_resource(base_url: str, timeout_seconds: float, resource_uri: str, sampling_handler=None) -> JsonStrPair:
73
+ if not resource_uri:
74
+ return "", "Select a resource first."
75
+ try:
76
+ client = _get_client()
77
+ resource = await client.read_resource(resource_uri)
78
+ return f"read_resource({resource_uri})", resource.model_dump_json(indent=2)
79
+ except Exception as e:
80
+ return "Error", str(e)
81
+
82
+
83
+ async def list_prompts(base_url: str, timeout_seconds: float, sampling_handler=None) -> tuple[str, str, list[dict]]:
84
+ try:
85
+ client = _get_client()
86
+ prompts = await client.list_prompts()
87
+ prompts_data = [p.model_dump() for p in prompts]
88
+ return "list_prompts()", json.dumps(prompts_data, indent=2), prompts_data
89
+ except Exception as e:
90
+ return "Error", str(e), []
91
+
92
+
93
+ async def invoke_prompt(
94
+ base_url: str, timeout_seconds: float, prompt_name: str, arguments_text: str, sampling_handler=None
95
+ ) -> JsonStrPair:
96
+ if not prompt_name:
97
+ return "", "Select a prompt first."
98
+ try:
99
+ args = json.loads(arguments_text) if arguments_text.strip() else {}
100
+ except ValueError as exc:
101
+ return "", f"Invalid JSON arguments: {exc}"
102
+
103
+ try:
104
+ client = _get_client()
105
+ # Use manual request to ensure empty arguments dict is sent (workaround for potential fastmcp/mcp issue)
106
+ result = await client.session.send_request(
107
+ types.GetPromptRequest(
108
+ method="prompts/get",
109
+ params=types.GetPromptRequestParams(
110
+ name=prompt_name,
111
+ arguments=args
112
+ )
113
+ ),
114
+ types.GetPromptResult
115
+ )
116
+ # Extract text from messages
117
+ messages_text = []
118
+ if hasattr(result, "messages"):
119
+ for msg in result.messages:
120
+ if hasattr(msg, "content"):
121
+ content = msg.content
122
+ if hasattr(content, "text"):
123
+ messages_text.append(content.text)
124
+ elif isinstance(content, dict) and "text" in content:
125
+ messages_text.append(content["text"])
126
+
127
+ final_result = "\n\n".join(messages_text) if messages_text else "No text content found in prompt result."
128
+ return f"get_prompt({prompt_name})", final_result
129
+ except Exception as e:
130
+ return "Error", str(e)
131
+
132
+
133
+ async def list_tools(base_url: str, timeout_seconds: float, sampling_handler=None) -> tuple[str, str, list[dict]]:
134
+ try:
135
+ client = _get_client()
136
+ tools = await client.list_tools()
137
+ # Return list of tool dictionaries (including schema)
138
+ tools_data = [t.model_dump() for t in tools]
139
+ return "list_tools()", json.dumps(tools_data, indent=2), tools_data
140
+ except Exception as e:
141
+ return "Error", str(e), []
142
+
143
+
144
+ async def invoke_tool(
145
+ base_url: str, timeout_seconds: float, tool_name: str, arguments_text: str, sampling_handler=None
146
+ ) -> JsonStrPair:
147
+ if not tool_name:
148
+ return "", "Select a tool first."
149
+ try:
150
+ args = json.loads(arguments_text) if arguments_text.strip() else {}
151
+ except ValueError as exc:
152
+ return "", f"Invalid JSON arguments: {exc}"
153
+
154
+ try:
155
+ client = _get_client()
156
+ result = await client.call_tool(tool_name, arguments=args)
157
+
158
+ # Extract content from the result
159
+ content = []
160
+ if hasattr(result, "content"):
161
+ for item in result.content:
162
+ if hasattr(item, "text"):
163
+ content.append(item.text)
164
+ elif isinstance(item, dict) and "text" in item:
165
+ content.append(item["text"])
166
+ else:
167
+ content.append(str(item))
168
+
169
+ final_result = "\n".join(content) if content else "No content returned."
170
+ return f"call_tool({tool_name})", final_result
171
+ except Exception as e:
172
+ return "Error", str(e)
173
+
174
+
175
+ async def ping_server(base_url: str, timeout_seconds: float, sampling_handler=None) -> JsonStrPair:
176
+ try:
177
+ client = _get_client()
178
+ await client.ping()
179
+ return "ping()", json.dumps({"status": "Pong"}, indent=2)
180
+ except Exception as e:
181
+ return "Error", str(e)
182
+
183
+
184
+ async def send_custom_request(
185
+ base_url: str, method: str, params_text: str, timeout_seconds: float
186
+ ) -> JsonStrPair:
187
+ # Fallback to manual httpx for custom requests since FastMCP Client is high-level
188
+ try:
189
+ params = json.loads(params_text) if params_text.strip() else {}
190
+ except ValueError as exc:
191
+ return "", f"Invalid JSON params: {exc}"
192
+
193
+ payload = {
194
+ "jsonrpc": "2.0",
195
+ "id": 1,
196
+ "method": method,
197
+ "params": params,
198
+ }
199
+
200
+ try:
201
+ async with httpx.AsyncClient(timeout=timeout_seconds) as client:
202
+ response = await client.post(base_url, json=payload)
203
+ response.raise_for_status()
204
+ return json.dumps(payload, indent=2), json.dumps(response.json(), indent=2)
205
+ except Exception as exc:
206
+ return json.dumps(payload, indent=2), f"Error: {exc}"
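The client helpers above can also be driven outside Gradio, which is handy for quick checks. A minimal sketch, assuming a Streamable HTTP MCP server is reachable at the placeholder URL:

```python
import asyncio

from mcp_client import connect, disconnect, list_tools

async def main() -> None:
    # Placeholder URL; point this at any Streamable HTTP MCP endpoint.
    await connect("http://127.0.0.1:8000/mcp", timeout_seconds=10.0, transport_type="Streamable HTTP")
    try:
        # base_url/timeout are ignored once a persistent client exists; see _get_client().
        request, response, tools = await list_tools("", 10.0)
        print(request, f"{len(tools)} tool(s) listed")
    finally:
        await disconnect()

asyncio.run(main())
```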
pyproject.toml ADDED
@@ -0,0 +1,11 @@
+ [project]
+ name = "gradio-mcp-inspector"
+ version = "0.1.0"
+ description = "Debug and test remote MCP servers via Gradio."
+ readme = "README.md"
+ requires-python = ">=3.13"
+ dependencies = [
+     "fastmcp>=2.13.1",
+     "gradio>=6.0.1",
+     "httpx>=0.28.1",
+ ]
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ fastmcp>=2.13.1
+ gradio>=5.0.0
+ httpx>=0.28.1
theme.py ADDED
@@ -0,0 +1,72 @@
1
+ from __future__ import annotations
2
+ from typing import Iterable
3
+ import gradio as gr
4
+ from gradio.themes.base import Base
5
+ from gradio.themes.utils import colors, fonts, sizes
6
+
7
+ class CustomLightTheme(Base):
8
+ def __init__(
9
+ self,
10
+ *,
11
+ primary_hue: colors.Color | str = colors.indigo,
12
+ secondary_hue: colors.Color | str = colors.blue,
13
+ neutral_hue: colors.Color | str = colors.slate,
14
+ spacing_size: sizes.Size | str = sizes.spacing_md,
15
+ radius_size: sizes.Size | str = sizes.radius_lg,
16
+ text_size: sizes.Size | str = sizes.text_md,
17
+ font: fonts.Font | str | Iterable[fonts.Font | str] = (
18
+ fonts.GoogleFont("Inter"),
19
+ "ui-sans-serif",
20
+ "system-ui",
21
+ "sans-serif",
22
+ ),
23
+ font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
24
+ fonts.GoogleFont("JetBrains Mono"),
25
+ "ui-monospace",
26
+ "Consolas",
27
+ "monospace",
28
+ ),
29
+ ):
30
+ super().__init__(
31
+ primary_hue=primary_hue,
32
+ secondary_hue=secondary_hue,
33
+ neutral_hue=neutral_hue,
34
+ spacing_size=spacing_size,
35
+ radius_size=radius_size,
36
+ text_size=text_size,
37
+ font=font,
38
+ font_mono=font_mono,
39
+ )
40
+ super().set(
41
+ body_background_fill="white",
42
+ body_background_fill_dark="*neutral_950",
43
+ body_text_color="*neutral_950",
44
+ body_text_color_dark="*neutral_50",
45
+ background_fill_primary="white",
46
+ background_fill_primary_dark="*neutral_900",
47
+ background_fill_secondary="#f9fafb",
48
+ background_fill_secondary_dark="*neutral_800",
49
+ border_color_primary="#e5e7eb",
50
+ border_color_primary_dark="*neutral_700",
51
+ block_background_fill="white",
52
+ block_background_fill_dark="*neutral_900",
53
+ block_label_background_fill="white",
54
+ block_label_background_fill_dark="*neutral_900",
55
+ input_background_fill="white",
56
+ input_background_fill_dark="*neutral_800",
57
+ button_primary_background_fill="*primary_600",
58
+ button_primary_background_fill_hover="*primary_700",
59
+ button_primary_text_color="white",
60
+ button_secondary_background_fill="white",
61
+ button_secondary_background_fill_hover="#f3f4f6",
62
+ button_secondary_text_color="*neutral_800",
63
+ block_title_text_color="*neutral_800",
64
+ block_title_text_color_dark="*neutral_200",
65
+ block_label_text_color="*neutral_500",
66
+ block_label_text_color_dark="*neutral_400",
67
+ input_border_color="#e5e7eb",
68
+ input_border_color_dark="*neutral_700",
69
+ input_border_width="1px",
70
+ input_shadow="0 1px 2px 0 rgb(0 0 0 / 0.05)",
71
+ input_shadow_focus="0 0 0 2px *primary_500",
72
+ )
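For completeness, a minimal sketch of applying the theme: in Gradio a theme is passed to `gr.Blocks(...)` rather than to `launch()`, which is why the commented-out `app.launch(theme=CustomLightTheme())` line in app.py stays disabled.

```python
import gradio as gr

from theme import CustomLightTheme

with gr.Blocks(theme=CustomLightTheme(), title="Gradio MCP Inspector") as demo:
    gr.Markdown("CustomLightTheme applied via gr.Blocks(theme=...).")

if __name__ == "__main__":
    demo.launch()
```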
uv.lock ADDED
The diff for this file is too large to render.