Cardiosense-AG committed on
Commit fc9442e · verified · 1 Parent(s): ff730ad

Update pages/02_Workflow_UI.py

Files changed (1)
  1. pages/02_Workflow_UI.py +250 -148
pages/02_Workflow_UI.py CHANGED
@@ -1,141 +1,134 @@
  # pages/02_Workflow_UI.py
- # -----------------------------------------------------------------------------
- # Why this change
- # -----------------------------------------------------------------------------
- # - Aligns the UI with the Phase 2 orchestration:
- #   * Removed obsolete arguments (e.g., rag_toggles that forced mixing)
- #   * Keeps generation natural-language for Assessment/Plan (no RAG injection)
- # - Adds a compact Explainability hook: if src.explainability.render_explainability_block
- #   is present, the page will render the post-hoc block under an expander.
- # - Keeps logging minimal and UI latency obvious.
-
  from __future__ import annotations

- import json
  import time
  import uuid
- from typing import Any, Dict, List, Optional

  import streamlit as st

- from src.ai_core import generate_soap_draft
  from src.prompt_builder import build_referral_summary

- # Optional evidence mapping panel (if present)
- try:
-     from src.reasoning_panel import render_reasoning_panel  # type: ignore
-     _HAS_REASONING_PANEL = True
- except Exception:
-     _HAS_REASONING_PANEL = False
-
- # Explainability provider (render_explainability_block)
  try:
-     from src.explainability import render_explainability_block  # type: ignore
-     _HAS_EXPLAINABILITY = True
  except Exception:
-     _HAS_EXPLAINABILITY = False
-
-
- # --------------------------- Small utilities ---------------------------------
-
- def _parse_semver(v: str) -> tuple:
-     try:
-         parts = v.split(".")
-         major = int(parts[0]); minor = int(parts[1]); patch = int((parts[2] or "0").split("+")[0])
-         return (major, minor, patch)
-     except Exception:
-         return (0, 0, 0)
-
-
- def _render_timings(t0: float, t1: float, label: str = "Total"):
-     st.caption(f"{label} runtime: **{t1 - t0:.2f}s**")
-
-
- def _render_json(data: Dict[str, Any], label: str, key: str):
-     with st.expander(label, expanded=False):
-         st.json(data, expanded=False)
-
-
- def _render_explainability(soap: Dict[str, Any], mapping: Dict[str, Any], raw_text: str, summary: str):
-     if not _HAS_EXPLAINABILITY:
-         st.info("Explainability provider not available.")
-         return
-     try:
-         import inspect
-         sig = inspect.signature(render_explainability_block)
-         kwargs = {}
-         if "soap" in sig.parameters:
-             kwargs["soap"] = soap
-         if "mapping" in sig.parameters:
-             kwargs["mapping"] = mapping
-         if "raw_text" in sig.parameters:
-             kwargs["raw_text"] = raw_text
-         if "summary" in sig.parameters:
-             kwargs["summary"] = summary
-         if kwargs:
-             render_explainability_block(**kwargs)
-         else:
-             # Positional fallback: try soap first
-             render_explainability_block(soap)  # type: ignore
-     except Exception as e:
-         st.warning(f"Explainability failed: {e}")
-
-
- # ----------------------------- Page layout -----------------------------------
-
- st.set_page_config(page_title="AI E-Consult — Workflow", layout="wide")

- st.title("Workflow — Intake → Draft → Assessment/Plan")
-
- # Environment/version hint
- try:
-     import transformers  # noqa
-     ver = getattr(transformers, "__version__", "unknown")
-     min_needed = (4, 50, 0)  # recommended for med-gemma paths + generate kwargs
-     ok = _parse_semver(ver) >= min_needed
-     st.caption(
-         f"transformers detected: {ver} — "
-         + ("✅ meets recommended minimum." if ok else f"⚠️ recommend ≥ {'.'.join(map(str, min_needed))}.")
-     )
- except Exception:
-     st.caption("transformers not detected.")

- # --------------------------- Intake form -------------------------------------

- with st.form("intake_form", clear_on_submit=False):
      c1, c2, c3 = st.columns([1, 1, 2])
      with c1:
-         age = st.number_input("Age", min_value=0, max_value=120, value=63, key="age")
-         sex = st.selectbox("Sex", ["M", "F", "Other"], index=0, key="sex")
      with c2:
-         chief_complaint = st.text_input("Chief complaint", value="Statin intolerance with high LDL", key="chief_complaint")
-         question = st.text_input("Consult question", value="How to manage LDL with statin intolerance?", key="question")
      with c3:
-         history = st.text_area("History / Notes", height=120, key="history", value="Stopped rosuvastatin due to myalgias; FHx premature ASCVD. Prior LDL 190.")
-         medications = st.text_area("Medications", height=80, key="medications", value="Ezetimibe 10 mg daily; rosuvastatin discontinued.")
-         allergies = st.text_area("Allergies", height=60, key="allergies", value="No known drug allergies.")

-     labs = st.text_area("Labs (optional)", height=80, key="labs", value="LDL 172 mg/dL; HDL 38 mg/dL; TG 210 mg/dL; HbA1c 5.8%")
-     imaging = st.text_area("Imaging (optional)", height=80, key="imaging", value="")
      submitted = st.form_submit_button("Generate Draft")

- # Toggles / params
- with st.sidebar:
-     st.header("Generation Options")
-     max_new_tokens = st.slider("Max new tokens", 256, 1200, 700, step=32)
-     temperature = st.slider("Temperature", 0.0, 1.2, 0.2, step=0.05)
-     top_p = st.slider("Top-p", 0.1, 1.0, 0.95, step=0.05)
-
-     st.header("Evidence / Explainability")
-     explain = st.checkbox("Enable Explainability (post‑hoc)", value=False, key="explainability_toggle")
-
-     st.caption("Explainability is post‑hoc only; no RAG is injected into prompts.")
-
- # ------------------------- Generate & render ---------------------------------

  if submitted:
-     t0 = time.time()
-     # Build normalized referral summary for the generator
      intake = {
          "age": age,
          "sex": sex,
@@ -148,50 +141,159 @@ if submitted:
          "question": question,
      }
      summary = build_referral_summary(intake)

-     # Generate SOAP-like JSON (Assessment/Plan natural-language; no RAG in prompt)
-     soap, raw_text, mapping = generate_soap_draft(
          referral_summary=summary,
          max_new_tokens=max_new_tokens,
          temperature=temperature,
          top_p=top_p,
-         mode="mapping",  # keep mapping mode to populate registry; still no RAG in prompt
      )
      t1 = time.time()

-     # Subjective/Objective editors (simple text)
-     st.subheader("Subjective / Objective")
-     col1, col2 = st.columns(2)
-     with col1:
-         subj = st.text_area("Subjective", value="\n".join(soap.get("subjective", [])), height=180, key="subj_text")
-     with col2:
-         obj = st.text_area("Objective", value="\n".join(soap.get("objective", [])), height=180, key="obj_text")

-     # Assessment & Plan (AI output, editable)
      st.subheader("Assessment / Plan")
-     col3, col4 = st.columns(2)
-     with col3:
-         assess_text = st.text_area("Assessment (editable)", value="\n".join(soap.get("assessment", [])), height=220, key="assess_text")
-     with col4:
-         plan_text = st.text_area("Plan (editable)", value="\n".join(soap.get("plan", [])), height=220, key="plan_text")
-
-     # Evidence mapping (if available)
-     if _HAS_REASONING_PANEL:
-         try:
-             render_reasoning_panel(mapping)
-         except Exception as e:
-             st.warning(f"Evidence mapping failed: {e}")
-
-     # Explainability (optional)
-     if explain:
-         st.subheader("Explainability")
-         _render_explainability(soap, mapping, raw_text, summary)
-
-     # Timings
-     st.caption(f"Generation runtime: **{t1 - t0:.2f}s**")
-
-     # Raw JSON (for debugging/export)
-     _render_json(soap, "Generated SOAP JSON", key="soap_json")
 
  # pages/02_Workflow_UI.py
  from __future__ import annotations

+ import hashlib
+ import os
  import time
  import uuid
+ from typing import Any, Dict, List

  import streamlit as st

  from src.prompt_builder import build_referral_summary
+ from src.ai_core import generate_soap_draft
+ from src.explainability import compute_referral_tokens_for_section, segment_claims
+ from src.guideline_annotator import find_guideline_refs_for_section, assign_endnotes
+ from src.log_utils import write_audit_log

+ # Canonical UI helpers from Phase 1A prototype (preferred)
  try:
+     from src.ui_helpers import render_referral_tokens, render_guideline_refs, ensure_dirs  # type: ignore
  except Exception:
+     def render_referral_tokens(tokens: List[Dict]) -> None:
+         st.write(tokens)
+
+     def render_guideline_refs(refs: List[Dict]) -> None:
+         for r in refs:
+             st.markdown(f"- [{r.get('n')}] **{r.get('doc')}** — p.{r.get('page')}: {r.get('excerpt')}")
+
+     def ensure_dirs(paths: List[str]) -> None:
+         for p in paths:
+             os.makedirs(p, exist_ok=True)
+
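+ # The fallbacks above mirror the signatures this page expects from
+ # src.ui_helpers and are deliberately minimal, so the page still renders
+ # (with plainer output) when the Phase 1A helpers are absent.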
+ EXPLAINABILITY_ENABLED = os.environ.get("EXPLAINABILITY_ENABLED", "1").lower() in {"1", "true", "yes"}
+
+ PAGE_KEY = "workflow_v3"
+
+ # ------------------------- helpers -------------------------
+
+ def _hash_text(s: str) -> str:
+     return hashlib.sha256((s or "").strip().encode("utf-8")).hexdigest()
+
+ def _section_status(ai_text: str, current_text: str) -> Dict[str, Any]:
+     ai_h = _hash_text(ai_text)
+     cur_h = _hash_text(current_text)
+     modified = ai_h != cur_h
+     return {"ai_hash": ai_h, "cur_hash": cur_h, "modified": modified}
+
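+ # Edit detection compares SHA-256 hashes of the stripped text, so changes
+ # to leading/trailing whitespace do not flag a section as manually
+ # modified; any other character change does.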
+ def _banner_for_section(section_name: str, modified: bool):
+     if modified:
+         st.warning(f"⚠️ **Manual Modification — {section_name} referral tokens frozen.**")
+     else:
+         st.success(f"✅ **AI‑generated — {section_name}**")
+
+ def _setup_session():
+     if PAGE_KEY not in st.session_state:
+         st.session_state[PAGE_KEY] = {
+             "case_id": None,
+             "endnote_registry": {},  # (doc::pX) -> n
+             "assessment": {"ai": "", "text": "", "tokens": [], "refs": []},
+             "plan": {"ai": "", "text": "", "tokens": [], "refs": []},
+             "summary": "",
+             "gen_params": {},
+             "metrics": {"coverage_percent": 0.0, "evidence_count": 0},
+         }
+
+ def _calc_coverage(section_text: str, refs: List[Dict]) -> float:
+     claims = segment_claims(section_text)
+     if not claims:
+         return 0.0
+     # Approximation: any refs give 100% for a single-claim section, but only
+     # a 50% baseline for a multi-claim section (refs are not matched per claim)
+     has_any = 1 if refs else 0
+     return 100.0 * (has_any / 2) if len(claims) >= 2 else 100.0 * has_any
+
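+ # Worked example: a section that segments into 3 claims with at least one
+ # ref scores 50.0; one claim with a ref scores 100.0; no refs scores 0.0.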
+ # ------------------------- UI -------------------------
+
+ st.set_page_config(page_title="AI E‑Consult — Explainability Workflow", layout="wide")
+ st.title("E‑Consult Workflow — Assessment/Plan with Explainability")

+ with st.sidebar:
+     st.header("Inputs & Options")
+     sample = st.selectbox("Load sample case", ["—", "lipids", "ckd_dose"])
+     st.caption("Samples populate intake fields; you can edit them.")
+     st.divider()
+     max_new_tokens = st.slider("Max new tokens", 256, 1200, 700, step=32)
+     temperature = st.slider("Temperature", 0.0, 1.2, 0.2, step=0.05)
+     top_p = st.slider("Top‑p", 0.1, 1.0, 0.95, step=0.05)
+     st.caption("MedGemma generates natural‑language Assessment & Plan (no RAG in prompt).")

+     st.header("Explainability")
+     st.caption("Referral tokens are post‑hoc (E5 similarity) and **never exported** to EMR.")
+     st.caption("Guideline refs are post‑hoc via FAISS; you can re‑run them after edits.")
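+     # The Explainability expanders in the editors below are gated by the
+     # EXPLAINABILITY_ENABLED environment flag (on by default).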

+ # Seed samples (note: this must run before the form below instantiates the
+ # widgets that own these keys; Streamlit raises an exception if a widget's
+ # session_state key is written after the widget exists in the same run)
+ if sample == "lipids":
+     st.session_state["cc_v3"] = "Statin intolerance with high LDL"
+     st.session_state["q_v3"] = "How to manage LDL with statin intolerance?"
+     st.session_state["hx_v3"] = "Prior LDL 190. Myalgias on multiple statins. No ASCVD event. Considering PCSK9."
+     st.session_state["meds_v3"] = "Ezetimibe 10 mg daily. CoQ10 OTC."
+     st.session_state["labs_v3"] = "LDL 172 mg/dL; HDL 38 mg/dL; TG 210 mg/dL"
+ elif sample == "ckd_dose":
+     st.session_state["cc_v3"] = "CKD stage 4 — medication dosing question"
+     st.session_state["q_v3"] = "Adjust DOAC and antibiotics dosing?"
+     st.session_state["hx_v3"] = "eGFR 22 mL/min/1.73m2; HTN; T2DM; recent cellulitis."
+     st.session_state["meds_v3"] = "Apixaban 5 mg BID; metformin 1,000 mg BID; TMP-SMX DS; lisinopril 20 mg daily."
+     st.session_state["labs_v3"] = "Creatinine 2.9 mg/dL; K 4.9 mEq/L; HbA1c 7.8%"
+
+ # Intake form
+ with st.form("intake_form_v3", clear_on_submit=False):
      c1, c2, c3 = st.columns([1, 1, 2])
      with c1:
+         age = st.number_input("Age", min_value=0, max_value=120, value=63, key="age_v3")
+         sex = st.selectbox("Sex", ["M", "F", "Other"], index=0, key="sex_v3")
      with c2:
+         chief_complaint = st.text_input("Chief complaint", value="Statin intolerance with high LDL", key="cc_v3")
+         question = st.text_input("Consult question", value="How to manage LDL with statin intolerance?", key="q_v3")
      with c3:
+         history = st.text_area("History / Notes", height=120, key="hx_v3", value="Stopped rosuvastatin due to myalgias; FHx premature ASCVD. Prior LDL 190.")
+         medications = st.text_area("Medications", height=80, key="meds_v3", value="Ezetimibe 10 mg daily; rosuvastatin discontinued.")
+         allergies = st.text_area("Allergies", height=60, key="allergies_v3", value="No known drug allergies.")

+     labs = st.text_area("Labs (optional)", height=80, key="labs_v3", value="LDL 172 mg/dL; HDL 38 mg/dL; TG 210 mg/dL; HbA1c 5.8%")
+     imaging = st.text_area("Imaging (optional)", height=80, key="img_v3", value="")
      submitted = st.form_submit_button("Generate Draft")

+ # State init
+ _setup_session()
+ state = st.session_state[PAGE_KEY]
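+ # Everything the buttons below need lives under st.session_state[PAGE_KEY],
+ # so reruns triggered by those buttons do not lose or regenerate the draft.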

  if submitted:
+     case_id = str(uuid.uuid4())
+     state["case_id"] = case_id
+
      intake = {
          "age": age,
          "sex": sex,
          "chief_complaint": chief_complaint,
          "history": history,
          "medications": medications,
          "allergies": allergies,
          "labs": labs,
          "imaging": imaging,
          "question": question,
      }
      summary = build_referral_summary(intake)
+     state["summary"] = summary
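
+     # generate_soap_draft now returns just (soap, raw_text); the old
+     # "mapping" mode and its third return value are gone, since evidence
+     # is attached post hoc rather than during generation.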
+     t0 = time.time()
+     soap, raw_text = generate_soap_draft(
          referral_summary=summary,
          max_new_tokens=max_new_tokens,
          temperature=temperature,
          top_p=top_p,
      )
      t1 = time.time()

+     # Prepare AI outputs
+     assessment_ai = "\n".join(soap.get("assessment", []))
+     plan_ai = "\n".join(soap.get("plan", []))
+
+     # Compute referral tokens (frozen thereafter)
+     assess_tokens = compute_referral_tokens_for_section(assessment_ai, summary, top_n=4, min_weight=0.03)
+     plan_tokens = compute_referral_tokens_for_section(plan_ai, summary, top_n=4, min_weight=0.03)
+
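+     # Tokens are computed once against the pristine AI text and never
+     # recomputed; after a manual edit they describe the AI draft's
+     # provenance, not the clinician's wording (hence the "frozen" banner).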
+     # Seed guideline refs (initially for AI text)
+     endreg = {}
+     assess_refs_raw = find_guideline_refs_for_section(assessment_ai, top_k_per_claim=3, max_refs_per_claim=2)
+     endreg, assess_refs = assign_endnotes(endreg, assess_refs_raw)
+     plan_refs_raw = find_guideline_refs_for_section(plan_ai, top_k_per_claim=3, max_refs_per_claim=2)
+     endreg, plan_refs = assign_endnotes(endreg, plan_refs_raw)
+
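+     # Threading one registry through both calls lets a source cited in both
+     # Assessment and Plan keep a single endnote number.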
+     # Store in state
+     state["endnote_registry"] = endreg
+     state["assessment"] = {"ai": assessment_ai, "text": assessment_ai, "tokens": assess_tokens, "refs": assess_refs}
+     state["plan"] = {"ai": plan_ai, "text": plan_ai, "tokens": plan_tokens, "refs": plan_refs}
+     state["gen_params"] = {"max_new_tokens": max_new_tokens, "temperature": temperature, "top_p": top_p}
+
+     # Metrics
+     coverage = _calc_coverage(assessment_ai, assess_refs)  # simple proxy
+     evidence_count = len(assess_refs) + len(plan_refs)
+     state["metrics"]["coverage_percent"] = coverage
+     state["metrics"]["evidence_count"] = evidence_count
+
+     # Auto-write audit snapshot
+     try:
+         from transformers import __version__ as tx_ver  # type: ignore
+     except Exception:
+         tx_ver = "unknown"
+
+     payload = {
+         "runtime_sec": (t1 - t0),
+         "sections": {
+             "assessment": {"tokens": state["assessment"]["tokens"], "refs": state["assessment"]["refs"]},
+             "plan": {"tokens": state["plan"]["tokens"], "refs": state["plan"]["refs"]},
+         },
+         "coverage_percent": coverage,
+         "evidence_count": evidence_count,
+         "section_edit_flags": {"assessment": False, "plan": False},
+         "explainability_mode": "posthoc_sim",
+         "model_id": "env:MODEL_ID",
+         "transformers_version": tx_ver,
+         "embedding_model_id": "intfloat/e5-base-v2",
+         "index_build_meta": {"created_at": None, "num_chunks": None},
+         "gpu_cuda": False,
+         "gen_params": state["gen_params"],
+     }
+     try:
+         fp = write_audit_log(case_id, payload)
+         st.caption(f"Audit snapshot saved: `{fp}`")
+     except Exception as e:
+         st.caption(f"Audit log skipped: {e}")
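+     # NOTE: model_id, index_build_meta, and gpu_cuda are static placeholders;
+     # the snapshot does not probe the live environment for them.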

+ # Render editors and explainability if we have state
+ if state["assessment"]["ai"]:
      st.subheader("Assessment / Plan")
+     colA, colB = st.columns(2)
+
+     # ---------------- Assessment ----------------
+     with colA:
+         st.markdown("### Assessment")
+         assessment_text = st.text_area("Edit assessment", value=state["assessment"]["text"], height=220, key="assess_text_v3")
+         state["assessment"]["text"] = assessment_text
+         status = _section_status(state["assessment"]["ai"], assessment_text)
+         _banner_for_section("Assessment", status["modified"])
+
+         if EXPLAINABILITY_ENABLED:
+             with st.expander("Explainability — Assessment", expanded=False):
+                 st.caption("Referral cues (AI provenance)")
+                 render_referral_tokens(state["assessment"]["tokens"])  # frozen tokens
+                 st.caption("Guideline references")
+                 if status["modified"]:
+                     if st.button("Re-run Guidelines (Assessment)", key="rerun_assess"):
+                         raw_refs = find_guideline_refs_for_section(assessment_text, top_k_per_claim=3, max_refs_per_claim=2)
+                         new_reg, numbered = assign_endnotes(state["endnote_registry"], raw_refs)
+                         state["endnote_registry"] = new_reg
+                         state["assessment"]["refs"] = numbered
+                 render_guideline_refs(state["assessment"]["refs"])
+
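+     # Clicking a re-run button triggers a Streamlit rerun; the recomputed
+     # refs are stored in session state and rendered on that same pass, and
+     # nothing else regenerates because `submitted` is False on reruns.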
+     # ---------------- Plan ----------------
+     with colB:
+         st.markdown("### Plan")
+         plan_text = st.text_area("Edit plan", value=state["plan"]["text"], height=220, key="plan_text_v3")
+         state["plan"]["text"] = plan_text
+         status_p = _section_status(state["plan"]["ai"], plan_text)
+         _banner_for_section("Plan", status_p["modified"])
+
+         if EXPLAINABILITY_ENABLED:
+             with st.expander("Explainability — Plan", expanded=False):
+                 st.caption("Referral cues (AI provenance)")
+                 render_referral_tokens(state["plan"]["tokens"])  # frozen tokens
+                 st.caption("Guideline references")
+                 if status_p["modified"]:
+                     if st.button("Re-run Guidelines (Plan)", key="rerun_plan"):
+                         raw_refs = find_guideline_refs_for_section(plan_text, top_k_per_claim=3, max_refs_per_claim=2)
+                         new_reg, numbered = assign_endnotes(state["endnote_registry"], raw_refs)
+                         state["endnote_registry"] = new_reg
+                         state["plan"]["refs"] = numbered
+                 render_guideline_refs(state["plan"]["refs"])
+
+     # References block
+     if state["endnote_registry"]:
+         st.divider()
+         st.markdown("### References")
+         # Render the combined refs, de-duplicated and ordered by endnote number
+         combined = {(r["n"], r["doc"], r["page"]): r for r in state["assessment"]["refs"] + state["plan"]["refs"]}
+         for key in sorted(combined.keys()):
+             r = combined[key]
+             st.markdown(f"[{r['n']}] **{r['doc']}** — p.{r['page']}")
+
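+         # NOTE: sorting assumes "page" values are mutually comparable
+         # (e.g., all ints); a None or mixed-type page raises TypeError here.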
+     # Save snapshot
+     st.divider()
+     if st.button("Save Audit Snapshot", key="save_snapshot_v3"):
+         flags = {
+             "assessment": _section_status(state["assessment"]["ai"], state["assessment"]["text"])["modified"],
+             "plan": _section_status(state["plan"]["ai"], state["plan"]["text"])["modified"],
+         }
+         # Coverage is reused from generation time; edits update the flags
+         # above but do not recompute coverage_percent.
+         cov = state["metrics"]["coverage_percent"]
+         payload = {
+             "runtime_sec": None,
+             "sections": {
+                 "assessment": {"tokens": state["assessment"]["tokens"], "refs": state["assessment"]["refs"]},
+                 "plan": {"tokens": state["plan"]["tokens"], "refs": state["plan"]["refs"]},
+             },
+             "coverage_percent": cov,
+             "evidence_count": len(state["assessment"]["refs"]) + len(state["plan"]["refs"]),
+             "section_edit_flags": flags,
+             "explainability_mode": "posthoc_sim",
+             "model_id": "env:MODEL_ID",
+             "transformers_version": "auto",
+             "embedding_model_id": "intfloat/e5-base-v2",
+             "index_build_meta": {"created_at": None, "num_chunks": None},
+             "gpu_cuda": False,
+             "gen_params": state["gen_params"],
+         }
+         case_id = state["case_id"] or str(uuid.uuid4())
+         fp = write_audit_log(case_id, payload)
+         st.success(f"Snapshot saved to `{fp}`")
299