Chae committed on
Commit
78a3087
·
1 Parent(s): 5dd05ec

feat: minimal working inplace feedback

Browse files
Files changed (5) hide show
  1. __pycache__/cards.cpython-311.pyc +0 -0
  2. cards.py +2 -12
  3. chat.py +9 -8
  4. inplace_chat.py +182 -0
  5. streamlit_app.py +12 -17
__pycache__/cards.cpython-311.pyc CHANGED
Binary files a/__pycache__/cards.cpython-311.pyc and b/__pycache__/cards.cpython-311.pyc differ
 
cards.py CHANGED
@@ -6,15 +6,5 @@ def chat_card():
6
  # st.chat_message("assistant").write("Hello, user!")
7
  # st.chat_input("Type something")
8
 
9
- # def status_card():
10
- # st.page_link("status.py", label="Status", icon=":material/error:")
11
- # cols = st.columns(2)
12
- # cols[0].error("Error")
13
- # cols[0].warning("Warning")
14
- # cols[1].info("Info")
15
- # cols[1].success("Success")
16
- # cols = st.columns(2)
17
- # if cols[0].button("Toast!"):
18
- # st.toast("Toast message", icon=":material/notifications:")
19
- # if cols[1].button("Balloons!"):
20
- # st.balloons()
 
6
  # st.chat_message("assistant").write("Hello, user!")
7
  # st.chat_input("Type something")
8
 
9
+ def inplace_chat_card():
10
+ st.page_link("inplace_chat.py", label="In-place Feedback Chat", icon=":material/edit_document:")
 
 
 
 
 
 
 
 
 
 
chat.py CHANGED
@@ -4,7 +4,7 @@ from huggingface_hub import InferenceClient
4
  import os
5
  import json
6
 
7
- st.header("In-place feedback demo")
8
 
9
  # import the inference client
10
  hf_token = os.getenv("HF_TOKEN")
@@ -27,13 +27,14 @@ if "chat_history" not in st.session_state:
27
  for message in st.session_state.chat_history:
28
  with st.chat_message(message["role"]):
29
  st.markdown(message["content"])
30
- if "reasoning" in message:
31
- with st.expander("Show reasoning"):
32
- st.markdown(message["reasoning"])
33
 
34
  if prompt := st.chat_input("Send a message"):
35
  # add message to chat history
36
  st.session_state.chat_history.append({"role": "user", "content": prompt})
 
37
  # show message
38
  with st.chat_message("user"):
39
  st.markdown(prompt) # for markdown of user text
@@ -63,10 +64,10 @@ if prompt := st.chat_input("Send a message"):
63
  st.markdown(reply)
64
  print(reply)
65
 
66
- reasoning = getattr(response.choices[0], "reasoning", None)
67
- if reasoning:
68
- with st.expander("Show reasoning"):
69
- st.markdown(reasoning)
70
 
71
  # Add assistant response to chat history
72
  st.session_state.chat_history.append(
 
4
  import os
5
  import json
6
 
7
+ st.header("Multi-turn feedback chatbot demo")
8
 
9
  # import the inference client
10
  hf_token = os.getenv("HF_TOKEN")
 
27
  for message in st.session_state.chat_history:
28
  with st.chat_message(message["role"]):
29
  st.markdown(message["content"])
30
+ # if "reasoning" in message:
31
+ # with st.expander("Show reasoning"):
32
+ # st.markdown(message["reasoning"])
33
 
34
  if prompt := st.chat_input("Send a message"):
35
  # add message to chat history
36
  st.session_state.chat_history.append({"role": "user", "content": prompt})
37
+
38
  # show message
39
  with st.chat_message("user"):
40
  st.markdown(prompt) # for markdown of user text
 
64
  st.markdown(reply)
65
  print(reply)
66
 
67
+ # reasoning = getattr(response.choices[0], "reasoning", None)
68
+ # if reasoning:
69
+ # with st.expander("Show reasoning"):
70
+ # st.markdown(reasoning)
71
 
72
  # Add assistant response to chat history
73
  st.session_state.chat_history.append(
inplace_chat.py ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # inplace_chat.py
2
+ import os
3
+ import json
4
+ import difflib
5
+ import streamlit as st
6
+ from huggingface_hub import InferenceClient
7
+
8
+ # === Page header ===
9
+ st.header("In-place feedback chatbot demo")
10
+
11
+ # === Theme primary color (from .streamlit/config.toml) ===
12
+ PRIMARY = "#cb785c" # keep in sync with [theme] primaryColor
13
+
14
+ # HF Inference client
15
+ hf_token = os.getenv("HF_TOKEN")
16
+ client = InferenceClient(model="openai/gpt-oss-20b", token=hf_token)
17
+
18
+ ####### initialize session states ######
19
+ if "chat_history" not in st.session_state:
20
+ st.session_state.chat_history = [] # [{"role": "user", ...}, {"role": "assistant", ...}]
21
+ if "original_user_prompt" not in st.session_state:
22
+ st.session_state.original_user_prompt = ""
23
+ if "editable" not in st.session_state:
24
+ st.session_state.editable = False
25
+ if "prev_text" not in st.session_state:
26
+ st.session_state.prev_text = ""
27
+ if "edited_text" not in st.session_state:
28
+ st.session_state.edited_text = ""
29
+ if "prev_msgs" not in st.session_state:
30
+ st.session_state.prev_msgs = []
31
+
32
# === Utility: compact "removed vs added" summary for the inplace_prefix ===
### later separate this into parse_diff.py
def summarize_edit(old: str, new: str) -> tuple[str, str]:
    """Summarize the difference between *old* and *new* as two strings.

    Runs a character-level diff and joins every deleted span into
    ``removed_text`` and every inserted span into ``edited_text``,
    separated by " / ". A side with no non-whitespace material falls
    back to "(none)". Coarse by design — good enough for a single
    bullet like: - Replaced {removed_text} with {edited_text}
    """
    matcher = difflib.SequenceMatcher(a=old, b=new)
    deletions: list[str] = []
    insertions: list[str] = []
    for op, a_start, a_end, b_start, b_end in matcher.get_opcodes():
        # "replace" contributes to both sides; "delete"/"insert" to one.
        if op in ("delete", "replace"):
            piece = old[a_start:a_end].strip()
            if piece:
                deletions.append(piece)
        if op in ("insert", "replace"):
            piece = new[b_start:b_end].strip()
            if piece:
                insertions.append(piece)
    removed_text = " / ".join(deletions) if deletions else "(none)"
    edited_text = " / ".join(insertions) if insertions else "(none)"
    return removed_text, edited_text
53
+
54
+ # === Render current conversation ===
55
+ for i, msg in enumerate(st.session_state.chat_history):
56
+ # Hide the last assistant message while the editor is open
57
+ if st.session_state.editable and i == len(st.session_state.chat_history) - 1 and msg["role"] == "assistant":
58
+ continue
59
+ with st.chat_message(msg["role"]):
60
+ st.markdown(msg["content"])
61
+
62
+ if prompt := st.chat_input("Send a message"):
63
+ st.session_state.original_user_prompt = prompt
64
+ st.session_state.chat_history.append({"role": "user", "content": prompt})
65
+ with st.chat_message("user"):
66
+ st.markdown(prompt)
67
+
68
+ messages_for_api = [
69
+ {"role": "system", "content": "You are a helpful assistant."},
70
+ {"role": "user", "content": prompt},
71
+ ]
72
+
73
+ with st.chat_message("assistant"):
74
+ with st.spinner("Thinking…"):
75
+ try:
76
+ response = client.chat_completion(
77
+ messages=messages_for_api,
78
+ max_tokens=60000,
79
+ temperature=0.7,
80
+ top_p=0.95,
81
+ stream=False,
82
+ )
83
+
84
+ reply = response.choices[0].message["content"]
85
+ st.markdown(reply)
86
+ st.session_state.chat_history.append(
87
+ {"role": "assistant", "content": reply}
88
+ )
89
+ st.session_state.prev_text = reply
90
+ # save previous messages for reference
91
+ st.session_state.prev_msgs.append(reply)
92
+
93
+ except Exception as e:
94
+ st.error(f"Error: {e}")
95
+
96
+ # Edit features
97
+ if st.session_state.chat_history and st.session_state.chat_history[-1]["role"] == "assistant":
98
+ # button
99
+ with st.chat_message("assistant"):
100
+ col_spacer, col_edit = st.columns([0.93, 0.07])
101
+ with col_edit:
102
+ if st.button("✏️", key="edit_btn", help="Edit response"):
103
+ st.session_state.editable = True
104
+
105
+ ######## Edit mode #########
106
+ if st.session_state.editable:
107
+ st.markdown('<div class="editable-on">', unsafe_allow_html=True)
108
+ with st.chat_message("assistant"):
109
+ st.caption("Editing the last response…")
110
+ st.session_state.edited_text = st.text_area(
111
+ " ",
112
+ value=st.session_state.prev_text,
113
+ height=500,
114
+ label_visibility="collapsed",
115
+ key="edit_textarea",
116
+ )
117
+ # Bottom-right finish button
118
+ right = st.columns([1, 0.25])[1]
119
+ with right:
120
+ finished_edit = st.button(
121
+ "Finish edit",
122
+ icon=":material/edit:",
123
+ type="primary",
124
+ use_container_width=True,
125
+ key="finish_edit_btn",
126
+ )
127
+ st.markdown("</div>", unsafe_allow_html=True)
128
+ else:
129
+ finished_edit = False
130
+
131
+ # === Handle edit submission (backend: in-place continuation) ===
132
+ if finished_edit:
133
+ st.session_state.chat_history.pop()
134
+ removed_text, added_text = summarize_edit(st.session_state.prev_text, st.session_state.edited_text)
135
+
136
+ system_prompt = "The user edited your previous answer. Please continue reasoning from the edited version below."
137
+ original_user_prompt = st.session_state.original_user_prompt
138
+
139
+ inplace_prefix = (
140
+ "This is the previous user correction to the reasoning step:\n"
141
+ f"- Replaced {json.dumps(removed_text)} with {json.dumps(added_text)}\n"
142
+ "---\n"
143
+ f"{st.session_state.edited_text}\n"
144
+ "### Further reasoning and validation\n"
145
+ )
146
+
147
+ # Exit edit mode before generation
148
+ st.session_state.editable = False
149
+
150
+ with st.chat_message("assistant"):
151
+ with st.spinner("Continuing from your edit…"):
152
+ try:
153
+ resp = client.chat_completion(
154
+ messages=[
155
+ {"role": "system", "content": system_prompt},
156
+ {
157
+ "role": "user",
158
+ "content": original_user_prompt
159
+ + "\n\nUser has edited your previous response. Continue from the corrected text below:\n\n"
160
+ + inplace_prefix
161
+ },
162
+ ],
163
+ max_tokens=60000,
164
+ temperature=0.7,
165
+ top_p=0.95,
166
+ )
167
+
168
+ generated = resp.choices[0].message["content"]
169
+ st.session_state.prev_text = generated
170
+ print("generated text after feedback")
171
+ print(generated)
172
+ st.session_state.chat_history.append(
173
+ {"role": "assistant", "content": generated}
174
+ )
175
+ st.rerun()
176
+
177
+ except Exception as e:
178
+ st.error(f"Error while continuing from edit: {e}")
179
+ st.session_state.chat_history.append(
180
+ {"role": "assistant", "content": st.session_state.prev_text}
181
+ )
182
+ st.session_state.editable = False
streamlit_app.py CHANGED
@@ -12,21 +12,25 @@ from cards import (
12
  # media_card,
13
  # layouts_card,
14
  chat_card,
 
15
  # status_card
16
  )
17
 
18
  if "init" not in st.session_state:
19
- # st.session_state.chart_data = pd.DataFrame(
20
- # np.random.randn(20, 3), columns=["a", "b", "c"]
21
- # )
22
- # st.session_state.map_data = pd.DataFrame(
23
- # np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
24
- # columns=["lat", "lon"],
25
- # )
26
  st.session_state.init = True
27
 
28
 
29
  pages = [
 
 
 
 
 
 
 
 
 
 
30
  # st.Page(
31
  # "home.py",
32
  # title="Home",
@@ -62,11 +66,6 @@ pages = [
62
  # title="Layouts",
63
  # icon=":material/dashboard:"
64
  # ),
65
- st.Page(
66
- "chat.py",
67
- title="Chat",
68
- icon=":material/chat:"
69
- ),
70
  # st.Page(
71
  # "status.py",
72
  # title="Status",
@@ -78,11 +77,7 @@ page = st.navigation(pages)
78
  page.run()
79
 
80
  with st.sidebar.container(height=310):
81
- if page.title == "Chat":
82
- chat_card()
83
- else:
84
- st.page_link("home.py", label="Home", icon=":material/home:")
85
- st.write("Welcome to the home page!")
86
 
87
  st.sidebar.caption(
88
  "This app uses [Space Grotesk](https://fonts.google.com/specimen/Space+Grotesk) "
 
12
  # media_card,
13
  # layouts_card,
14
  chat_card,
15
+ inplace_chat_card,
16
  # status_card
17
  )
18
 
19
  if "init" not in st.session_state:
 
 
 
 
 
 
 
20
  st.session_state.init = True
21
 
22
 
23
  pages = [
24
+ st.Page(
25
+ "inplace_chat.py",
26
+ title="In-place Feedback Chat",
27
+ icon=":material/edit_document:"
28
+ ),
29
+ st.Page(
30
+ "chat.py",
31
+ title="Chat",
32
+ icon=":material/chat:"
33
+ ),
34
  # st.Page(
35
  # "home.py",
36
  # title="Home",
 
66
  # title="Layouts",
67
  # icon=":material/dashboard:"
68
  # ),
 
 
 
 
 
69
  # st.Page(
70
  # "status.py",
71
  # title="Status",
 
77
  page.run()
78
 
79
  with st.sidebar.container(height=310):
80
+ st.write("This is a simple demo for CSED499 Inplace feedback.")
 
 
 
 
81
 
82
  st.sidebar.caption(
83
  "This app uses [Space Grotesk](https://fonts.google.com/specimen/Space+Grotesk) "