import threading
import time
import uuid
import gradio as gr
from gradio import ChatMessage
from agents.reg_radar import RegRadarAgent
from tools.llm import stream_llm


class UIHandler:
def __init__(self):
self.agent = RegRadarAgent()

    def streaming_chatbot(self, message, history, user_id_state):
"""Process messages with tool visibility and lock input during response generation"""
# Initialize user_id if not set
if not user_id_state:
user_id_state = f"user-{uuid.uuid4().hex[:4]}"
user_id = user_id_state
        if not message.strip():
            # This function is a generator, so a plain `return value` would not
            # reach Gradio; yield the unchanged state first, then stop.
            yield history, "", gr.update(interactive=True), user_id_state
            return
# Add user message
history.append(ChatMessage(role="user", content=message))
# Start timer
start_time = time.time()
# Disable input box at the start
yield history, "", gr.update(interactive=False), user_id_state
# Detect if this is a regulatory query
is_regulatory = self.agent.is_regulatory_query(message)
if not is_regulatory:
yield from self._handle_general_chat(message, history, user_id_state)
return
yield from self._handle_regulatory_chat(
message, history, user_id_state, user_id, start_time
)

    def _handle_general_chat(self, message, history, user_id_state):
"""Handle general (non-regulatory) chat flow with context from conversation history."""
history.append(
ChatMessage(role="assistant", content="💬 Processing general query...")
)
yield history, "", gr.update(interactive=False), user_id_state
# Clear processing message and stream response
history.pop()
streaming_content = ""
history.append(ChatMessage(role="assistant", content=""))
# Gather last 5 user/assistant messages as context
context_msgs = []
for msg in history[-10:]:
if isinstance(msg, dict):
role = msg.get("role")
content = msg.get("content")
else:
role = getattr(msg, "role", None)
content = getattr(msg, "content", None)
            # Skip empty entries (e.g. the blank assistant placeholder appended above)
            if role in ("user", "assistant") and content:
                context_msgs.append(f"{role.capitalize()}: {content}")
context_str = "\n".join(context_msgs[-5:])
# Compose prompt with context
if context_str:
prompt = f"""
You are an expert assistant. Here is the recent conversation history:
{context_str}
Now answer the user's latest message:
{message}
"""
else:
prompt = message
for chunk in stream_llm(prompt):
streaming_content += chunk
history[-1] = ChatMessage(role="assistant", content=streaming_content)
yield history, "", gr.update(interactive=False), user_id_state
# Re-enable input box at the end
yield history, "", gr.update(interactive=True), user_id_state

    def _handle_regulatory_chat(
self, message, history, user_id_state, user_id, start_time
):
"""Handle regulatory chat flow."""
# Show tool detection
tool_key, tool_name = self.agent.determine_intended_tool(message)
# Initial processing message with tool info (collapsible)
status_msg = (
f"Using **{tool_name}** to analyze your query (estimated 10-20 seconds)..."
)
history.append(
ChatMessage(
role="assistant",
content=status_msg,
metadata={"title": f"🛠️ Tool Selected: {tool_name}"},
)
)
yield history, "", gr.update(interactive=False), user_id_state
# Extract parameters and process query
params = self.agent.extract_parameters(message)
# Clear status and show parameter extraction (collapsible)
history.pop()
param_msg = self.agent.format_parameter_extraction(params)
history.append(
ChatMessage(
role="assistant",
content=param_msg,
metadata={"title": "📍Parameter Extraction"},
)
)
yield history, "", gr.update(interactive=False), user_id_state
# Show tool execution steps (collapsible)
tool_status = f"""
**Executing {tool_name}...**
⏳ _This process may take 40-90 seconds depending on the number of webpages being crawled._
"""
history.append(
ChatMessage(
role="assistant",
content=tool_status,
metadata={"title": "📢 Tool Execution Status"},
)
)
yield history, "", gr.update(interactive=False), user_id_state
# Process the regulatory query
results = self.agent.process_regulatory_query(message, params, user_id=user_id)
crawl_results = results["crawl_results"]
memory_results = results["memory_results"]
# Show collapsible raw results
if crawl_results["results"]:
collapsible_results = self._format_crawl_results(crawl_results["results"])
history.append(
ChatMessage(
role="assistant",
content=collapsible_results,
metadata={"title": "🌐 Raw Regulatory Data", "status": "done"},
)
)
yield history, "", gr.update(interactive=False), user_id_state
# Display memory results if available
if memory_results:
memory_msg = self._format_memory_results(memory_results)
history.append(
ChatMessage(
role="assistant",
content=memory_msg,
metadata={"title": "💾 Past Memories", "status": "done"},
)
)
yield history, "", gr.update(interactive=False), user_id_state
# Generate final analysis (no metadata, standard message)
history.append(
ChatMessage(
role="assistant", content="📝 **Generating Compliance Report...**"
)
)
yield history, "", gr.update(interactive=False), user_id_state
# Clear generating message and stream final report
history.pop()
streaming_content = ""
history.append(ChatMessage(role="assistant", content=""))
for chunk in self.agent.generate_report(params, crawl_results, memory_results):
streaming_content += chunk
history[-1] = ChatMessage(role="assistant", content=streaming_content)
yield history, "", gr.update(interactive=False), user_id_state
# Show completion time appended to the final report (no metadata)
elapsed = time.time() - start_time
history[-1] = ChatMessage(
role="assistant",
content=streaming_content + f"\n\n✨ Analysis complete ({elapsed:.1f}s).",
)
# Re-enable input box at the end
yield history, "", gr.update(interactive=True), user_id_state
# Save to memory in the background
threading.Thread(
target=self.agent.memory_tools.save_to_memory,
args=(user_id, message, streaming_content),
daemon=True,
).start()

    def _format_crawl_results(self, results):
"""Format crawl results for display, removing duplicates by URL."""
seen_urls = set()
results_display = []
count = 0
for result in results:
url = result["url"]
if url in seen_urls:
continue
seen_urls.add(url)
title = result["title"][:100] if result["title"] else "No Title"
count += 1
results_display.append(f"""
**{count}. {result["source"]}**
- Title: {title}...
- URL: {url}
""")
if results_display:
# Only return the content, let Gradio's metadata title handle the dropdown
collapsible_results = "\n".join(results_display)
else:
collapsible_results = "No unique regulatory updates found."
return collapsible_results

    def _format_memory_results(self, memory_results):
"""Format memory results for display."""
top_memories = memory_results[:3]
memory_details = ""
        for i, mem in enumerate(top_memories, 1):
            memory_text = mem.get("memory", "N/A")
            # Truncate long memories, adding an ellipsis only when text was cut
            if len(memory_text) > 300:
                memory_text = memory_text[:300] + "..."
            memory_details += f"\n**{i}. Memory:** {memory_text}\n"
        memory_msg = f"Found {len(memory_results)} similar past queries in memory.\nTop {len(top_memories)} shown below:\n{memory_details}"
return memory_msg

    def delayed_clear(self, user_id_state):
        """Clear the chat while preserving the session's user id."""
        time.sleep(0.1)  # 100ms delay to allow generator cancellation
return [], "", gr.update(interactive=True), user_id_state