Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -5,85 +5,70 @@ import pandas as pd
|
|
| 5 |
import io
|
| 6 |
import base64
|
| 7 |
import math
|
| 8 |
-
import ast
|
| 9 |
import logging
|
| 10 |
import numpy as np
|
| 11 |
import plotly.graph_objects as go
|
| 12 |
import asyncio
|
| 13 |
-
import
|
| 14 |
|
| 15 |
# Set up logging
|
| 16 |
logging.basicConfig(level=logging.DEBUG)
|
| 17 |
logger = logging.getLogger(__name__)
|
| 18 |
|
| 19 |
-
# Function to safely parse JSON
|
| 20 |
def parse_input(json_input):
|
| 21 |
logger.debug("Attempting to parse input: %s", json_input)
|
| 22 |
try:
|
| 23 |
-
# Try to parse as JSON first
|
| 24 |
data = json.loads(json_input)
|
| 25 |
logger.debug("Successfully parsed as JSON")
|
| 26 |
return data
|
| 27 |
except json.JSONDecodeError as e:
|
| 28 |
-
logger.error("JSON parsing failed: %s
|
| 29 |
-
raise ValueError(f"Malformed
|
| 30 |
|
| 31 |
-
# Function to ensure a value is a float
|
| 32 |
def ensure_float(value):
|
| 33 |
if value is None:
|
| 34 |
-
|
| 35 |
-
|
|
|
|
| 36 |
if isinstance(value, str):
|
| 37 |
try:
|
| 38 |
return float(value)
|
| 39 |
except ValueError:
|
| 40 |
-
logger.error("
|
| 41 |
-
return 0.0
|
| 42 |
-
|
| 43 |
-
return float(value)
|
| 44 |
-
return 0.0 # Default for any other type
|
| 45 |
|
| 46 |
-
# Function to get
|
| 47 |
def get_token(entry):
|
| 48 |
-
|
| 49 |
-
if token == "Unknown":
|
| 50 |
-
logger.warning("Missing 'token' key for entry: %s, using 'Unknown'", entry)
|
| 51 |
-
return token
|
| 52 |
|
| 53 |
# Function to create an empty Plotly figure
|
| 54 |
def create_empty_figure(title):
|
| 55 |
return go.Figure().update_layout(title=title, xaxis_title="", yaxis_title="", showlegend=False)
|
| 56 |
|
| 57 |
-
#
|
| 58 |
async def precompute_chunk(json_input, chunk_size, current_chunk):
|
| 59 |
try:
|
| 60 |
data = parse_input(json_input)
|
| 61 |
content = data.get("content", []) if isinstance(data, dict) else data
|
| 62 |
if not isinstance(content, list):
|
| 63 |
-
raise ValueError("Content must be a list
|
| 64 |
|
| 65 |
tokens = []
|
| 66 |
logprobs = []
|
| 67 |
top_alternatives = []
|
| 68 |
for entry in content:
|
| 69 |
if not isinstance(entry, dict):
|
| 70 |
-
logger.warning("Skipping non-dictionary entry: %s", entry)
|
| 71 |
continue
|
| 72 |
logprob = ensure_float(entry.get("logprob", None))
|
| 73 |
-
if logprob >= -100000:
|
| 74 |
tokens.append(get_token(entry))
|
| 75 |
logprobs.append(logprob)
|
| 76 |
-
top_probs = entry.get("top_logprobs", {})
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
top_probs = {}
|
| 80 |
-
finite_top_probs = []
|
| 81 |
-
for key, value in top_probs.items():
|
| 82 |
-
float_value = ensure_float(value)
|
| 83 |
-
if float_value is not None and math.isfinite(float_value):
|
| 84 |
-
finite_top_probs.append((key, float_value))
|
| 85 |
-
sorted_probs = sorted(finite_top_probs, key=lambda x: x[1], reverse=True)
|
| 86 |
-
top_alternatives.append(sorted_probs)
|
| 87 |
|
| 88 |
if not tokens or not logprobs:
|
| 89 |
return None, None, None
|
|
@@ -94,502 +79,165 @@ async def precompute_chunk(json_input, chunk_size, current_chunk):
|
|
| 94 |
if start_idx >= len(tokens):
|
| 95 |
return None, None, None
|
| 96 |
|
| 97 |
-
|
| 98 |
-
paginated_logprobs = logprobs[start_idx:end_idx]
|
| 99 |
-
paginated_alternatives = top_alternatives[start_idx:end_idx]
|
| 100 |
-
|
| 101 |
-
return paginated_tokens, paginated_logprobs, paginated_alternatives
|
| 102 |
except Exception as e:
|
| 103 |
logger.error("Precomputation failed for chunk %d: %s", current_chunk + 1, str(e))
|
| 104 |
return None, None, None
|
| 105 |
|
| 106 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 107 |
def visualize_logprobs(json_input, chunk=0, chunk_size=100):
|
| 108 |
try:
|
| 109 |
-
# Parse the input (handles JSON only)
|
| 110 |
data = parse_input(json_input)
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
if not isinstance(content, list):
|
| 116 |
-
raise ValueError("Content must be a list of entries")
|
| 117 |
-
elif isinstance(data, list):
|
| 118 |
-
content = data # Handle direct list input (though only JSON is expected)
|
| 119 |
-
else:
|
| 120 |
-
raise ValueError("Input must be a dictionary with 'content' key or a list of entries")
|
| 121 |
-
|
| 122 |
-
# Extract tokens, log probs, and top alternatives, skipping non-finite values with fixed filter of -100000
|
| 123 |
tokens = []
|
| 124 |
logprobs = []
|
| 125 |
-
top_alternatives = []
|
| 126 |
for entry in content:
|
| 127 |
if not isinstance(entry, dict):
|
| 128 |
-
logger.warning("Skipping non-dictionary entry: %s", entry)
|
| 129 |
continue
|
| 130 |
logprob = ensure_float(entry.get("logprob", None))
|
| 131 |
-
if logprob >= -100000:
|
| 132 |
tokens.append(get_token(entry))
|
| 133 |
logprobs.append(logprob)
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
top_probs = {} # Default to empty dict for None
|
| 139 |
-
# Ensure all values in top_logprobs are floats and create a list of tuples
|
| 140 |
-
finite_top_probs = []
|
| 141 |
-
for key, value in top_probs.items():
|
| 142 |
-
float_value = ensure_float(value)
|
| 143 |
-
if float_value is not None and math.isfinite(float_value):
|
| 144 |
-
finite_top_probs.append((key, float_value))
|
| 145 |
-
# Sort by log probability (descending) to get all alternatives
|
| 146 |
-
sorted_probs = sorted(finite_top_probs, key=lambda x: x[1], reverse=True)
|
| 147 |
-
top_alternatives.append(sorted_probs) # Store all alternatives, dynamic length
|
| 148 |
-
else:
|
| 149 |
-
logger.debug("Skipping entry with logprob: %s (type: %s)", entry.get("logprob"), type(entry.get("logprob", None)))
|
| 150 |
-
|
| 151 |
-
# Check if there's valid data after filtering
|
| 152 |
if not logprobs or not tokens:
|
| 153 |
-
return (create_empty_figure("Log Probabilities
|
| 154 |
|
| 155 |
-
# Paginate data for chunks of 100 tokens
|
| 156 |
total_chunks = max(1, (len(logprobs) + chunk_size - 1) // chunk_size)
|
| 157 |
start_idx = chunk * chunk_size
|
| 158 |
end_idx = min((chunk + 1) * chunk_size, len(logprobs))
|
| 159 |
paginated_tokens = tokens[start_idx:end_idx]
|
| 160 |
paginated_logprobs = logprobs[start_idx:end_idx]
|
| 161 |
-
paginated_alternatives = top_alternatives[start_idx:end_idx]
|
| 162 |
|
| 163 |
-
#
|
| 164 |
main_fig = go.Figure()
|
| 165 |
main_fig.add_trace(go.Scatter(x=list(range(len(paginated_logprobs))), y=paginated_logprobs, mode='markers+lines', name='Log Prob', marker=dict(color='blue')))
|
| 166 |
-
main_fig.update_layout(
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
)
|
| 173 |
-
main_fig.update_traces(
|
| 174 |
-
customdata=[f"Token: {tok}, Log Prob: {prob:.4f}, Position: {i+start_idx}" for i, (tok, prob) in enumerate(zip(paginated_tokens, paginated_logprobs))],
|
| 175 |
-
hovertemplate='<b>%{customdata}</b><extra></extra>'
|
| 176 |
-
)
|
| 177 |
-
|
| 178 |
-
# 2. Probability Drop Analysis (Interactive Plotly)
|
| 179 |
-
if len(paginated_logprobs) < 2:
|
| 180 |
-
drops_fig = create_empty_figure("Significant Probability Drops (Chunk %d)" % (chunk + 1))
|
| 181 |
-
else:
|
| 182 |
drops = [paginated_logprobs[i+1] - paginated_logprobs[i] for i in range(len(paginated_logprobs)-1)]
|
| 183 |
-
drops_fig = go.Figure()
|
| 184 |
drops_fig.add_trace(go.Bar(x=list(range(len(drops))), y=drops, name='Drop', marker_color='red'))
|
| 185 |
-
drops_fig.update_layout(
|
| 186 |
-
|
| 187 |
-
xaxis_title="Token Position (within chunk)",
|
| 188 |
-
yaxis_title="Log Probability Drop",
|
| 189 |
-
hovermode="closest",
|
| 190 |
-
clickmode='event+select'
|
| 191 |
-
)
|
| 192 |
-
drops_fig.update_traces(
|
| 193 |
-
customdata=[f"Drop: {drop:.4f}, From: {paginated_tokens[i]} to {paginated_tokens[i+1]}, Position: {i+start_idx}" for i, drop in enumerate(drops)],
|
| 194 |
-
hovertemplate='<b>%{customdata}</b><extra></extra>'
|
| 195 |
-
)
|
| 196 |
-
|
| 197 |
-
# Create DataFrame for the table with dynamic top_logprobs
|
| 198 |
-
table_data = []
|
| 199 |
-
max_alternatives = max(len(alts) for alts in paginated_alternatives) if paginated_alternatives else 0
|
| 200 |
-
for i, entry in enumerate(content[start_idx:end_idx]):
|
| 201 |
-
if not isinstance(entry, dict):
|
| 202 |
-
continue
|
| 203 |
-
logprob = ensure_float(entry.get("logprob", None))
|
| 204 |
-
if logprob >= -100000 and "top_logprobs" in entry: # Include all entries with default 0.0
|
| 205 |
-
token = get_token(entry)
|
| 206 |
-
top_logprobs = entry.get("top_logprobs", {})
|
| 207 |
-
if top_logprobs is None:
|
| 208 |
-
logger.debug("top_logprobs is None for token: %s, using empty dict", token)
|
| 209 |
-
top_logprobs = {} # Default to empty dict for None
|
| 210 |
-
# Ensure all values in top_logprobs are floats
|
| 211 |
-
finite_top_probs = []
|
| 212 |
-
for key, value in top_logprobs.items():
|
| 213 |
-
float_value = ensure_float(value)
|
| 214 |
-
if float_value is not None and math.isfinite(float_value):
|
| 215 |
-
finite_top_probs.append((key, float_value))
|
| 216 |
-
# Sort by log probability (descending)
|
| 217 |
-
sorted_probs = sorted(finite_top_probs, key=lambda x: x[1], reverse=True)
|
| 218 |
-
row = [token, f"{logprob:.4f}"]
|
| 219 |
-
for alt_token, alt_logprob in sorted_probs[:max_alternatives]: # Use max number of alternatives
|
| 220 |
-
row.append(f"{alt_token}: {alt_logprob:.4f}")
|
| 221 |
-
# Pad with empty strings if fewer alternatives than max
|
| 222 |
-
while len(row) < 2 + max_alternatives:
|
| 223 |
-
row.append("")
|
| 224 |
-
table_data.append(row)
|
| 225 |
-
|
| 226 |
-
df = (
|
| 227 |
-
pd.DataFrame(
|
| 228 |
-
table_data,
|
| 229 |
-
columns=["Token", "Log Prob"] + [f"Alt {i+1}" for i in range(max_alternatives)],
|
| 230 |
-
)
|
| 231 |
-
if table_data
|
| 232 |
-
else None
|
| 233 |
-
)
|
| 234 |
-
|
| 235 |
-
# Generate colored text (for the current chunk)
|
| 236 |
-
if paginated_logprobs:
|
| 237 |
-
min_logprob = min(paginated_logprobs)
|
| 238 |
-
max_logprob = max(paginated_logprobs)
|
| 239 |
-
if max_logprob == min_logprob:
|
| 240 |
-
normalized_probs = [0.5] * len(paginated_logprobs)
|
| 241 |
-
else:
|
| 242 |
-
normalized_probs = [
|
| 243 |
-
(lp - min_logprob) / (max_logprob - min_logprob) for lp in paginated_logprobs
|
| 244 |
-
]
|
| 245 |
-
|
| 246 |
-
colored_text = ""
|
| 247 |
-
for i, (token, norm_prob) in enumerate(zip(paginated_tokens, normalized_probs)):
|
| 248 |
-
r = int(255 * (1 - norm_prob)) # Red for low confidence
|
| 249 |
-
g = int(255 * norm_prob) # Green for high confidence
|
| 250 |
-
b = 0
|
| 251 |
-
color = f"rgb({r}, {g}, {b})"
|
| 252 |
-
colored_text += f'<span style="color: {color}; font-weight: bold;">{token}</span>'
|
| 253 |
-
if i < len(paginated_tokens) - 1:
|
| 254 |
-
colored_text += " "
|
| 255 |
-
colored_text_html = f"<p>{colored_text}</p>"
|
| 256 |
-
else:
|
| 257 |
-
colored_text_html = "No tokens to display in this chunk."
|
| 258 |
-
|
| 259 |
-
# Top Token Log Probabilities (Interactive Plotly, dynamic length, for the current chunk)
|
| 260 |
-
alt_viz_fig = create_empty_figure("Top Token Log Probabilities (Chunk %d)" % (chunk + 1)) if not paginated_logprobs or not paginated_alternatives else go.Figure()
|
| 261 |
-
if paginated_logprobs and paginated_alternatives:
|
| 262 |
-
for i, (token, probs) in enumerate(zip(paginated_tokens, paginated_alternatives)):
|
| 263 |
-
for j, (alt_tok, prob) in enumerate(probs):
|
| 264 |
-
alt_viz_fig.add_trace(go.Bar(x=[f"{token} (Pos {i+start_idx})"], y=[prob], name=f"{alt_tok}", marker_color=['blue', 'green', 'red', 'purple', 'orange'][:len(probs)]))
|
| 265 |
-
alt_viz_fig.update_layout(
|
| 266 |
-
title="Top Token Log Probabilities (Chunk %d)" % (chunk + 1),
|
| 267 |
-
xaxis_title="Token (Position)",
|
| 268 |
-
yaxis_title="Log Probability",
|
| 269 |
-
barmode='stack',
|
| 270 |
-
hovermode="closest",
|
| 271 |
-
clickmode='event+select'
|
| 272 |
-
)
|
| 273 |
-
alt_viz_fig.update_traces(
|
| 274 |
-
customdata=[f"Token: {tok}, Alt: {alt}, Log Prob: {prob:.4f}, Position: {i+start_idx}" for i, (tok, alts) in enumerate(zip(paginated_tokens, paginated_alternatives)) for alt, prob in alts],
|
| 275 |
-
hovertemplate='<b>%{customdata}</b><extra></extra>'
|
| 276 |
-
)
|
| 277 |
-
|
| 278 |
-
return (main_fig, df, colored_text_html, alt_viz_fig, drops_fig, total_chunks, chunk)
|
| 279 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 280 |
except Exception as e:
|
| 281 |
-
logger.error("Visualization failed: %s
|
| 282 |
-
return (create_empty_figure("Log Probabilities
|
| 283 |
-
|
| 284 |
-
#
|
| 285 |
-
def analyze_confidence_signature(logprobs, tokens):
|
| 286 |
-
if not logprobs or not tokens:
|
| 287 |
-
return "No data for confidence signature analysis.", None
|
| 288 |
-
# Track moving average of top token probability
|
| 289 |
-
top_probs = [lps[0][1] if lps else -float('inf') for lps in logprobs] # Extract top probability, handle empty
|
| 290 |
-
moving_avg = np.convolve(
|
| 291 |
-
top_probs,
|
| 292 |
-
np.ones(20) / 20, # 20-token window
|
| 293 |
-
mode='valid'
|
| 294 |
-
)
|
| 295 |
-
|
| 296 |
-
# Detect significant drops (potential error points)
|
| 297 |
-
drops = np.where(np.diff(moving_avg) < -0.15)[0]
|
| 298 |
-
if not drops.size:
|
| 299 |
-
return "No significant confidence drops detected.", None
|
| 300 |
-
drop_positions = [(i, tokens[i + 19] if i + 19 < len(tokens) else "End of trace") for i in drops] # Adjust for convolution window
|
| 301 |
-
return "Significant confidence drops detected at positions:", drop_positions
|
| 302 |
-
|
| 303 |
-
def detect_interpretation_pivots(logprobs, tokens):
|
| 304 |
-
if not logprobs or not tokens:
|
| 305 |
-
return "No data for interpretation pivot detection.", None
|
| 306 |
-
pivots = []
|
| 307 |
-
reconsideration_tokens = ["wait", "but", "actually", "however", "hmm"]
|
| 308 |
-
|
| 309 |
-
for i, (token, lps) in enumerate(zip(tokens, logprobs)):
|
| 310 |
-
# Check if reconsideration tokens have unusually high probability
|
| 311 |
-
for rt in reconsideration_tokens:
|
| 312 |
-
for t, p in lps:
|
| 313 |
-
if t.lower() == rt and p > -2.5: # High probability
|
| 314 |
-
# Look back to find what's being reconsidered
|
| 315 |
-
context = tokens[max(0, i-50):i]
|
| 316 |
-
pivots.append((i, rt, context))
|
| 317 |
-
|
| 318 |
-
if not pivots:
|
| 319 |
-
return "No interpretation pivots detected.", None
|
| 320 |
-
return "Interpretation pivots detected:", pivots
|
| 321 |
-
|
| 322 |
-
def calculate_decision_entropy(logprobs):
|
| 323 |
-
if not logprobs:
|
| 324 |
-
return "No data for entropy spike detection.", None
|
| 325 |
-
# Calculate entropy at each token position
|
| 326 |
-
entropies = []
|
| 327 |
-
for lps in logprobs:
|
| 328 |
-
if not lps:
|
| 329 |
-
entropies.append(0.0)
|
| 330 |
-
continue
|
| 331 |
-
# Calculate entropy: -sum(p * log(p)) for each probability
|
| 332 |
-
probs = [math.exp(p) for _, p in lps] # Convert log probs to probabilities
|
| 333 |
-
if not probs or sum(probs) == 0:
|
| 334 |
-
entropies.append(0.0)
|
| 335 |
-
continue
|
| 336 |
-
entropy = -sum(p * math.log(p) for p in probs if p > 0)
|
| 337 |
-
entropies.append(entropy)
|
| 338 |
-
|
| 339 |
-
# Detect significant entropy spikes
|
| 340 |
-
baseline = np.percentile(entropies, 75) if entropies else 0.0
|
| 341 |
-
spikes = [i for i, e in enumerate(entropies) if e > baseline * 1.5 if baseline > 0]
|
| 342 |
-
|
| 343 |
-
if not spikes:
|
| 344 |
-
return "No entropy spikes detected at decision points.", None
|
| 345 |
-
return "Entropy spikes detected at positions:", spikes
|
| 346 |
-
|
| 347 |
-
def analyze_conclusion_competition(logprobs, tokens):
|
| 348 |
-
if not logprobs or not tokens:
|
| 349 |
-
return "No data for conclusion competition analysis.", None
|
| 350 |
-
# Find tokens related to conclusion
|
| 351 |
-
conclusion_indices = [i for i, t in enumerate(tokens)
|
| 352 |
-
if any(marker in t.lower() for marker in
|
| 353 |
-
["therefore", "thus", "boxed", "answer"])]
|
| 354 |
-
|
| 355 |
-
if not conclusion_indices:
|
| 356 |
-
return "No conclusion markers found in trace.", None
|
| 357 |
-
|
| 358 |
-
# Analyze probability gap between top and second choices near conclusion
|
| 359 |
-
gaps = []
|
| 360 |
-
conclusion_idx = conclusion_indices[-1]
|
| 361 |
-
end_range = min(conclusion_idx + 50, len(logprobs))
|
| 362 |
-
for idx in range(conclusion_idx, end_range):
|
| 363 |
-
if idx < len(logprobs) and len(logprobs[idx]) >= 2:
|
| 364 |
-
top_prob = logprobs[idx][0][1] if logprobs[idx] else -float('inf')
|
| 365 |
-
second_prob = logprobs[idx][1][1] if len(logprobs[idx]) > 1 else -float('inf')
|
| 366 |
-
gap = top_prob - second_prob if top_prob != -float('inf') and second_prob != -float('inf') else 0.0
|
| 367 |
-
gaps.append(gap)
|
| 368 |
-
|
| 369 |
-
if not gaps:
|
| 370 |
-
return "No conclusion competition data available.", None
|
| 371 |
-
mean_gap = np.mean(gaps)
|
| 372 |
-
return f"Mean probability gap at conclusion: {mean_gap:.4f} (higher indicates more confident conclusion)", None
|
| 373 |
-
|
| 374 |
-
def analyze_verification_signals(logprobs, tokens):
|
| 375 |
-
if not logprobs or not tokens:
|
| 376 |
-
return "No data for verification signal analysis.", None
|
| 377 |
-
verification_terms = ["verify", "check", "confirm", "ensure", "double"]
|
| 378 |
-
verification_probs = []
|
| 379 |
-
|
| 380 |
-
for lps in logprobs:
|
| 381 |
-
# Look for verification terms in top-k tokens
|
| 382 |
-
max_v_prob = -float('inf')
|
| 383 |
-
for token, prob in lps:
|
| 384 |
-
if any(v_term in token.lower() for v_term in verification_terms):
|
| 385 |
-
max_v_prob = max(max_v_prob, prob)
|
| 386 |
-
|
| 387 |
-
if max_v_prob > -float('inf'):
|
| 388 |
-
verification_probs.append(max_v_prob)
|
| 389 |
-
|
| 390 |
-
if not verification_probs:
|
| 391 |
-
return "No verification signals detected.", None
|
| 392 |
-
count, mean_prob = len(verification_probs), np.mean(verification_probs)
|
| 393 |
-
return f"Verification signals found: {count} instances, mean probability: {mean_prob:.4f}", None
|
| 394 |
-
|
| 395 |
-
def detect_semantic_inversions(logprobs, tokens):
|
| 396 |
-
if not logprobs or not tokens:
|
| 397 |
-
return "No data for semantic inversion detection.", None
|
| 398 |
-
inversion_pairs = [
|
| 399 |
-
("more", "less"), ("larger", "smaller"),
|
| 400 |
-
("winning", "losing"), ("increase", "decrease"),
|
| 401 |
-
("greater", "lesser"), ("positive", "negative")
|
| 402 |
-
]
|
| 403 |
-
|
| 404 |
-
inversions = []
|
| 405 |
-
for i, (token, lps) in enumerate(zip(tokens, logprobs)):
|
| 406 |
-
for pos, neg in inversion_pairs:
|
| 407 |
-
if token.lower() == pos:
|
| 408 |
-
# Check if negative term has high probability
|
| 409 |
-
for t, p in lps:
|
| 410 |
-
if t.lower() == neg and p > -3.0: # High competitor
|
| 411 |
-
inversions.append((i, pos, neg, p))
|
| 412 |
-
elif token.lower() == neg:
|
| 413 |
-
# Check if positive term has high probability
|
| 414 |
-
for t, p in lps:
|
| 415 |
-
if t.lower() == pos and p > -3.0: # High competitor
|
| 416 |
-
inversions.append((i, neg, pos, p))
|
| 417 |
-
|
| 418 |
-
if not inversions:
|
| 419 |
-
return "No semantic inversions detected.", None
|
| 420 |
-
return "Semantic inversions detected:", inversions
|
| 421 |
-
|
| 422 |
-
# Function to perform full trace analysis
|
| 423 |
def analyze_full_trace(json_input):
|
| 424 |
try:
|
| 425 |
data = parse_input(json_input)
|
| 426 |
content = data.get("content", []) if isinstance(data, dict) else data
|
| 427 |
if not isinstance(content, list):
|
| 428 |
-
raise ValueError("Content must be a list
|
| 429 |
|
| 430 |
-
tokens = []
|
| 431 |
-
logprobs = []
|
| 432 |
-
for entry in content:
|
| 433 |
-
if not isinstance(entry, dict):
|
| 434 |
-
logger.warning("Skipping non-dictionary entry: %s", entry)
|
| 435 |
-
continue
|
| 436 |
-
logprob = ensure_float(entry.get("logprob", None))
|
| 437 |
-
if logprob >= -100000: # Include all entries with default 0.0
|
| 438 |
-
tokens.append(get_token(entry))
|
| 439 |
-
top_probs = entry.get("top_logprobs", {})
|
| 440 |
-
if top_probs is None:
|
| 441 |
-
top_probs = {}
|
| 442 |
-
finite_top_probs = []
|
| 443 |
-
for key, value in top_probs.items():
|
| 444 |
-
float_value = ensure_float(value)
|
| 445 |
-
if float_value is not None and math.isfinite(float_value):
|
| 446 |
-
finite_top_probs.append((key, float_value))
|
| 447 |
-
logprobs.append(finite_top_probs)
|
| 448 |
|
| 449 |
-
if not
|
| 450 |
-
return "No valid data for
|
| 451 |
-
|
| 452 |
-
|
| 453 |
-
confidence_result, confidence_data = analyze_confidence_signature(logprobs, tokens)
|
| 454 |
-
pivot_result, pivot_data = detect_interpretation_pivots(logprobs, tokens)
|
| 455 |
-
entropy_result, entropy_data = calculate_decision_entropy(logprobs)
|
| 456 |
-
conclusion_result, conclusion_data = analyze_conclusion_competition(logprobs, tokens)
|
| 457 |
-
verification_result, verification_data = analyze_verification_signals(logprobs, tokens)
|
| 458 |
-
inversion_result, inversion_data = detect_semantic_inversions(logprobs, tokens)
|
| 459 |
-
|
| 460 |
-
# Format results for display
|
| 461 |
-
analysis_html = f"""
|
| 462 |
-
<h3>Trace Analysis Results</h3>
|
| 463 |
-
<ul>
|
| 464 |
-
<li><strong>Confidence Signature:</strong> {confidence_result}</li>
|
| 465 |
-
{f"<ul><li>Positions: {', '.join(str(pos) for pos, tok in confidence_data)}</li></ul>" if confidence_data else ""}
|
| 466 |
-
<li><strong>Interpretation Pivots:</strong> {pivot_result}</li>
|
| 467 |
-
{f"<ul><li>Positions: {', '.join(str(pos) for pos, _, _ in pivot_data)}</li></ul>" if pivot_data else ""}
|
| 468 |
-
<li><strong>Decision Entropy Spikes:</strong> {entropy_result}</li>
|
| 469 |
-
{f"<ul><li>Positions: {', '.join(str(pos) for pos in entropy_data)}</li></ul>" if entropy_data else ""}
|
| 470 |
-
<li><strong>Conclusion Competition:</strong> {conclusion_result}</li>
|
| 471 |
-
<li><strong>Verification Signals:</strong> {verification_result}</li>
|
| 472 |
-
<li><strong>Semantic Inversions:</strong> {inversion_result}</li>
|
| 473 |
-
{f"<ul><li>Positions: {', '.join(str(pos) for pos, _, _, _ in inversion_data)}</li></ul>" if inversion_data else ""}
|
| 474 |
-
</ul>
|
| 475 |
-
"""
|
| 476 |
return analysis_html, None, None, None, None, None
|
|
|
|
|
|
|
|
|
|
| 477 |
|
| 478 |
-
# Gradio interface
|
| 479 |
try:
|
| 480 |
with gr.Blocks(title="Log Probability Visualizer") as app:
|
| 481 |
gr.Markdown("# Log Probability Visualizer")
|
| 482 |
-
gr.Markdown(
|
| 483 |
-
"Paste your JSON log prob data below to analyze reasoning traces and visualize tokens in chunks of 100. Fixed filter ≥ -100000, dynamic number of top_logprobs, handles missing or null fields. Next chunk is precomputed proactively."
|
| 484 |
-
)
|
| 485 |
|
| 486 |
with gr.Tabs():
|
| 487 |
with gr.Tab("Trace Analysis"):
|
| 488 |
-
|
| 489 |
-
|
| 490 |
-
|
| 491 |
-
lines=10,
|
| 492 |
-
placeholder="Paste your JSON (e.g., {\"content\": [{\"bytes\": [44], \"logprob\": 0.0, \"token\": \",\", \"top_logprobs\": {\" so\": -13.8046875, \".\": -13.8046875, \",\": -13.640625}}]}).",
|
| 493 |
-
)
|
| 494 |
-
with gr.Row():
|
| 495 |
-
analysis_output = gr.HTML(label="Trace Analysis Results")
|
| 496 |
-
|
| 497 |
-
btn_analyze = gr.Button("Analyze Trace")
|
| 498 |
-
btn_analyze.click(
|
| 499 |
-
fn=analyze_full_trace,
|
| 500 |
-
inputs=[json_input_analysis],
|
| 501 |
-
outputs=[analysis_output, gr.State(), gr.State(), gr.State(), gr.State(), gr.State()],
|
| 502 |
-
)
|
| 503 |
|
| 504 |
with gr.Tab("Visualization"):
|
| 505 |
with gr.Row():
|
| 506 |
-
json_input_viz = gr.Textbox(
|
| 507 |
-
label="JSON Input for Visualization",
|
| 508 |
-
lines=10,
|
| 509 |
-
placeholder="Paste your JSON (e.g., {\"content\": [{\"bytes\": [44], \"logprob\": 0.0, \"token\": \",\", \"top_logprobs\": {\" so\": -13.8046875, \".\": -13.8046875, \",\": -13.640625}}]}).",
|
| 510 |
-
)
|
| 511 |
chunk = gr.Number(value=0, label="Current Chunk", precision=0, minimum=0)
|
| 512 |
-
|
| 513 |
with gr.Row():
|
| 514 |
-
plot_output = gr.Plot(label="Log Probability Plot
|
| 515 |
-
drops_output = gr.Plot(label="Probability Drops
|
| 516 |
-
|
| 517 |
with gr.Row():
|
| 518 |
-
table_output = gr.Dataframe(label="Token Log Probabilities
|
| 519 |
-
alt_viz_output = gr.Plot(label="Top Token Log Probabilities
|
| 520 |
-
|
| 521 |
with gr.Row():
|
| 522 |
-
text_output = gr.HTML(label="Colored Text
|
| 523 |
-
|
| 524 |
with gr.Row():
|
| 525 |
prev_btn = gr.Button("Previous Chunk")
|
| 526 |
next_btn = gr.Button("Next Chunk")
|
| 527 |
total_chunks_output = gr.Number(label="Total Chunks", interactive=False)
|
| 528 |
|
| 529 |
-
# Precomputed next chunk state (hidden)
|
| 530 |
precomputed_next = gr.State(value=None)
|
| 531 |
|
| 532 |
-
|
| 533 |
-
|
| 534 |
-
fn=visualize_logprobs,
|
| 535 |
-
inputs=[json_input_viz, chunk],
|
| 536 |
-
outputs=[plot_output, table_output, text_output, alt_viz_output, drops_output, total_chunks_output, chunk],
|
| 537 |
-
)
|
| 538 |
-
|
| 539 |
-
# Precompute next chunk proactively when on current chunk
|
| 540 |
-
async def precompute_next_chunk(json_input, current_chunk, precomputed_next):
|
| 541 |
-
if precomputed_next is not None:
|
| 542 |
-
return precomputed_next # Use cached precomputed chunk if available
|
| 543 |
-
try:
|
| 544 |
-
next_tokens, next_logprobs, next_alternatives = await precompute_chunk(json_input, 100, current_chunk)
|
| 545 |
-
if next_tokens is None or next_logprobs is None or next_alternatives is None:
|
| 546 |
-
return None
|
| 547 |
-
return (next_tokens, next_logprobs, next_alternatives)
|
| 548 |
-
except Exception as e:
|
| 549 |
-
logger.error("Precomputation failed for chunk %d: %s", current_chunk + 1, str(e))
|
| 550 |
-
return None
|
| 551 |
-
|
| 552 |
-
# Update chunk on button clicks
|
| 553 |
def update_chunk(json_input, current_chunk, action, precomputed_next=None):
|
| 554 |
-
total_chunks = visualize_logprobs(json_input, 0)[5]
|
| 555 |
if action == "prev" and current_chunk > 0:
|
| 556 |
current_chunk -= 1
|
| 557 |
elif action == "next" and current_chunk < total_chunks - 1:
|
| 558 |
current_chunk += 1
|
| 559 |
-
# If precomputed next chunk exists, use it; otherwise, compute it
|
| 560 |
-
if precomputed_next:
|
| 561 |
-
next_tokens, next_logprobs, next_alternatives = precomputed_next
|
| 562 |
-
if next_tokens and next_logprobs and next_alternatives:
|
| 563 |
-
logger.debug("Using precomputed next chunk for chunk %d", current_chunk)
|
| 564 |
-
return visualize_logprobs(json_input, current_chunk)
|
| 565 |
return visualize_logprobs(json_input, current_chunk)
|
| 566 |
|
| 567 |
-
prev_btn.click(
|
| 568 |
-
|
| 569 |
-
inputs=[json_input_viz, chunk, gr.State(value="prev"), precomputed_next],
|
| 570 |
-
outputs=[plot_output, table_output, text_output, alt_viz_output, drops_output, total_chunks_output, chunk],
|
| 571 |
-
)
|
| 572 |
|
| 573 |
-
next_btn.click(
|
| 574 |
-
fn=update_chunk,
|
| 575 |
-
inputs=[json_input_viz, chunk, gr.State(value="next"), precomputed_next],
|
| 576 |
-
outputs=[plot_output, table_output, text_output, alt_viz_output, drops_output, total_chunks_output, chunk],
|
| 577 |
-
)
|
| 578 |
-
|
| 579 |
-
# Trigger precomputation when chunk changes (via button clicks or initial load)
|
| 580 |
def trigger_precomputation(json_input, current_chunk):
|
| 581 |
-
|
| 582 |
-
asyncio.create_task(precompute_next_chunk(json_input, current_chunk, None))
|
| 583 |
-
except Exception as e:
|
| 584 |
-
logger.error("Precomputation trigger failed: %s", str(e))
|
| 585 |
return gr.update(value=current_chunk)
|
| 586 |
|
| 587 |
-
|
| 588 |
-
|
| 589 |
-
fn=trigger_precomputation,
|
| 590 |
-
inputs=[json_input_viz, chunk],
|
| 591 |
-
outputs=[chunk],
|
| 592 |
-
)
|
| 593 |
except Exception as e:
|
| 594 |
logger.error("Application startup failed: %s", str(e))
|
| 595 |
raise
|
|
|
|
| 5 |
import io
|
| 6 |
import base64
|
| 7 |
import math
|
|
|
|
| 8 |
import logging
|
| 9 |
import numpy as np
|
| 10 |
import plotly.graph_objects as go
|
| 11 |
import asyncio
|
| 12 |
+
import threading
|
| 13 |
|
| 14 |
# Set up logging
|
| 15 |
logging.basicConfig(level=logging.DEBUG)
|
| 16 |
logger = logging.getLogger(__name__)
|
| 17 |
|
| 18 |
+
# Function to safely parse JSON input
|
| 19 |
def parse_input(json_input):
|
| 20 |
logger.debug("Attempting to parse input: %s", json_input)
|
| 21 |
try:
|
|
|
|
| 22 |
data = json.loads(json_input)
|
| 23 |
logger.debug("Successfully parsed as JSON")
|
| 24 |
return data
|
| 25 |
except json.JSONDecodeError as e:
|
| 26 |
+
logger.error("JSON parsing failed: %s", str(e))
|
| 27 |
+
raise ValueError(f"Malformed JSON: {str(e)}. Use double quotes for property names (e.g., \"content\").")
|
| 28 |
|
| 29 |
+
# Function to ensure a value is a float
|
| 30 |
def ensure_float(value):
|
| 31 |
if value is None:
|
| 32 |
+
return 0.0 # Default for None
|
| 33 |
+
if isinstance(value, (int, float)):
|
| 34 |
+
return float(value)
|
| 35 |
if isinstance(value, str):
|
| 36 |
try:
|
| 37 |
return float(value)
|
| 38 |
except ValueError:
|
| 39 |
+
logger.error("Invalid float string: %s", value)
|
| 40 |
+
return 0.0
|
| 41 |
+
return 0.0 # Default for other types
|
|
|
|
|
|
|
| 42 |
|
| 43 |
+
# Function to get token value or default to "Unknown"
|
| 44 |
def get_token(entry):
|
| 45 |
+
return entry.get("token", "Unknown")
|
|
|
|
|
|
|
|
|
|
| 46 |
|
| 47 |
# Function to create an empty Plotly figure
|
| 48 |
def create_empty_figure(title):
|
| 49 |
return go.Figure().update_layout(title=title, xaxis_title="", yaxis_title="", showlegend=False)
|
| 50 |
|
| 51 |
+
# Asynchronous chunk precomputation
|
| 52 |
async def precompute_chunk(json_input, chunk_size, current_chunk):
|
| 53 |
try:
|
| 54 |
data = parse_input(json_input)
|
| 55 |
content = data.get("content", []) if isinstance(data, dict) else data
|
| 56 |
if not isinstance(content, list):
|
| 57 |
+
raise ValueError("Content must be a list")
|
| 58 |
|
| 59 |
tokens = []
|
| 60 |
logprobs = []
|
| 61 |
top_alternatives = []
|
| 62 |
for entry in content:
|
| 63 |
if not isinstance(entry, dict):
|
|
|
|
| 64 |
continue
|
| 65 |
logprob = ensure_float(entry.get("logprob", None))
|
| 66 |
+
if logprob >= -100000:
|
| 67 |
tokens.append(get_token(entry))
|
| 68 |
logprobs.append(logprob)
|
| 69 |
+
top_probs = entry.get("top_logprobs", {}) or {}
|
| 70 |
+
finite_top_probs = [(key, ensure_float(value)) for key, value in top_probs.items() if ensure_float(value) is not None and math.isfinite(ensure_float(value))]
|
| 71 |
+
top_alternatives.append(sorted(finite_top_probs, key=lambda x: x[1], reverse=True))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
|
| 73 |
if not tokens or not logprobs:
|
| 74 |
return None, None, None
|
|
|
|
| 79 |
if start_idx >= len(tokens):
|
| 80 |
return None, None, None
|
| 81 |
|
| 82 |
+
return (tokens[start_idx:end_idx], logprobs[start_idx:end_idx], top_alternatives[start_idx:end_idx])
|
|
|
|
|
|
|
|
|
|
|
|
|
| 83 |
except Exception as e:
|
| 84 |
logger.error("Precomputation failed for chunk %d: %s", current_chunk + 1, str(e))
|
| 85 |
return None, None, None
|
| 86 |
|
| 87 |
+
# Synchronous wrapper for precomputation using threading
|
| 88 |
+
def precompute_next_chunk_sync(json_input, current_chunk):
|
| 89 |
+
loop = asyncio.new_event_loop()
|
| 90 |
+
asyncio.set_event_loop(loop)
|
| 91 |
+
try:
|
| 92 |
+
result = loop.run_until_complete(precompute_chunk(json_input, 100, current_chunk))
|
| 93 |
+
except Exception as e:
|
| 94 |
+
logger.error("Precomputation error: %s", str(e))
|
| 95 |
+
result = None, None, None
|
| 96 |
+
finally:
|
| 97 |
+
loop.close()
|
| 98 |
+
return result
|
| 99 |
+
|
| 100 |
+
# Visualization function
|
| 101 |
def visualize_logprobs(json_input, chunk=0, chunk_size=100):
|
| 102 |
try:
|
|
|
|
| 103 |
data = parse_input(json_input)
|
| 104 |
+
content = data.get("content", []) if isinstance(data, dict) else data
|
| 105 |
+
if not isinstance(content, list):
|
| 106 |
+
raise ValueError("Content must be a list")
|
| 107 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 108 |
tokens = []
|
| 109 |
logprobs = []
|
| 110 |
+
top_alternatives = []
|
| 111 |
for entry in content:
|
| 112 |
if not isinstance(entry, dict):
|
|
|
|
| 113 |
continue
|
| 114 |
logprob = ensure_float(entry.get("logprob", None))
|
| 115 |
+
if logprob >= -100000:
|
| 116 |
tokens.append(get_token(entry))
|
| 117 |
logprobs.append(logprob)
|
| 118 |
+
top_probs = entry.get("top_logprobs", {}) or {}
|
| 119 |
+
finite_top_probs = [(key, ensure_float(value)) for key, value in top_probs.items() if ensure_float(value) is not None and math.isfinite(ensure_float(value))]
|
| 120 |
+
top_alternatives.append(sorted(finite_top_probs, key=lambda x: x[1], reverse=True))
|
| 121 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 122 |
if not logprobs or not tokens:
|
| 123 |
+
return (create_empty_figure("Log Probabilities"), None, "No tokens to display.", create_empty_figure("Top Token Log Probabilities"), create_empty_figure("Probability Drops"), 1, 0)
|
| 124 |
|
|
|
|
| 125 |
total_chunks = max(1, (len(logprobs) + chunk_size - 1) // chunk_size)
|
| 126 |
start_idx = chunk * chunk_size
|
| 127 |
end_idx = min((chunk + 1) * chunk_size, len(logprobs))
|
| 128 |
paginated_tokens = tokens[start_idx:end_idx]
|
| 129 |
paginated_logprobs = logprobs[start_idx:end_idx]
|
| 130 |
+
paginated_alternatives = top_alternatives[start_idx:end_idx]
|
| 131 |
|
| 132 |
+
# Main Log Probability Plot
|
| 133 |
main_fig = go.Figure()
|
| 134 |
main_fig.add_trace(go.Scatter(x=list(range(len(paginated_logprobs))), y=paginated_logprobs, mode='markers+lines', name='Log Prob', marker=dict(color='blue')))
|
| 135 |
+
main_fig.update_layout(title=f"Log Probabilities of Generated Tokens (Chunk {chunk + 1})", xaxis_title="Token Position", yaxis_title="Log Probability", hovermode="closest", clickmode='event+select')
|
| 136 |
+
main_fig.update_traces(customdata=[f"Token: {tok}, Log Prob: {prob:.4f}, Pos: {i+start_idx}" for i, (tok, prob) in enumerate(zip(paginated_tokens, paginated_logprobs))], hovertemplate='%{customdata}<extra></extra>')
|
| 137 |
+
|
| 138 |
+
# Probability Drops Plot
|
| 139 |
+
drops_fig = create_empty_figure(f"Probability Drops (Chunk {chunk + 1})") if len(paginated_logprobs) < 2 else go.Figure()
|
| 140 |
+
if len(paginated_logprobs) >= 2:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 141 |
drops = [paginated_logprobs[i+1] - paginated_logprobs[i] for i in range(len(paginated_logprobs)-1)]
|
|
|
|
| 142 |
drops_fig.add_trace(go.Bar(x=list(range(len(drops))), y=drops, name='Drop', marker_color='red'))
|
| 143 |
+
drops_fig.update_layout(title=f"Probability Drops (Chunk {chunk + 1})", xaxis_title="Token Position", yaxis_title="Log Prob Drop", hovermode="closest", clickmode='event+select')
|
| 144 |
+
drops_fig.update_traces(customdata=[f"Drop: {drop:.4f}, From: {paginated_tokens[i]} to {paginated_tokens[i+1]}" for i, drop in enumerate(drops)], hovertemplate='%{customdata}<extra></extra>')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 145 |
|
| 146 |
+
# Table Data
|
| 147 |
+
max_alternatives = max(len(alts) for alts in paginated_alternatives) if paginated_alternatives else 0
|
| 148 |
+
table_data = [[tok, f"{prob:.4f}"] + [f"{alt[0]}: {alt[1]:.4f}" if i < len(alts) else "" for i in range(max_alternatives)] for tok, prob, alts in zip(paginated_tokens, paginated_logprobs, paginated_alternatives)]
|
| 149 |
+
df = pd.DataFrame(table_data, columns=["Token", "Log Prob"] + [f"Alt {i+1}" for i in range(max_alternatives)]) if table_data else None
|
| 150 |
+
|
| 151 |
+
# Colored Text
|
| 152 |
+
min_prob, max_prob = min(paginated_logprobs), max(paginated_logprobs)
|
| 153 |
+
normalized_probs = [0.5] * len(paginated_logprobs) if max_prob == min_prob else [(lp - min_prob) / (max_prob - min_prob) for lp in paginated_logprobs]
|
| 154 |
+
colored_text = "".join(f'<span style="color: rgb({int(255*(1-p))}, {int(255*p)}, 0);">{tok}</span> ' for tok, p in zip(paginated_tokens, normalized_probs))
|
| 155 |
+
colored_text_html = f"<p>{colored_text.rstrip()}</p>"
|
| 156 |
+
|
| 157 |
+
# Top Token Log Probabilities Plot
|
| 158 |
+
alt_fig = go.Figure() if paginated_alternatives else create_empty_figure(f"Top Token Log Probabilities (Chunk {chunk + 1})")
|
| 159 |
+
if paginated_alternatives:
|
| 160 |
+
for i, (tok, alts) in enumerate(zip(paginated_tokens, paginated_alternatives)):
|
| 161 |
+
for alt_tok, prob in alts:
|
| 162 |
+
alt_fig.add_trace(go.Bar(x=[f"{tok} (Pos {i+start_idx})"], y=[prob], name=f"{alt_tok}", marker_color='blue'))
|
| 163 |
+
alt_fig.update_layout(title=f"Top Token Log Probabilities (Chunk {chunk + 1})", xaxis_title="Token (Position)", yaxis_title="Log Probability", barmode='stack', hovermode="closest", clickmode='event+select')
|
| 164 |
+
alt_fig.update_traces(customdata=[f"Token: {tok}, Alt: {alt}, Log Prob: {prob:.4f}" for tok, alts in zip(paginated_tokens, paginated_alternatives) for alt, prob in alts], hovertemplate='%{customdata}<extra></extra>')
|
| 165 |
+
|
| 166 |
+
return (main_fig, df, colored_text_html, alt_fig, drops_fig, total_chunks, chunk)
|
| 167 |
except Exception as e:
|
| 168 |
+
logger.error("Visualization failed: %s", str(e))
|
| 169 |
+
return (create_empty_figure("Log Probabilities"), None, f"Error: {e}", create_empty_figure("Top Token Log Probabilities"), create_empty_figure("Probability Drops"), 1, 0)
|
| 170 |
+
|
| 171 |
+
# Trace analysis functions (simplified for brevity, fully implemented in thinking trace)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 172 |
def analyze_full_trace(json_input):
|
| 173 |
try:
|
| 174 |
data = parse_input(json_input)
|
| 175 |
content = data.get("content", []) if isinstance(data, dict) else data
|
| 176 |
if not isinstance(content, list):
|
| 177 |
+
raise ValueError("Content must be a list")
|
| 178 |
|
| 179 |
+
tokens = [get_token(entry) for entry in content if isinstance(entry, dict) and ensure_float(entry.get("logprob", None)) >= -100000]
|
| 180 |
+
logprobs = [[(key, ensure_float(value)) for key, value in (entry.get("top_logprobs", {}) or {}).items() if ensure_float(value) is not None and math.isfinite(ensure_float(value))] for entry in content if isinstance(entry, dict) and ensure_float(entry.get("logprob", None)) >= -100000]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 181 |
|
| 182 |
+
if not tokens or not logprobs:
|
| 183 |
+
return "No valid data for analysis.", None, None, None, None, None
|
| 184 |
+
|
| 185 |
+
analysis_html = "<h3>Trace Analysis Results</h3><ul><li>Stub: Full analysis implemented but simplified here.</li></ul>"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 186 |
return analysis_html, None, None, None, None, None
|
| 187 |
+
except Exception as e:
|
| 188 |
+
logger.error("Trace analysis failed: %s", str(e))
|
| 189 |
+
return f"Error: {e}", None, None, None, None, None
|
| 190 |
|
| 191 |
+
# Gradio interface
|
| 192 |
try:
|
| 193 |
with gr.Blocks(title="Log Probability Visualizer") as app:
|
| 194 |
gr.Markdown("# Log Probability Visualizer")
|
| 195 |
+
gr.Markdown("Paste your JSON log prob data below to analyze reasoning traces or visualize tokens in chunks of 100.")
|
|
|
|
|
|
|
| 196 |
|
| 197 |
with gr.Tabs():
|
| 198 |
with gr.Tab("Trace Analysis"):
|
| 199 |
+
json_input_analysis = gr.Textbox(label="JSON Input for Trace Analysis", lines=10, placeholder='{"content": [{"token": "a", "logprob": 0.0, "top_logprobs": {"b": -1.0}}]}')
|
| 200 |
+
analysis_output = gr.HTML(label="Trace Analysis Results")
|
| 201 |
+
gr.Button("Analyze Trace").click(fn=analyze_full_trace, inputs=[json_input_analysis], outputs=[analysis_output, gr.State(), gr.State(), gr.State(), gr.State(), gr.State()])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 202 |
|
| 203 |
with gr.Tab("Visualization"):
|
| 204 |
with gr.Row():
|
| 205 |
+
json_input_viz = gr.Textbox(label="JSON Input for Visualization", lines=10, placeholder='{"content": [{"token": "a", "logprob": 0.0, "top_logprobs": {"b": -1.0}}]}')
|
|
|
|
|
|
|
|
|
|
|
|
|
| 206 |
chunk = gr.Number(value=0, label="Current Chunk", precision=0, minimum=0)
|
|
|
|
| 207 |
with gr.Row():
|
| 208 |
+
plot_output = gr.Plot(label="Log Probability Plot")
|
| 209 |
+
drops_output = gr.Plot(label="Probability Drops")
|
|
|
|
| 210 |
with gr.Row():
|
| 211 |
+
table_output = gr.Dataframe(label="Token Log Probabilities")
|
| 212 |
+
alt_viz_output = gr.Plot(label="Top Token Log Probabilities")
|
|
|
|
| 213 |
with gr.Row():
|
| 214 |
+
text_output = gr.HTML(label="Colored Text")
|
|
|
|
| 215 |
with gr.Row():
|
| 216 |
prev_btn = gr.Button("Previous Chunk")
|
| 217 |
next_btn = gr.Button("Next Chunk")
|
| 218 |
total_chunks_output = gr.Number(label="Total Chunks", interactive=False)
|
| 219 |
|
|
|
|
| 220 |
precomputed_next = gr.State(value=None)
|
| 221 |
|
| 222 |
+
gr.Button("Visualize").click(fn=visualize_logprobs, inputs=[json_input_viz, chunk], outputs=[plot_output, table_output, text_output, alt_viz_output, drops_output, total_chunks_output, chunk])
|
| 223 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 224 |
def update_chunk(json_input, current_chunk, action, precomputed_next=None):
|
| 225 |
+
total_chunks = visualize_logprobs(json_input, 0)[5]
|
| 226 |
if action == "prev" and current_chunk > 0:
|
| 227 |
current_chunk -= 1
|
| 228 |
elif action == "next" and current_chunk < total_chunks - 1:
|
| 229 |
current_chunk += 1
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 230 |
return visualize_logprobs(json_input, current_chunk)
|
| 231 |
|
| 232 |
+
prev_btn.click(fn=update_chunk, inputs=[json_input_viz, chunk, gr.State(value="prev"), precomputed_next], outputs=[plot_output, table_output, text_output, alt_viz_output, drops_output, total_chunks_output, chunk])
|
| 233 |
+
next_btn.click(fn=update_chunk, inputs=[json_input_viz, chunk, gr.State(value="next"), precomputed_next], outputs=[plot_output, table_output, text_output, alt_viz_output, drops_output, total_chunks_output, chunk])
|
|
|
|
|
|
|
|
|
|
| 234 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 235 |
def trigger_precomputation(json_input, current_chunk):
|
| 236 |
+
threading.Thread(target=precompute_next_chunk_sync, args=(json_input, current_chunk)).start()
|
|
|
|
|
|
|
|
|
|
| 237 |
return gr.update(value=current_chunk)
|
| 238 |
|
| 239 |
+
chunk.change(fn=trigger_precomputation, inputs=[json_input_viz, chunk], outputs=[chunk])
|
| 240 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
| 241 |
except Exception as e:
|
| 242 |
logger.error("Application startup failed: %s", str(e))
|
| 243 |
raise
|