Better error handling in chat-full.js and chat-copil-web.html

Files changed:
- chat-copil-web.html (+94 -59)
- chat-full.js (+24 -3)
chat-copil-web.html (changed)

@@ -19,6 +19,7 @@
   --err: #ff5c5c;
   --bubble-user: #2a2a34;
   --bubble-bot: #1d2233;
+  --bubble-err: #2a1416;
   --border: #2a2a34;
   --shadow: 0 8px 24px rgba(0,0,0,0.25);
 }
@@ -128,6 +129,8 @@
 }
 .msg.user .avatar { background: #223; color: #9fb4ff; }
 .msg.bot .avatar { background: #23252f; color: #9fe3c7; }
+.msg.error .bubble { background: var(--bubble-err); border-color: var(--err); }
+.msg.error .avatar { background: #3a1b1e; color: #ff9aa5; }
 
 form#composer {
   display: grid;
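Combined with the addMessage change further down, an error turn is rendered as a bot message carrying an extra error class, so these two rules tint its bubble and avatar red. A sketch of the resulting markup (the message text is illustrative):

    <div class="msg bot error">
      <div class="avatar">!</div>
      <div class="bubble">⚠️ Generation failed: Error: …</div>
    </div>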
@@ -249,7 +252,7 @@
   elInput.disabled = busy;
   elSend.disabled = busy;
   elModel.disabled = busy;
-
+  elChat.setAttribute('aria-busy', String(busy));
 
   let text = 'Idle';
   let cls = '';
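Beyond disabling the controls, the chat region now advertises its state to assistive technology; String(busy) serializes the boolean into the "true"/"false" tokens the attribute expects. A minimal sketch of the two states (the element id in the comments is an assumption):

    elChat.setAttribute('aria-busy', String(true));   // busy: <div id="chat" aria-busy="true">
    elChat.setAttribute('aria-busy', String(false));  // idle: <div id="chat" aria-busy="false">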
@@ -267,13 +270,15 @@
   });
 }
 
-function addMessage(role, text) {
+function addMessage(role, text, variant = '') {
   const msg = document.createElement('div');
-  …
+  const classes = ['msg', role === 'user' ? 'user' : 'bot'];
+  if (variant) classes.push(variant);
+  msg.className = classes.join(' ');
 
   const avatar = document.createElement('div');
   avatar.className = 'avatar';
-  avatar.textContent = role === 'user' ? 'U' : 'AI';
+  avatar.textContent = role === 'user' ? 'U' : (variant === 'error' ? '!' : 'AI');
 
   const bubble = document.createElement('div');
   bubble.className = 'bubble';
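A quick usage sketch of the new variant parameter (class names come from the CSS above; the message text is illustrative):

    // Error bubbles get the classes "msg bot error" and a '!' avatar:
    const bubble = addMessage('assistant', 'Something went wrong', 'error');
    // The returned bubble node can still be updated in place afterwards:
    bubble.textContent = 'Recovered after retry';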
@@ -284,34 +289,56 @@
   elChat.appendChild(msg);
   scrollToBottom();
 
-  return bubble; // return node for
+  return bubble; // return node for updates
 }
 
-function …
+function normalizeError(err) {
+  try {
+    if (err instanceof Error) {
+      return { name: err.name || 'Error', message: err.message || String(err), stack: err.stack || '' };
+    }
+    if (typeof err === 'string') {
+      return { name: 'Error', message: err, stack: '' };
+    }
+    if (err && typeof err === 'object') {
+      const msg = err.message || err.reason || JSON.stringify(err);
+      return { name: err.name || 'Error', message: String(msg), stack: err.stack || '' };
+    }
+    return { name: 'Error', message: String(err), stack: '' };
+  } catch {
+    return { name: 'Error', message: 'Unknown error', stack: '' };
+  }
+}
+
+function formatErrorText(err, context) {
+  const { name, message, stack } = normalizeError(err);
+  let text = `⚠️ ${context ? context + ': ' : ''}${name}: ${message}`;
+  if (stack) {
+    const first = String(stack).split('\n')[1] || '';
+    if (first.trim()) text += `\n${first.trim()}`;
+  }
+  return text;
+}
+
+function pushError(err, context = 'Error') {
+  const msg = formatErrorText(err, context);
+  addMessage('assistant', msg, 'error');
   elStatusText.textContent = 'Error';
   elStatusDot.className = 'dot err';
-  …
+  // Also record it in the dialogue history as an assistant turn
+  state.history.push({ role: 'assistant', content: msg });
 }
 
 async function ensureModelLoaded(modelId) {
-  // Cached?
   if (state.resources.has(modelId)) return state.resources.get(modelId);
 
   setBusy(true, 'loading');
   try {
     const task = resolveTask(modelId);
-
-    // Load pipeline
     const pipe = await pipeline(task, modelId, {
-      progress_callback: (p) => {
-        // Optional: could surface p.progress or p.status
-        // Keeping status general to avoid noisy UI.
-      },
+      progress_callback: () => {},
     });
-
-    // Load tokenizer (for chat templates when available)
     const tokenizer = await AutoTokenizer.from_pretrained(modelId).catch(() => null);
-
     const res = { task, pipe, tokenizer };
     state.resources.set(modelId, res);
     return res;
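The three helpers split the work: normalizeError coerces whatever was thrown into a { name, message, stack } shape, formatErrorText renders it (appending one stack line when available), and pushError routes the result into the chat, the status bar, and the history. Expected behavior, sketched from the code above (stack output abbreviated):

    normalizeError(new TypeError('x is undefined'));
    // → { name: 'TypeError', message: 'x is undefined', stack: 'TypeError: …' }
    normalizeError('fetch failed');
    // → { name: 'Error', message: 'fetch failed', stack: '' }
    normalizeError({ reason: 'aborted' });
    // → { name: 'Error', message: 'aborted', stack: '' }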
@@ -321,93 +348,87 @@
 }
 
 function buildPromptForModel(tokenizer, task, history, systemPrompt) {
-  // Convert app history to message list
   const messages = [];
   if (systemPrompt && systemPrompt.trim()) {
     messages.push({ role: 'system', content: systemPrompt.trim() });
   }
-  for (const m of history) {
-    messages.push({ role: m.role, content: m.content });
-  }
+  for (const m of history) messages.push({ role: m.role, content: m.content });
 
-  // Try model-native chat template if available
   if (tokenizer && typeof tokenizer.apply_chat_template === 'function') {
     try {
       const prompt = tokenizer.apply_chat_template(messages, {
         add_generation_prompt: true,
         tokenize: false,
       });
-      if (typeof prompt === 'string' && prompt.trim().length)
-      …
-      // Fall back if no template or error.
+      if (typeof prompt === 'string' && prompt.trim().length) return prompt;
+    } catch (e) {
+      // Non-fatal: fall back to a simple format
+      console.warn('Chat template failed; falling back:', e);
     }
   }
 
-  // Fallback: simple, generic chat-style formatting
   const parts = [];
   if (systemPrompt) parts.push(`System: ${systemPrompt.trim()}`);
   for (const m of history) {
     const role = m.role === 'assistant' ? 'Assistant' : 'User';
     parts.push(`${role}: ${m.content}`);
   }
-  parts.push('Assistant:');
+  parts.push('Assistant:');
   return parts.join('\n');
 }
 
 async function generateReply(userText) {
-  …
-  const res = await ensureModelLoaded(modelId);
-  const { task, pipe, tokenizer } = res;
-
-  // Add user message to history
-  state.history.push({ role: 'user', content: userText });
-
-  // Render user message
+  // Immediately reflect user's message in UI and history
   addMessage('user', userText);
+  state.history.push({ role: 'user', content: userText });
 
-  // …
+  // Prepare a placeholder assistant bubble that we will update
   const botBubble = addMessage('assistant', '…');
 
+  // 1) Ensure model is loaded
+  try {
+    await ensureModelLoaded(state.modelId);
+  } catch (err) {
+    const text = formatErrorText(err, 'Model loading failed');
+    botBubble.textContent = text;
+    state.history.push({ role: 'assistant', content: text });
+    return; // stop here; do not attempt generation
+  }
+
+  // 2) Generate
   setBusy(true, 'generating');
   try {
+    const { task, pipe, tokenizer } = state.resources.get(state.modelId);
     const prompt = buildPromptForModel(tokenizer, task, state.history, state.systemPrompt);
 
-    // Generation options (conservative defaults)
     const genOpts = {
       max_new_tokens: 200,
       temperature: 0.7,
       top_p: 0.95,
       do_sample: true,
       repetition_penalty: 1.1,
-      // For causal LMs, this returns only the new text
       return_full_text: false,
     };
 
     const output = await pipe(prompt, genOpts);
-
     let text = '';
     if (Array.isArray(output) && output.length) {
-      …
-      text = output[0].generated_text ?? '';
-      // Some tasks may return other keys; keep a graceful fallback
-      if (!text) {
-        text = output[0].summary_text || output[0].translation_text || '';
-      }
+      text = output[0].generated_text ?? output[0].summary_text ?? output[0].translation_text ?? '';
     } else if (typeof output === 'string') {
       text = output;
     }
-
     text = (text || '').toString().trim();
     if (!text) text = '(no output)';
 
-    // Update UI and history
     botBubble.textContent = text;
     state.history.push({ role: 'assistant', content: text });
     scrollToBottom();
   } catch (err) {
-    …
+    const text = formatErrorText(err, 'Generation failed');
+    botBubble.textContent = text;
+    state.history.push({ role: 'assistant', content: text });
+    elStatusText.textContent = 'Error';
+    elStatusDot.className = 'dot err';
   } finally {
     setBusy(false);
   }
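When the tokenizer offers no chat template, the fallback prompt is plain role-prefixed text ending in an "Assistant:" cue for the model to complete. For an illustrative short history it would look like:

    System: You are a helpful assistant.
    User: Hi!
    Assistant: Hello! How can I help?
    User: Summarize this page.
    Assistant: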
@@ -419,8 +440,14 @@
   const text = (elInput.value || '').trim();
   if (!text || state.busy) return;
   elInput.value = '';
-
-  await generateReply(text);
+  try {
+    await generateReply(text);
+  } catch (err) {
+    // Catch any unexpected errors from generateReply
+    pushError(err, 'Unexpected chat error');
+  } finally {
+    elInput.focus();
+  }
 });
 
 elInput.addEventListener('keydown', (e) => {
@@ -434,31 +461,39 @@
   if (state.busy) return;
   const nextId = elModel.value;
   state.modelId = nextId;
-
-  // Indicate model switch
   addMessage('assistant', `Switching to model: ${nextId}`);
-  // Warm-load in background; keep UI responsive
   try {
     await ensureModelLoaded(nextId);
     elStatusText.textContent = 'Ready';
     elStatusDot.className = 'dot ok';
   } catch (err) {
-    …
+    pushError(err, 'Model switch failed');
   }
 });
 
+// Global error surfaces
+window.addEventListener('error', (event) => {
+  const { message, filename, lineno, colno, error } = event;
+  const meta = filename ? ` @ ${filename}:${lineno}:${colno}` : '';
+  const err = error || new Error((message || 'Uncaught error') + meta);
+  pushError(err, 'Uncaught error');
+});
+
+window.addEventListener('unhandledrejection', (event) => {
+  pushError(event.reason, 'Unhandled promise rejection');
+});
+
 // Boot
 (async function init() {
-  // Prime UI with a friendly greeting
   addMessage('assistant', 'Hello! Pick a model above and say hi. All generation happens in your browser.');
-  // Preload default model
+  // Preload default model after a short delay
   setTimeout(async () => {
     try {
       await ensureModelLoaded(state.modelId);
       elStatusText.textContent = 'Ready';
       elStatusDot.className = 'dot ok';
     } catch (err) {
-      …
+      pushError(err, 'Initial model preload failed');
     }
   }, 150);
 })();
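With these two listeners, failures that escape every local try/catch still surface as error bubbles instead of dying silently in the console. Hypothetical triggers (exact messages vary by browser):

    // An uncaught synchronous error reaches the 'error' listener:
    setTimeout(() => { throw new Error('boom'); }, 0);
    // → "⚠️ Uncaught error: Error: boom"

    // A rejection nobody catches reaches 'unhandledrejection':
    Promise.reject(new Error('model fetch aborted'));
    // → "⚠️ Unhandled promise rejection: Error: model fetch aborted"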
chat-full.js (changed)
@@ -70,9 +70,16 @@
   const task = taskForModel(model);
   setStatus(`Завантаження моделі (${task})…`, true);
   try {
-    // …
-    …
+    // Check that Transformers.js is actually available
+    if (!window.transformers || typeof window.transformers.pipeline !== 'function') {
+      const msg = "Transformers.js не завантажено або недоступно. Перевірте підключення скрипта (має бути перед chat-full.js).";
+      const err = new Error(msg);
+      setStatus("Помилка завантаження моделі", false);
+      pushMsg("sys", `Помилка при завантаженні '${model}': ${msg}`,
+        err.stack);
+      throw err;
+    }
+    // window.transformers.env.backends.onnx.wasm.wasmPaths = ...
     const pipe = await window.transformers.pipeline(task, model);
     const entry = { pipe, task };
     cache.set(model, entry);
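The guard assumes an earlier script attached the library to window.transformers, as the error message says. One hedged way the host page could provide that global (the CDN URL and import style are assumptions, not part of this commit):

    <!-- before chat-full.js -->
    <script type="module">
      import * as transformers from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
      window.transformers = transformers;  // expose the global chat-full.js expects
    </script>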
@@ -157,4 +164,18 @@
 
 // Initial state
 promptEl.focus();
+
+// Global error handler
+window.onerror = function (msg, url, line, col, error) {
+  let details = '';
+  if (error && error.stack) {
+    details = error.stack;
+  } else {
+    details = `${msg} at ${url}:${line}:${col}`;
+  }
+  pushMsg('sys', `global unhandled: ${msg}`, details);
+  // Do not interrupt default handling
+  return false;
+};
+
 })();
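For reference, window.onerror receives (message, source, line, col, errorObject), and returning false keeps the browser's default console logging, so errors are reported in both places. A hypothetical trigger and the resulting sys message (wording varies by browser):

    setTimeout(() => missingFn(), 0);
    // → pushMsg('sys', "global unhandled: Uncaught ReferenceError: missingFn is not defined", <stack>)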