# xsum-kk3 / talgat.py
# -*- coding: utf-8 -*-
import re, json, sys, subprocess
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from datasets import load_dataset
from tqdm import tqdm
# If needed: pip install datasets==3.6.0
# ===== Parameters =====
BASE_MODEL = "google/gemma-3-4b-it"
MODEL_PATH = "talgatzh/gemma-finetuned-model2"
OUTPUT_FILE = "gemma_inference_results_from_multidomain_fixedzxcs555.jsonl"
MAX_NEW_TOKENS = 60
MAX_TEXTS = 20  # increase (>=200) for a more stable metric
# ===== ROUGE (installed on demand if missing) =====
try:
    import evaluate
except ImportError:
    subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "evaluate"])
    import evaluate
# ===== Model and tokenizer =====
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH,
    device_map="auto",
    torch_dtype=torch.float16,
    trust_remote_code=True,
).eval()
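# Note (an assumption, not from the original script): Gemma checkpoints are
# published in bfloat16, and float16 inference can occasionally produce NaNs
# or degenerate output; torch_dtype=torch.bfloat16 is the safer choice on
# GPUs that support it.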
# Set pad_token for stability (Gemma uses pad = eos)
if tokenizer.pad_token_id is None:
    tokenizer.pad_token = tokenizer.eos_token
# ===== Utilities =====
def is_kazakh(text: str) -> bool:
    # Heuristic: the text contains at least one letter unique to the Kazakh alphabet
    return any(c in text.lower() for c in "қәөүңғұһі")
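# Quick sanity check of the heuristic (illustrative examples, not from the
# original script): letters such as "қ" or "ү" occur in Kazakh but not in
# Russian Cyrillic, so Russian-only text is filtered out.
assert is_kazakh("Қазақстан Республикасы")       # has Kazakh-specific "қ"
assert not is_kazakh("Российская Федерация")     # Russian-only Cyrillic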
_SENT_SPLIT = re.compile(r'(?<=[\.\!\?…])\s+|\n+')
def lead_n(text: str, n=3) -> str:
    # First n sentences of the text (used as the Lead-n baseline below)
    sents = [s.strip() for s in _SENT_SPLIT.split(text.strip()) if s.strip()]
    return " ".join(sents[:n])
def build_chat_prompt(text: str) -> str:
    # The instruction (in Kazakh) asks for an extractive summary: pick the
    # 2-3 most important sentences and copy them verbatim, no paraphrasing.
    instr = (
        "Мақсат: Экстрактивті қысқаша мазмұн.\n"
        "Ереже: Тек бастапқы мәтіндегі сөйлемдерді көшір. Өз сөзіңмен жазба. Синоним қолданба.\n"
        "Мәтіннен тек 2–3 ең маңызды сөйлемді таңда да, сол күйінде жаз.\n"
        "Формат: тек сөйлемдер, жаңа сөздер қоспа.\n\n"
        "Мәтін:\n"
        f"{text.strip()}\n\n"
        "Қысқаша мазмұн:"
    )
    messages = [{"role": "user", "content": instr}]
    # Render with the proper Gemma-IT chat template
    return tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
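# For reference, the Gemma-IT chat template should render the prompt roughly
# as below (the exact markup depends on the tokenizer version):
#   <bos><start_of_turn>user
#   {instr}<end_of_turn>
#   <start_of_turn>model
# which is why the role/turn markers are stripped from the output later on.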
# ===== Data =====
dataset = load_dataset("kz-transformers/multidomain-kazakh-dataset",
split="train", streaming=True)
INPUT_TEXTS = []
for ex in dataset:
    txt = (ex.get("text") or "").strip()
    if is_kazakh(txt) and len(txt.split()) > 20:
        INPUT_TEXTS.append(txt)
    if len(INPUT_TEXTS) >= MAX_TEXTS:
        break
print(f"✔ Отобрано {len(INPUT_TEXTS)} казахских текстов из multidomain")
# ===== Generation =====
results, preds, refs = [], [], []
for text in tqdm(INPUT_TEXTS, desc="Generating summaries"):
    prompt_text = build_chat_prompt(text)
    # Note: the chat template already inserts <bos>; if the tokenizer adds a
    # second BOS here, add_special_tokens=False avoids the duplicate.
    toks = tokenizer(prompt_text, return_tensors="pt", truncation=True, max_length=2048)
    toks = {k: v.to(model.device) for k, v in toks.items()}
    with torch.no_grad():
        out = model.generate(
            **toks,
            max_new_tokens=MAX_NEW_TOKENS,
            do_sample=False,  # greedy decoding (a temperature would be ignored)
            repetition_penalty=1.05,
            no_repeat_ngram_size=6,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id,
            use_cache=True,
        )
    # === Keep ONLY the new tokens generated after the input ===
    input_len = toks["input_ids"].shape[1]
    gen_ids = out[0, input_len:]
    generated = tokenizer.decode(gen_ids, skip_special_tokens=True).strip()
    # Strip any leaked role/turn markers
    for bad in ("model", "<start_of_turn>", "<end_of_turn>"):
        if generated.lower().startswith(bad):
            generated = generated[len(bad):].lstrip(": ").strip()
        generated = generated.replace(bad, "").strip()
    # Fallback: if the output is empty, take the first 2-3 source sentences
    if not generated:
        generated = lead_n(text, n=3)
    reference = lead_n(text, n=3)
    results.append({"text": text, "summary": generated, "reference": reference})
    preds.append(generated)
    refs.append(reference)
# ===== Save =====
with open(OUTPUT_FILE, "w", encoding="utf-8") as f:
for r in results:
f.write(json.dumps(r, ensure_ascii=False) + "\n")
print(f"✅ Сохранено {len(results)} суммаризаций → {OUTPUT_FILE}")
# ===== ROUGE vs Lead-3 (a quick diagnostic proxy) =====
rouge = evaluate.load("rouge")
scores = rouge.compute(predictions=preds, references=refs, use_stemmer=True)
scores_pct = {k: round(v * 100, 2) for k, v in scores.items()}
print("🔎 ROUGE vs Lead-3:")
for k in ("rouge1", "rouge2", "rougeL", "rougeLsum"):
print(f"{k.upper()}: {scores_pct.get(k, 0)}%")
# ===== Quick debug of the first 3 pairs =====
for i in range(min(3, len(results))):
    print("\n--- SAMPLE", i+1, "---")
    print("PRED:", results[i]["summary"])
    print("REF :", results[i]["reference"])