import csv
import json
import numpy as np
import torch
import os
from minicons import scorer
from pathlib import Path
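# Each benchmark file is a semicolon-separated CSV with a header row: column 0 holds the
# acceptable ("good") sentence and column 1 its minimally different, unacceptable ("bad") counterpart.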
def load_sentences(filepath):
    sentence_pairs = []
    with open(filepath, 'r', encoding='utf-8') as file:
        reader = csv.reader(file, delimiter=';')
        next(reader)
        for row in reader:
            good_sentence = row[0]
            bad_sentence = row[1]
            sentence_pairs.append([good_sentence, bad_sentence])
    return sentence_pairs
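# Score a [good, bad] sentence pair by summing token log-probabilities over each sequence;
# 'mlm' mode additionally uses minicons' within-word left-to-right pseudo-log-likelihood metric.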
def compute_score(data, model, mode):
    if mode == 'ilm':
        score = model.sequence_score(data, reduction=lambda x: x.sum(0).item())
    elif mode == 'mlm':
        score = model.sequence_score(data, reduction=lambda x: x.sum(0).item(), PLL_metric='within_word_l2r')
    else:
        raise ValueError(f"Unsupported scoring mode: {mode}")
    return score
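# Benchmark every CSV under ./data/base and write one JSON summary line per dataset file
# (accuracy, mean good-minus-bad score difference) to results-<model_name>.jsonl.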
def process_files(model, mode, model_name):
    root_folder = Path("./data/base")
    file_names = sorted([str(file) for file in root_folder.iterdir() if file.name.endswith("csv")])
    print(f"Running the benchmark on the following {len(file_names)} datasets:\n*", "\n* ".join(file_names))
    f_out = open(f'results-{model_name.replace("/", "-")}.jsonl', "wt")
    for file_path in file_names:
        try:
            pairs = load_sentences(file_path)
            results = []
            differences = 0
            accuracy = 0
            for pair in pairs:
                score = compute_score(pair, model, mode)
                results.append({
                    'good_sentence': pair[0],
                    'bad_sentence': pair[1],
                    'good_score': score[0],
                    'bad_score': score[1],
                    'difference': score[0] - score[1],
                    'correct': score[0] > score[1]
                })
                if score[0] > score[1]:
                    accuracy += 1
                differences += score[0] - score[1]
            mean_difference = differences / len(pairs)
            accuracy = accuracy / len(pairs)
            summary = {
                'file_name': file_path,
                'mean_difference': mean_difference,
                'accuracy': accuracy * 100,
                'total_pairs': len(pairs),
                'model_name': model_name,
            }
            f_out.write(json.dumps(summary) + "\n")
            print(summary)
        except Exception as e:
            print(f"Error processing {file_path}: {str(e)}")
            continue
    f_out.close()
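# Turkish masked language model checkpoints to benchmark.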
mlm_model_names = [
    "dbmdz/electra-small-turkish-cased-generator",
    "dbmdz/electra-base-turkish-cased-generator",
    "dbmdz/electra-base-turkish-mc4-cased-generator",
    "dbmdz/electra-base-turkish-mc4-uncased-generator",
    "dbmdz/bert-base-turkish-cased",
    "dbmdz/bert-base-turkish-uncased",
    "dbmdz/bert-base-turkish-128k-cased",
    "dbmdz/bert-base-turkish-128k-uncased",
    "dbmdz/distilbert-base-turkish-cased",
    "dbmdz/convbert-base-turkish-cased",
    "dbmdz/convbert-base-turkish-mc4-cased",
    "dbmdz/convbert-base-turkish-mc4-uncased",
]
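# Run masked-LM (pseudo-log-likelihood) scoring for every checkpoint, on GPU when available.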
device = 'cuda' if torch.cuda.is_available() else 'cpu'
mode = 'mlm'
for model_name in mlm_model_names:
    model = scorer.MaskedLMScorer(model_name, device)
    process_files(
        model=model,
        mode=mode,
        model_name=model_name,
    )
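
# The 'ilm' branch of compute_score is not exercised above. A minimal sketch of how
# incremental (causal) LMs could be scored with the same pipeline follows; the checkpoint
# name is only a placeholder, and the block is commented out so the script's behaviour
# is unchanged.
#
# ilm_model_names = [
#     "your-org/some-turkish-causal-lm",  # placeholder, replace with a real checkpoint
# ]
# for model_name in ilm_model_names:
#     model = scorer.IncrementalLMScorer(model_name, device)
#     process_files(model=model, mode='ilm', model_name=model_name)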
