Dataset card metadata:
Tasks: Text Generation
Modalities: Text
Sub-tasks: language-modeling
Languages: code
Size: 100K - 1M
ArXiv: (none listed)
License: (none listed)
| from tqdm import tqdm | |
| from datasets import Dataset | |
"""Run inside the XLCOST_DATA folder after downloading the XLCoST data from https://github.com/reddy-lab-code-research/XLCoST"""
class Example(object):
    """A single training/test example: one source line paired with one target line."""

    def __init__(self, idx, source, target):
        # Zero-based position of the example within its pair of files.
        self.idx = idx
        # Stripped line from the source-side file (the .txt half of the pair).
        self.source = source
        # Stripped line from the target-side file (the code half of the pair).
        self.target = target
def read_examples(filename):
    """Read aligned source/target examples from a pair of parallel files.

    Args:
        filename: two comma-separated paths, ``"<src_file>,<trg_file>"``.
            The files are read line-by-line in lockstep; iteration stops at
            the shorter file (``zip`` semantics, same as the original).

    Returns:
        list[Example]: one ``Example`` per aligned line pair, with both
        sides whitespace-stripped and ``idx`` set to the line number.

    Raises:
        ValueError: if ``filename`` does not contain exactly two paths.
    """
    parts = filename.split(',')
    # Raise instead of assert: asserts are stripped under `python -O`,
    # which would silently mis-split the argument.
    if len(parts) != 2:
        raise ValueError(f"expected '<src_file>,<trg_file>', got: {filename!r}")
    src_filename, trg_filename = parts
    examples = []
    # Explicit UTF-8: the default encoding is platform-dependent, and the
    # XLCoST dumps are UTF-8 text.
    with open(src_filename, encoding="utf-8") as f1, open(trg_filename, encoding="utf-8") as f2:
        for idx, (line1, line2) in enumerate(zip(f1, f2)):
            examples.append(
                Example(
                    idx=idx,
                    source=line1.strip(),
                    target=line2.strip(),
                )
            )
    return examples
def create_data(filename):
    """Build a ``datasets.Dataset`` with "text"/"code" columns from a pair file.

    Args:
        filename: comma-separated ``"<src_file>,<trg_file>"`` string, passed
            straight through to ``read_examples``.

    Returns:
        A ``datasets.Dataset`` with one row per example: ``text`` is the
        source side, ``code`` the target side.
    """
    examples = read_examples(filename)
    print(len(examples))
    # Column extraction via comprehensions instead of an index loop over
    # range(len(...)); tqdm keeps the progress display of the original.
    text = [example.source for example in tqdm(examples)]
    code = [example.target for example in examples]
    return Dataset.from_dict({"text": text, "code": code})
if __name__ == "__main__":
    # Clone the xlcost-text-to-code hub repo first (output paths below write into it).
    LANG = ["Python", "C", "C#", "Java", "PHP", "Javascript", "C++"]
    EXTENSION = ["py", "c", "cs", "java", "php", "js", "cpp"]
    # (input data dir, file tag, output level) per granularity:
    # snippet level comes from the per-comment tokenized pairs,
    # program level from the full-description tokenized pairs.
    LEVELS = [
        ("pair_data_tok_1_comment", "comment", "snippet-level"),
        ("pair_data_tok_full_desc_comment", "desc", "program-level"),
    ]
    # The input file is named "val-*" but the output split is "valid.json".
    SPLITS = [("train", "train"), ("val", "valid"), ("test", "test")]
    for lang, ext in zip(LANG, EXTENSION):
        # For each language this saves train/valid/test subsets for both
        # snippet and program levels.
        print(f"language: {lang}")
        # "C#" is spelled "Csharp" in the output directory names.
        out_lang = "Csharp" if lang == "C#" else lang
        for data_dir, tag, level in LEVELS:
            out_dir = f"xlcost-text-to-code/data/{out_lang}-{level}"
            for in_split, out_split in SPLITS:
                base = f"generation/{data_dir}/{lang}-{tag}/{in_split}-{lang}-{tag}-tok"
                dataset = create_data(f"{base}.txt,{base}.{ext}")
                dataset.to_json(f"{out_dir}/{out_split}.json", lines=True)
    # Push to hub the folder xlcost (containing data/ and the xlcost.py
    # dataset builder script).