import random
from glob import glob

from datasets import Dataset, DatasetDict

# Collect every tokenized file (CoNLL-style .tokens files).
token_files = glob('tokenized/*.tokens')
total_files = len(token_files)

print(f"Found {total_files} token files")
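# Added guard (not in the original flow): stop early when nothing was found,
# rather than pushing three empty splits to the Hub.
if total_files == 0:
    raise SystemExit("No .tokens files found under 'tokenized/'.")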

# Target number of files per split (23 + 8 + 8 = 39 in total).
train_size = 23
dev_size = 8
test_size = 8

# If fewer files are available than requested, rescale the splits
# proportionally; the test split absorbs the rounding remainder so
# every file is still assigned to exactly one split.
if total_files < (train_size + dev_size + test_size):
    print(f"Warning: Not enough files ({total_files}) for the requested split sizes.")

    total_requested = train_size + dev_size + test_size
    train_size = int(total_files * (train_size / total_requested))
    dev_size = int(total_files * (dev_size / total_requested))
    test_size = total_files - train_size - dev_size
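# Worked example of the rescaling: with 20 files on disk,
# train = int(20 * 23/39) = 11, dev = int(20 * 8/39) = 4, and
# test = 20 - 11 - 4 = 5, so all 20 files are still used.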

# Shuffle with a fixed seed so the split is reproducible across runs.
random.seed(42)
random.shuffle(token_files)

# Slice the shuffled list into the three splits.
train_files = token_files[:train_size]
dev_files = token_files[train_size:train_size + dev_size]
test_files = token_files[train_size + dev_size:train_size + dev_size + test_size]
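# Added sanity check (not in the original script): the three slices must be
# pairwise disjoint, which holds exactly when their union is as large as the
# sum of their lengths.
assert len(set(train_files) | set(dev_files) | set(test_files)) == \
    len(train_files) + len(dev_files) + len(test_files)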


def process_files(file_list):
    """Parse CoNLL-style token files into sentence-level records.

    Non-empty lines are expected to hold at least three whitespace-separated
    columns, with the surface token in column 0 and the NER tag in column 2.
    Blank lines mark sentence boundaries.
    """
    result = []
    for file in file_list:
        tokens = []
        ner_tags = []

        with open(file, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()

                # A blank line closes the current sentence.
                if not line:
                    if tokens:
                        result.append({
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                            "file_name": file
                        })
                    tokens = []
                    ner_tags = []
                    continue

                parts = line.split()

                # Skip malformed lines with fewer than three columns.
                if len(parts) >= 3:
                    token = parts[0]
                    ner_tag = parts[2]

                    tokens.append(token)
                    ner_tags.append(ner_tag)

        # Flush the final sentence if the file does not end with a blank line.
        if tokens:
            result.append({
                "tokens": tokens,
                "ner_tags": ner_tags,
                "file_name": file
            })

    return result
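# Illustration (hypothetical file contents): a .tokens file such as
#
#   Emotet   X   B-Malware
#   spreads  X   O
#   quickly  X   O
#
# becomes one record per sentence:
#
#   {"tokens": ["Emotet", "spreads", "quickly"],
#    "ner_tags": ["B-Malware", "O", "O"],
#    "file_name": "tokenized/report_01.tokens"}
#
# The middle column is never stored; only columns 0 and 2 are kept.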

# Parse each split into sentence-level examples.
train_data = process_files(train_files)
dev_data = process_files(dev_files)
test_data = process_files(test_files)

# Wrap each split in a Hugging Face Dataset.
train_dataset = Dataset.from_list(train_data)
dev_dataset = Dataset.from_list(dev_data)
test_dataset = Dataset.from_list(test_data)
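# Optional sketch: ner_tags are stored as plain strings here. To store them
# as integer ClassLabel features instead (often expected by
# token-classification code), the column could be cast after collecting the
# label inventory, e.g.:
#
#   from datasets import ClassLabel, Sequence
#   label_names = sorted({tag for ex in train_data for tag in ex["ner_tags"]})
#   train_dataset = train_dataset.cast_column(
#       "ner_tags", Sequence(ClassLabel(names=label_names)))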

# Bundle the splits under the standard train/validation/test keys.
dataset_dict = DatasetDict({
    "train": train_dataset,
    "validation": dev_dataset,
    "test": test_dataset
})

# Note: process_files returns one record per sentence, not per file.
print(f"Train split: {len(train_data)} examples")
print(f"Validation split: {len(dev_data)} examples")
print(f"Test split: {len(test_data)} examples")
print(f"Dataset features: {train_dataset.features}")

# Upload all three splits to the Hugging Face Hub.
dataset_dict.push_to_hub('extraordinarylab/malware-text-db')
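# Sketch for verifying the upload (assumes the push succeeded and the repo
# is readable with the current credentials):
#
#   from datasets import load_dataset
#   reloaded = load_dataset('extraordinarylab/malware-text-db')
#   print(reloaded)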