arc_agi_v1 / arc_to_my_hf.py
#!/usr/bin/env python3
# /// script
# requires-python = ">=3.12,<3.14"
# dependencies = [
#     "datasets",
#     "pyarrow",
# ]
# ///
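# The header above is PEP 723 inline script metadata, so the script can be run
# without a manually managed environment, e.g. `uv run arc_to_my_hf.py ...`
# (assuming uv is installed; any PEP 723-aware runner works).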
import argparse
import json
from pathlib import Path
from typing import Dict

from datasets import Dataset, DatasetDict, load_dataset


class ARCToHFConverter:
    """Converts ARC-AGI task JSON files to HuggingFace Arrow format."""

    def __init__(self, input_dir: Path):
        self.input_dir = Path(input_dir)
        self.output_dir = self.input_dir.parent / f"hf_{self.input_dir.name}"

    def load_task(self, json_path: Path) -> Dict:
        """Load a single task JSON file."""
        with open(json_path, 'r') as f:
            return json.load(f)
    def convert_task(self, task_data: Dict, task_id: str) -> Dict:
        """Convert a single task to the HF schema.

        Returns:
            {
                "id": str,
                "list": [
                    [grid, grid, ...],  # example inputs
                    [grid, grid, ...],  # example outputs
                    [grid, ...]         # test inputs
                ],
                "label": [grid, ...]    # test outputs
            }
        """
        return {
            "id": task_id,
            "list": [
                [ex["input"] for ex in task_data["train"]],   # index 0: example inputs
                [ex["output"] for ex in task_data["train"]],  # index 1: example outputs
                [ex["input"] for ex in task_data["test"]]     # index 2: test inputs
            ],
            "label": [ex["output"] for ex in task_data["test"]]  # test outputs
        }
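
    # Illustrative sketch (not from the source): a task file with one training
    # pair and one test pair, using made-up 1x2 grids and a hypothetical task
    # id, would convert as follows. Each grid is a list of rows.
    #
    #   task_data = {"train": [{"input": [[1, 2]], "output": [[2, 1]]}],
    #                "test":  [{"input": [[3, 4]], "output": [[4, 3]]}]}
    #
    #   convert_task(task_data, "0a1b2c3d") ->
    #   {"id": "0a1b2c3d",
    #    "list": [[[[1, 2]]], [[[2, 1]]], [[[3, 4]]]],
    #    "label": [[[4, 3]]]}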
    def convert_directory(self, subdir_name: str) -> Dataset:
        """Convert all JSON files in a subdirectory to an HF Dataset."""
        subdir = self.input_dir / subdir_name
        json_files = sorted(subdir.glob("*.json"))
        print(f"Converting {subdir_name}/ directory ({len(json_files)} tasks)...")

        tasks = []
        for json_path in json_files:
            task_id = json_path.stem  # filename without .json
            task_data = self.load_task(json_path)
            converted = self.convert_task(task_data, task_id)
            tasks.append(converted)

        return Dataset.from_list(tasks)
    def convert_all(self) -> DatasetDict:
        """Convert both training and evaluation subdirectories."""
        train_dataset = self.convert_directory("training")
        test_dataset = self.convert_directory("evaluation")

        return DatasetDict({
            "train": train_dataset,
            "test": test_dataset
        })
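
    # Expected input layout (inferred from the code above; it matches the
    # data/ directory of the public ARC-AGI repository):
    #
    #   <input_dir>/
    #       training/     one <task_id>.json file per task
    #       evaluation/   one <task_id>.json file per task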
    def save(self, dataset_dict: DatasetDict):
        """Save dataset to disk in Parquet format for HuggingFace Hub."""
        # Create output directory structure
        self.output_dir.mkdir(parents=True, exist_ok=True)
        data_dir = self.output_dir / "data"
        data_dir.mkdir(exist_ok=True)

        # Export to parquet files (HuggingFace Hub standard format)
        print(f"Saving train split to {data_dir / 'train-00000-of-00001.parquet'}...")
        dataset_dict['train'].to_parquet(data_dir / 'train-00000-of-00001.parquet')
        print(f"Saving test split to {data_dir / 'test-00000-of-00001.parquet'}...")
        dataset_dict['test'].to_parquet(data_dir / 'test-00000-of-00001.parquet')

        print(f"\n✓ Dataset saved to {self.output_dir}")
        print(f" - Train: {len(dataset_dict['train'])} examples")
        print(f" - Test: {len(dataset_dict['test'])} examples")
def look_at_data():
    """Quick sanity check: reload the exported parquet files and print a summary.

    Note: the data_files paths below are relative, so this assumes the current
    working directory is the output directory created by save().
    """
    # Load the dataset from parquet files
    print("Loading dataset from parquet files...")
    dataset = load_dataset('parquet', data_files={
        'train': 'data/train-00000-of-00001.parquet',
        'test': 'data/test-00000-of-00001.parquet'
    })

    print("\nDataset loaded successfully!")
    print(f"Splits: {list(dataset.keys())}")
    print(f"Train size: {len(dataset['train'])}")
    print(f"Test size: {len(dataset['test'])}")
    print(f"\nFeatures: {dataset['train'].features}")
    print(f"\nFirst example ID: {dataset['train'][0]['id']}")
def main():
    parser = argparse.ArgumentParser(
        description="Convert ARC-AGI JSON tasks to HuggingFace dataset"
    )
    parser.add_argument(
        "input_dir",
        type=str,
        help="Parent directory containing training/ and evaluation/ subdirectories"
    )
    args = parser.parse_args()

    print(f"Input directory: {args.input_dir}")
    converter = ARCToHFConverter(args.input_dir)
    print(f"Output directory: {converter.output_dir}\n")

    dataset_dict = converter.convert_all()
    converter.save(dataset_dict)
if __name__ == "__main__":
    main()
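
# Example invocation (the path is illustrative; point input_dir at a directory
# containing training/ and evaluation/ subfolders of ARC task JSON files):
#
#   uv run arc_to_my_hf.py ~/ARC-AGI/data
#
# The output lands next to the input, e.g. ~/ARC-AGI/hf_data/data/*.parquet.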