|
|
"""Sanskrit OCR Post-Correction Dataset"""
|
|
|
|
|
|
import csv
|
|
|
import os
|
|
|
import datasets
|
|
|
|
|
|
# BibTeX citation for the EMNLP 2022 Findings paper introducing this dataset.
_CITATION = """\
@inproceedings{maheshwari2022benchmark,
title={A Benchmark and Dataset for Post-OCR text correction in Sanskrit},
author={Maheshwari, Ayush and Singh, Nikhil and Krishna, Amrith and Ramakrishnan, Ganesh},
booktitle={Findings of the Association for Computational Linguistics: EMNLP 2022},
pages={6258--6265},
year={2022}
}
"""

# Human-readable summary surfaced in the dataset card / viewer.
_DESCRIPTION = """\
A Benchmark and Dataset for Post-OCR text correction in Sanskrit.

This dataset contains manually post-edited OCR data for Sanskrit texts in Devanagari script.
It includes:
- Train/Validation/Test splits with OCR text and corrected ground truth
- An out-of-domain test set of 500 sentences
- Source texts from classical Sanskrit works including Brahmasutra Bhashyam, Grahalaghava, and Goladhyaya
"""

_HOMEPAGE = "https://github.com/ayushbits/pe-ocr-sanskrit"

_LICENSE = "MIT"

# One CSV file per split. Paths are relative, so the download manager
# resolves them against the dataset repository / script location.
# NOTE(review): the out-of-domain file uses a different naming scheme
# (hyphen, no script suffix) — it is also the only ';'-delimited file.
_URLS = {
    "train": "train_devnagari.csv",
    "validation": "val_devnagari.csv",
    "test": "test_devnagari.csv",
    "ood_test": "ood-test.csv",
}
|
|
|
|
|
|
|
|
|
class SanskritOCRPostCorrection(datasets.GeneratorBasedBuilder):
    """Sanskrit OCR Post-Correction Dataset.

    Loads CSV files pairing raw OCR output (``input_text``) with manually
    corrected ground truth (``target_text``). Exposes four splits: train,
    validation, test, and an out-of-domain "ood_test" split.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="default",
            version=VERSION,
            description="Sanskrit OCR post-correction dataset with all splits",
        ),
    ]

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return dataset metadata (features, description, citation, license)."""
        features = datasets.Features(
            {
                "input_text": datasets.Value("string"),   # raw OCR output
                "target_text": datasets.Value("string"),  # post-edited ground truth
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        Downloads/resolves every file in ``_URLS`` once, then builds one
        generator per split. The three standard splits use the canonical
        ``datasets.Split`` names; the out-of-domain set is exposed under
        the custom split name "ood_test".
        """
        data_files = dl_manager.download_and_extract(_URLS)

        # (split name passed to datasets, key into _URLS / split tag)
        splits = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "validation"),
            (datasets.Split.TEST, "test"),
            ("ood_test", "ood_test"),
        ]
        return [
            datasets.SplitGenerator(
                name=name,
                gen_kwargs={
                    "filepath": data_files[key],
                    "split": key,
                },
            )
            for name, key in splits
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples as ``(index, {"input_text", "target_text"})`` pairs.

        Args:
            filepath: Local path to the CSV file for this split.
            split: Split tag; the out-of-domain file is ';'-delimited,
                every other split uses the default ',' delimiter.

        Raises:
            KeyError: If a row lacks the expected CSV header columns.
        """
        delimiter = ";" if split == "ood_test" else ","

        # newline="" is required when handing a text file to the csv module:
        # without it, quoted fields containing embedded newlines (and \r\n
        # line endings) can be parsed incorrectly.
        with open(filepath, newline="", encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter=delimiter)
            for idx, row in enumerate(reader):
                yield idx, {
                    "input_text": row["input_text"],
                    "target_text": row["target_text"],
                }
|
|
|
|