import itertools
import json
import re
import sqlite3
from abc import abstractmethod
from datetime import datetime
from functools import partial
from hashlib import md5
from multiprocessing import Pool
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional
from urllib.parse import unquote_plus

import pyarrow as pa

import datasets
from datasets import Dataset, load_dataset
from langdetect import detect


_CITATION = """"""

_DESCRIPTION = """"""

_HOMEPAGE = ""

_LICENSE = ""


_FEATURES = datasets.Features(
    {
        "image_url": datasets.Value("string"),
        "image": datasets.Image(),
        "texts": [datasets.Value("string")],
        "source": datasets.Value("string"),
        "meta": datasets.Value("string"),
    }
)
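
# Illustrative only: every loader below must produce batches that match
# `_FEATURES`, i.e. a dict mapping each column name to a list of per-row
# values (the URL, path and captions here are made up for the example):
#
#     batch = {
#         "image_url": ["https://example.com/cat.jpg", None],
#         "image": [None, "/local/path/000000000009.jpg"],
#         "texts": [["a cat sitting on a sofa"], ["a plate of food"]],
#         "source": ["some_source", "some_source"],
#         "meta": ["{}", "{}"],
#     }
#     table = pa.table(_FEATURES.encode_batch(batch))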


def json_serializer(o):
    if isinstance(o, datetime):
        return str(o)

    raise TypeError(f"Object of type {o.__class__.__name__} is not JSON serializable")


class BaseLoader:
    def __init__(self, source: str, split: str, writer_batch_size: int):
        self.source = source
        self.split = split
        self.writer_batch_size = writer_batch_size

    @abstractmethod
    def _generate_batches(self):
        raise NotImplementedError()


class DatasetsLoader(BaseLoader):
    """Helper for sources that are already implemented as `datasets` datasets."""

    def __init__(
        self,
        dataset_name: str,
        config_name: Optional[str],
        split: str,
        num_proc: int,
        datasets_batch_size: int = 1000,
    ):
        super(DatasetsLoader, self).__init__(
            source=dataset_name, split=split, writer_batch_size=datasets_batch_size
        )
        self.dataset_name = dataset_name
        self.config_name = config_name
        self.num_proc = num_proc
        self.datasets_batch_size = datasets_batch_size

    @abstractmethod
    def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
        """Cast a batch of source rows to the PMD features defined in `_FEATURES`."""
        raise NotImplementedError()
    def _generate_batch_table(self, batch_start: int, dset: Dataset) -> pa.Table:
        dataset_size = len(dset)
        batch_end = min(batch_start + self.datasets_batch_size, dataset_size)
        batch = dset[batch_start:batch_end]
        output_batch = self.cast_to_pmd_features(batch)
        return pa.table(_FEATURES.encode_batch(output_batch))

    def _generate_batches(self):
        dataset = load_dataset(self.dataset_name, self.config_name, split=self.split)
        dataset_size = len(dataset)

        if self.num_proc == 1:
            for batch_start in range(0, dataset_size, self.datasets_batch_size):
                batch_casted_pmd_features = self._generate_batch_table(
                    dset=dataset, batch_start=batch_start
                )
                yield batch_casted_pmd_features
        else:
            assert self.num_proc > 1
            with Pool(self.num_proc) as pool:
                batch_iterator = pool.imap(
                    partial(self._generate_batch_table, dset=dataset),
                    range(0, dataset_size, self.datasets_batch_size),
                )
                for batch_casted_pmd_features in batch_iterator:
                    yield batch_casted_pmd_features
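
# Minimal usage sketch (illustrative, not executed here): a `DatasetsLoader`
# subclass only has to implement `cast_to_pmd_features`; the base class then
# streams PMD-formatted pyarrow tables, e.g.
#
#     loader = SBULoader(split="train", num_proc=1)
#     for table in loader._generate_batches():
#         ...  # each `table` is a pa.Table with the `_FEATURES` columns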


class BaseLoaderWithDLManager(BaseLoader):
    """Uses `dl_manager` to build the `gen_kwargs` needed to generate examples."""

    def __init__(
        self,
        dl_manager,
        source: str,
        split: str,
        num_proc: int,
        chunk_size: int,
        writer_batch_size: int = 10_000,
    ):
        super(BaseLoaderWithDLManager, self).__init__(
            source=source, split=split, writer_batch_size=writer_batch_size
        )
        self.gen_kwargs = self.generate_gen_kwargs(dl_manager)
        self.chunk_size = chunk_size
        self.num_proc = num_proc

    @abstractmethod
    def generate_gen_kwargs(self, dl_manager):
        raise NotImplementedError()

    @abstractmethod
    def _build_rows_iterator(self, chunk_size: int, **kwargs) -> Iterator[List[Any]]:
        raise NotImplementedError()

    @abstractmethod
    def _generate_examples(self, examples: List[Any], **kwargs) -> Dict[str, List[Any]]:
        raise NotImplementedError()

    def _generate_tables(self, examples: List[Any], **kwargs) -> pa.Table:
        return pa.table(_FEATURES.encode_batch(self._generate_examples(examples, **kwargs)))

    def _generate_batches(self):
        rows_iterator = self._build_rows_iterator(chunk_size=self.chunk_size, **self.gen_kwargs)

        with Pool(self.num_proc) as pool:
            tables_iterator = pool.imap(
                partial(self._generate_tables, **self.gen_kwargs),
                rows_iterator,
                chunksize=1,
            )
            for table in tables_iterator:
                yield table
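
# Flow of `_generate_batches` above: `_build_rows_iterator` yields chunks of raw
# rows, the worker pool maps each chunk through `_generate_tables` (raw rows ->
# `_generate_examples` -> `_FEATURES.encode_batch`), and the resulting pyarrow
# tables are yielded back to the builder in order.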


class COCOloader(BaseLoaderWithDLManager):
    _ANNOTATION_URL = (
        "http://images.cocodataset.org/annotations/annotations_trainval2017.zip"
    )
    _IMAGES_URLS = {
        "train": "http://images.cocodataset.org/zips/train2017.zip",
        "validation": "http://images.cocodataset.org/zips/val2017.zip",
    }
    _SPLIT_MAP = {"train": "train2017", "validation": "val2017"}

    def __init__(
        self,
        dl_manager,
        split: str,
        num_proc: int,
        chunk_size: int,
        writer_batch_size: int,
    ):
        super(COCOloader, self).__init__(
            dl_manager=dl_manager,
            source="coco",
            split=split,
            num_proc=num_proc,
            chunk_size=chunk_size,
            writer_batch_size=writer_batch_size,
        )

    def generate_gen_kwargs(self, dl_manager):
        annotation_file = (
            Path(dl_manager.download_and_extract(self._ANNOTATION_URL))
            / "annotations"
            / f"captions_{self._SPLIT_MAP[self.split]}.json"
        )
        image_folder = Path(
            dl_manager.download_and_extract(self._IMAGES_URLS[self.split])
        )
        return {
            "annotation_file": annotation_file,
            "base_image_path": image_folder / self._SPLIT_MAP[self.split],
        }

    def _build_rows_iterator(self, chunk_size: int, annotation_file: str, base_image_path: Path) -> Iterator[List[Any]]:
        with open(annotation_file, "r", encoding="utf-8") as fi:
            annotations = json.load(fi)

        annotations_per_image_id = {}
        for annotation in annotations["annotations"]:
            image_id = annotation["image_id"]
            if image_id in annotations_per_image_id:
                annotations_per_image_id[image_id].append(annotation)
            else:
                annotations_per_image_id[image_id] = [annotation]

        buffer = []
        for image_metadata in annotations["images"]:
            image_id = image_metadata["id"]
            for annotation in annotations_per_image_id[image_id]:
                buffer.append({
                    "annotation": annotation,
                    "image_metadata": image_metadata
                })
                if len(buffer) == chunk_size:
                    yield buffer
                    buffer = []

        if len(buffer) > 0:
            yield buffer

    def _generate_examples(self, examples: List[Any], annotation_file: str, base_image_path: Path) -> Dict[str, List[Any]]:
        return {
            "image_url": [None for _ in examples],
            "image": [str((base_image_path / f"{example['image_metadata']['id']:012}.jpg").absolute()) for example in examples],
            "texts": [[example["annotation"]["caption"]] for example in examples],
            "source": [self.source for _ in examples],
            "meta": [json.dumps(
                example,
                default=json_serializer,
                indent=2,
            ) for example in examples],
        }


class SBULoader(DatasetsLoader):
    def __init__(self, split: str, num_proc: int, datasets_batch_size: int = 1000):
        super(SBULoader, self).__init__(
            dataset_name="sbu_captions",
            config_name=None,
            split=split,
            datasets_batch_size=datasets_batch_size,
            num_proc=num_proc,
        )

    def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
        metas = {k: v for k, v in batch.items() if k not in ["image_url", "caption"]}
        batch_size = len(next(iter(batch.values())))
        return {
            "image_url": batch["image_url"],
            "image": [None] * batch_size,
            "texts": [[caption] for caption in batch["caption"]],
            "source": [self.source] * batch_size,
            "meta": [
                json.dumps(
                    {key: value[batch_id] for key, value in metas.items()},
                    default=json_serializer,
                    indent=2,
                )
                for batch_id in range(batch_size)
            ],
        }


class LocalizedNarrativesOpenImagesLoader(BaseLoaderWithDLManager):
    _ANNOTATION_URLs = {
        "train": "https://storage.googleapis.com/localized-narratives/annotations/open_images_train_v6_captions.jsonl",
        "validation": (
            "https://storage.googleapis.com/localized-narratives/annotations/open_images_validation_captions.jsonl"
        ),
        "test": "https://storage.googleapis.com/localized-narratives/annotations/open_images_test_captions.jsonl",
    }

    def __init__(
        self,
        dl_manager,
        split: str,
        num_proc: int,
        chunk_size: int,
        writer_batch_size: int,
    ):
        super(LocalizedNarrativesOpenImagesLoader, self).__init__(
            dl_manager=dl_manager,
            source="localized_narratives__open_images",
            split=split,
            num_proc=num_proc,
            chunk_size=chunk_size,
            writer_batch_size=writer_batch_size,
        )

    def generate_gen_kwargs(self, dl_manager):
        annotation_file = dl_manager.download(self._ANNOTATION_URLs[self.split])
        return {"annotation_file": annotation_file, "split": self.split}

    def _build_rows_iterator(self, chunk_size: int, annotation_file: str, split: str) -> Iterator[List[Any]]:
        with open(annotation_file, "r", encoding="utf-8") as fi:
            while True:
                chunk = tuple(itertools.islice(fi, chunk_size))
                if not chunk:
                    return
                yield chunk

    def _generate_examples(self, examples: List[Any], annotation_file: str, split: str) -> Dict[str, List[Any]]:
        annotations = [json.loads(line) for line in examples]

        for annotation in annotations:
            assert "image_url" not in annotation

        return {
            "image_url": [f"https://s3.amazonaws.com/open-images-dataset/{split}/{annotation['image_id']}.jpg" for annotation in annotations],
            "image": [None for _ in annotations],
            "texts": [[annotation["caption"]] for annotation in annotations],
            "source": [self.source for _ in annotations],
            "meta": [json.dumps(
                annotation,
                default=json_serializer,
                indent=2,
            ) for annotation in annotations],
        }


class LocalizedNarrativesCOCOLoader(BaseLoaderWithDLManager):
    _ANNOTATION_URLs = {
        "train": "https://storage.googleapis.com/localized-narratives/annotations/coco_train_captions.jsonl",
        "validation": "https://storage.googleapis.com/localized-narratives/annotations/coco_val_captions.jsonl",
    }
    _IMAGES_URLS = {
        "train": "http://images.cocodataset.org/zips/train2017.zip",
        "validation": "http://images.cocodataset.org/zips/val2017.zip",
    }
    _SPLIT_MAP = {"train": "train2017", "validation": "val2017"}

    def __init__(
        self,
        dl_manager,
        split: str,
        num_proc: int,
        chunk_size: int,
        writer_batch_size: int,
    ):
        super(LocalizedNarrativesCOCOLoader, self).__init__(
            dl_manager=dl_manager,
            source="localized_narratives__coco",
            split=split,
            num_proc=num_proc,
            chunk_size=chunk_size,
            writer_batch_size=writer_batch_size,
        )

    def generate_gen_kwargs(self, dl_manager):
        annotation_file = dl_manager.download(self._ANNOTATION_URLs[self.split])
        image_folder = Path(
            dl_manager.download_and_extract(self._IMAGES_URLS[self.split])
        )
        return {
            "annotation_file": annotation_file,
            "base_image_path": image_folder / self._SPLIT_MAP[self.split],
        }

    def _build_rows_iterator(self, chunk_size: int, annotation_file: str, base_image_path: Path) -> Iterator[List[Any]]:
        with open(annotation_file, "r", encoding="utf-8") as fi:
            while True:
                chunk = tuple(itertools.islice(fi, chunk_size))
                if not chunk:
                    return
                yield chunk

    def _generate_examples(self, examples: List[Any], annotation_file: str, base_image_path: Path) -> Dict[str, List[Any]]:
        annotations = [json.loads(line) for line in examples]

        return {
            "image_url": [None for _ in examples],
            "image": [str((base_image_path / f"{annotation['image_id'].zfill(12)}.jpg").absolute()) for annotation in annotations],
            "texts": [[annotation["caption"]] for annotation in annotations],
            "source": [self.source for _ in annotations],
            "meta": [json.dumps(
                annotation,
                default=json_serializer,
                indent=2,
            ) for annotation in annotations],
        }


class LocalizedNarrativesFlickr30kLoader(BaseLoaderWithDLManager):
    _LOCAL_IMAGE_FOLDER_NAME = "flickr30k-images"
    _ANNOTATION_URLs = {
        "train": "https://storage.googleapis.com/localized-narratives/annotations/flickr30k_train_captions.jsonl",
        "validation": "https://storage.googleapis.com/localized-narratives/annotations/flickr30k_val_captions.jsonl",
        "test": "https://storage.googleapis.com/localized-narratives/annotations/flickr30k_test_captions.jsonl",
    }

    def __init__(
        self,
        dl_manager,
        split: str,
        num_proc: int,
        chunk_size: int,
        writer_batch_size: int,
    ):
        super(LocalizedNarrativesFlickr30kLoader, self).__init__(
            dl_manager=dl_manager,
            source="localized_narratives__flickr30k",
            split=split,
            num_proc=num_proc,
            chunk_size=chunk_size,
            writer_batch_size=writer_batch_size,
        )

    def generate_gen_kwargs(self, dl_manager):
        if dl_manager.manual_dir is None:
            raise FileNotFoundError(
                f"Please set the manual dir via `datasets.load_dataset('pmd', data_dir={{PATH}})` where `{{PATH}}/flickr30k` includes `{self._LOCAL_IMAGE_FOLDER_NAME}`.\nManual download instructions: {self.manual_download_instruction}"
            )

        manual_dir = Path(dl_manager.manual_dir) / "flickr30k"
        if not manual_dir.exists():
            raise FileNotFoundError(
                f"Please set the manual dir via `datasets.load_dataset('pmd', data_dir={{PATH}})` where `{{PATH}}/flickr30k` includes `{self._LOCAL_IMAGE_FOLDER_NAME}`.\nManual download instructions: {self.manual_download_instruction}"
            )

        annotation_file = dl_manager.download(self._ANNOTATION_URLs[self.split])

        return {"annotation_file": annotation_file, "base_image_path": manual_dir}

    @property
    def manual_download_instruction(self):
        return """\
    You need to go to http://shannon.cs.illinois.edu/DenotationGraph/data/index.html,
    and manually download the dataset ("Flickr 30k images."). Once the download is
    completed, a file named `flickr30k-images.tar.gz` will appear in your Downloads
    folder or whichever folder your browser chooses to save files to. You then have
    to extract the archive and move `flickr30k-images` under <path/to/folder>/flickr30k.
    The <path/to/folder> can e.g. be "~/manual_data".
    The dataset can then be loaded using the following command: `datasets.load_dataset("pmd", data_dir="<path/to/folder>")`.
    """

    def _build_rows_iterator(self, chunk_size: int, annotation_file: str, base_image_path: Path) -> Iterator[List[Any]]:
        with open(annotation_file, "r", encoding="utf-8") as fi:
            while True:
                chunk = tuple(itertools.islice(fi, chunk_size))
                if not chunk:
                    return
                yield chunk

    def _generate_examples(self, examples: List[Any], annotation_file: str, base_image_path: Path) -> Dict[str, List[Any]]:
        annotations = [json.loads(line) for line in examples]

        return {
            "image_url": [None for _ in examples],
            "image": [str((base_image_path / f"{annotation['image_id']}.jpg").absolute()) for annotation in annotations],
            "texts": [[annotation["caption"]] for annotation in annotations],
            "source": [self.source for _ in annotations],
            "meta": [json.dumps(
                annotation,
                default=json_serializer,
                indent=2,
            ) for annotation in annotations],
        }


class LocalizedNarrativesADE20kLoader(BaseLoaderWithDLManager):
    _ANNOTATION_URLs = {
        "train": "https://storage.googleapis.com/localized-narratives/annotations/ade20k_train_captions.jsonl",
        "validation": "https://storage.googleapis.com/localized-narratives/annotations/ade20k_validation_captions.jsonl",
    }
    _IMAGES_URL = (
        "http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip"
    )
    _SPLIT_MAP = {"train": "training", "validation": "validation"}

    def __init__(
        self,
        dl_manager,
        split: str,
        num_proc: int,
        chunk_size: int,
        writer_batch_size: int,
    ):
        super(LocalizedNarrativesADE20kLoader, self).__init__(
            dl_manager=dl_manager,
            source="localized_narratives__ADE20k",
            split=split,
            num_proc=num_proc,
            chunk_size=chunk_size,
            writer_batch_size=writer_batch_size,
        )

    def generate_gen_kwargs(self, dl_manager):
        annotation_file = dl_manager.download(self._ANNOTATION_URLs[self.split])
        image_base_dir = (
            Path(dl_manager.download_and_extract(self._IMAGES_URL))
            / "ADEChallengeData2016"
            / "images"
        )

        return {
            "annotation_file": annotation_file,
            "base_image_path": image_base_dir / self._SPLIT_MAP[self.split],
        }

    def _build_rows_iterator(self, annotation_file: str, base_image_path: Path, chunk_size: int) -> Iterator[List[Any]]:
        with open(annotation_file, "r", encoding="utf-8") as fi:
            while True:
                chunk = tuple(itertools.islice(fi, chunk_size))
                if not chunk:
                    return
                yield chunk

    def _generate_examples(self, examples: List[Any], annotation_file: str, base_image_path: Path) -> Dict[str, Any]:
        annotations = [json.loads(line) for line in examples]
        return {
            "image_url": [None for _ in examples],
            "image": [str((base_image_path / f"{annotation['image_id']}.jpg").absolute()) for annotation in annotations],
            "texts": [[annotation["caption"]] for annotation in annotations],
            "source": [self.source for _ in examples],
            "meta": [json.dumps(
                annotation,
                default=json_serializer,
                indent=2,
            ) for annotation in annotations],
        }


class VisualGenomeLoader(DatasetsLoader):
    def __init__(self, split: str, num_proc: int, datasets_batch_size: int = 1000):
        super(VisualGenomeLoader, self).__init__(
            dataset_name="visual_genome",
            config_name="region_descriptions_v1.2.0",
            split=split,
            datasets_batch_size=datasets_batch_size,
            num_proc=num_proc,
        )

    def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
        metas = {k: v for k, v in batch.items() if k not in ["image", "regions"]}
        input_batch_size = len(next(iter(batch.values())))
        serialized_metas = [
            json.dumps(
                {key: value[batch_id] for key, value in metas.items()},
                default=json_serializer,
                indent=2,
            )
            for batch_id in range(input_batch_size)
        ]
        output_list_rows = [
            {
                "image": image.crop(
                    (
                        region["x"],
                        region["y"],
                        region["x"] + region["width"],
                        region["y"] + region["height"],
                    )
                ),
                "image_url": None,
                "texts": [region["phrase"]],
                "source": self.source,
                "meta": serialized_meta,
            }
            for image, regions, serialized_meta in zip(
                batch["image"], batch["regions"], serialized_metas
            )
            for region in regions
        ]
        return {
            column_name: [row[column_name] for row in output_list_rows]
            for column_name in ["image_url", "image", "texts", "source", "meta"]
        }


class WITLoader(DatasetsLoader):
    def __init__(self, split: str, num_proc: int, datasets_batch_size: int = 1000):
        super(WITLoader, self).__init__(
            dataset_name="google/wit",
            config_name=None,
            split=split,
            datasets_batch_size=datasets_batch_size,
            num_proc=num_proc,
        )

    def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
        metas = {k: v for k, v in batch.items() if k not in ["image_url"]}
        batch_size = len(next(iter(batch.values())))
        return {
            "image_url": batch["image_url"],
            "image": [None] * batch_size,
            "texts": [
                # WIT caption fields can be missing, so only keep the texts that are present.
                [text for text in texts if text is not None]
                for texts in zip(
                    batch["caption_reference_description"],
                    batch["context_section_description"],
                    batch["caption_attribution_description"],
                )
            ],
            "source": [self.source] * batch_size,
            "meta": [
                json.dumps(
                    {key: value[batch_id] for key, value in metas.items()},
                    default=json_serializer,
                    indent=2,
                )
                for batch_id in range(batch_size)
            ],
        }


class ConceptualCaptions(DatasetsLoader):
    def __init__(self, split: str, num_proc: int, datasets_batch_size: int = 1000):
        super(ConceptualCaptions, self).__init__(
            dataset_name="conceptual_captions",
            config_name="unlabeled",
            split=split,
            datasets_batch_size=datasets_batch_size,
            num_proc=num_proc,
        )

    def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
        metas = {k: v for k, v in batch.items() if k not in ["image_url", "caption"]}
        batch_size = len(next(iter(batch.values())))
        return {
            "image_url": batch["image_url"],
            "image": [None] * batch_size,
            "texts": [[caption] for caption in batch["caption"]],
            "source": [self.source] * batch_size,
            "meta": [
                json.dumps(
                    {key: value[batch_id] for key, value in metas.items()},
                    default=json_serializer,
                    indent=2,
                )
                for batch_id in range(batch_size)
            ],
        }


class Conceptual12MLoader(DatasetsLoader):
    def __init__(self, split: str, num_proc: int, datasets_batch_size: int = 1000):
        super(Conceptual12MLoader, self).__init__(
            dataset_name="conceptual_12m",
            config_name=None,
            split=split,
            datasets_batch_size=datasets_batch_size,
            num_proc=num_proc,
        )

    def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
        metas = {k: v for k, v in batch.items() if k not in ["image_url", "caption"]}
        batch_size = len(next(iter(batch.values())))
        return {
            "image_url": batch["image_url"],
            "image": [None] * batch_size,
            "texts": [[caption] for caption in batch["caption"]],
            "source": [self.source] * batch_size,
            "meta": [
                json.dumps(
                    {key: value[batch_id] for key, value in metas.items()},
                    default=json_serializer,
                    indent=2,
                )
                for batch_id in range(batch_size)
            ],
        }


class RedCapsLoader(DatasetsLoader):
    def __init__(self, split: str, num_proc: int, datasets_batch_size: int = 1000):
        super(RedCapsLoader, self).__init__(
            dataset_name="red_caps",
            config_name="all",
            split=split,
            datasets_batch_size=datasets_batch_size,
            num_proc=num_proc,
        )

    def cast_to_pmd_features(self, batch: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
        metas = {
            k: v for k, v in batch.items() if k not in ["image_url", "raw_caption"]
        }
        batch_size = len(next(iter(batch.values())))
        return {
            "image_url": batch["image_url"],
            "image": [None] * batch_size,
            "texts": [[caption] for caption in batch["raw_caption"]],
            "source": [self.source] * batch_size,
            "meta": [
                json.dumps(
                    {key: value[batch_id] for key, value in metas.items()},
                    default=json_serializer,
                    indent=2,
                )
                for batch_id in range(batch_size)
            ],
        }


class YFCC100MLoader(BaseLoaderWithDLManager):
    _ANNOTATION_URL = "https://multimedia-commons.s3-us-west-2.amazonaws.com/tools/etc/yfcc100m_dataset.sql"

    _COLUMNS = [
        "photoid",
        "uid",
        "title",
        "description",
        "usertags",
        "downloadurl",
        "licensename",
        "licenseurl",
        "marker",
    ]

    _TEXT_COLUMNS = ["title", "description", "usertags"]

    WHITE_SPACE_REGEX = re.compile(r"\s+")
    LINE_BREAK_REGEX = re.compile(r"[\n\r]")
    REMOVE_HTML_TAGS_REGEX = re.compile(r"<.*?>")
    DATE_HOUR_REGEX = re.compile(r"[0-9](:|\.|-|/)[0-9][0-9](:|\.|-|/)[0-9][0-9]")
    WEIRD_CHARACTERS_REGEX = re.compile(r"[_©]")
    SECOND_WORD_REGEX = re.compile(r" [a-zA-Z]+")
    def __init__(
        self,
        dl_manager,
        split: str,
        num_proc: int,
        chunk_size: int,
        writer_batch_size: int,
    ):
        super(YFCC100MLoader, self).__init__(
            dl_manager=dl_manager,
            source="yfcc100m",
            split=split,
            num_proc=num_proc,
            chunk_size=chunk_size,
            writer_batch_size=writer_batch_size,
        )
        self.chunk_size = chunk_size

    BYTE_MAP = {"%02x" % v: "%x" % v for v in range(256)}

    @classmethod
    def yfcc_local_path(cls, url, __bm=BYTE_MAP):
        h = md5(url.encode("utf-8")).hexdigest()
        hash_ = "".join(__bm[h[x : x + 2]] for x in range(0, 32, 2))
        return f"data/images/{hash_[0:3]}/{hash_[3:6]}/{hash_}.jpg"

    @classmethod
    def generate_image_url(cls, downloadurl: str):
        """Takes the original image url and returns the downloadable version stored in `multimedia-commons`."""
        local_path = cls.yfcc_local_path(downloadurl)
        return f"https://multimedia-commons.s3-us-west-2.amazonaws.com/{local_path}"

    def generate_gen_kwargs(self, dl_manager):
        sql_file = dl_manager.download(self._ANNOTATION_URL)
        return {"sql_file": sql_file}

    def filter_text(self, text: str) -> bool:
        """Return True if `text` should be kept, False if the record should be filtered out."""
        if self.WEIRD_CHARACTERS_REGEX.search(text) is not None:
            return False

        if self.SECOND_WORD_REGEX.search(text) is None:
            return False

        if self.DATE_HOUR_REGEX.search(text) is not None:
            return False

        try:
            if detect(text) != "en":
                return False
        except Exception:
            return False

        return True

    def clean_text(self, text: str) -> str:
        """Inspired by the original preprocessing code."""
        cleaned_text = unquote_plus(text)
        cleaned_text = self.LINE_BREAK_REGEX.sub(" ", cleaned_text)
        cleaned_text = self.REMOVE_HTML_TAGS_REGEX.sub("", cleaned_text)
        return cleaned_text

    def get_associated_text(self, annotation: Dict[str, Any]) -> Optional[str]:
        """
        Given an annotation, return the text associated with the image.
        We return None when the annotation should be filtered out.
        """
        ordered_text_columns_consideration = ["description", "title"]
        record_text = None
        for column_name in ordered_text_columns_consideration:
            text_candidate = annotation[column_name]
            if column_name == "description" and not (5 < len(text_candidate) < 256):
                continue
            cleaned_text_candidate = self.clean_text(text_candidate)
            if self.filter_text(cleaned_text_candidate):
                record_text = cleaned_text_candidate
                break
        return record_text

    def _build_rows_iterator(self, sql_file: str, chunk_size: int) -> Iterator[List[Any]]:
        sql_command = f"select {', '.join(self._COLUMNS)} from yfcc100m_dataset"

        with sqlite3.connect(sql_file) as connection:
            cursor = connection.cursor()
            cursor.execute(sql_command)

            while True:
                records = cursor.fetchmany(self.chunk_size)

                if len(records) == 0:
                    break

                yield records

            cursor.close()

    def _generate_examples(self, examples: List[Any], sql_file: str) -> Dict[str, Any]:
        # Start from empty columns so a fully filtered chunk still yields a valid (empty) batch.
        buffer = {
            "image_url": [],
            "image": [],
            "texts": [],
            "source": [],
            "meta": [],
        }

        for example in examples:
            annotation = {
                column_name: value
                for value, column_name in zip(example, self._COLUMNS)
            }

            if annotation["marker"] != 0:
                continue

            text = self.get_associated_text(annotation)
            if text is None:
                continue

            for text_column in self._TEXT_COLUMNS:
                annotation[text_column] = unquote_plus(annotation[text_column])

            buffer["image_url"].append(self.generate_image_url(annotation["downloadurl"]))
            buffer["image"].append(None)
            buffer["texts"].append([text])
            buffer["source"].append(self.source)
            buffer["meta"].append(json.dumps(
                annotation,
                default=json_serializer,
                indent=2,
            ))

        return buffer


class PMDConfig(datasets.BuilderConfig):
    """BuilderConfig for PMD."""

    def __init__(
        self,
        num_proc: Optional[int] = None,
        datasets_batch_size: int = 1000,
        sqlite3_batch_size: int = 10_000,
        chunk_size: int = 10_000,
        writer_batch_size: int = 10_000,
        **kwargs,
    ):
        if num_proc is None:
            num_proc = 1
        super(PMDConfig, self).__init__(**kwargs)

        self.datasets_batch_size = datasets_batch_size
        self.sqlite3_batch_size = sqlite3_batch_size
        self.num_proc = num_proc
        self.chunk_size = chunk_size
        self.writer_batch_size = writer_batch_size
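
# Usage note (an assumption about how this builder is typically invoked, shown
# for illustration only): keyword arguments passed to `datasets.load_dataset`
# are forwarded to `PMDConfig`, so the knobs above can be overridden at load
# time, e.g.
#
#     datasets.load_dataset("pmd", num_proc=8, chunk_size=5_000, data_dir="~/manual_data")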


class PMD(datasets.ArrowBasedBuilder):
    """Builder that assembles the PMD dataset from the per-source loaders defined above."""

    BUILDER_CONFIG_CLASS = PMDConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "loaders": [
                        COCOloader(
                            dl_manager=dl_manager,
                            split=split_name,
                            num_proc=self.config.num_proc,
                            chunk_size=self.config.chunk_size,
                            writer_batch_size=self.config.writer_batch_size,
                        ),
                        SBULoader(
                            split=split_name,
                            datasets_batch_size=self.config.datasets_batch_size,
                            num_proc=self.config.num_proc,
                        ),
                        LocalizedNarrativesOpenImagesLoader(
                            dl_manager=dl_manager,
                            split=split_name,
                            num_proc=self.config.num_proc,
                            chunk_size=self.config.chunk_size,
                            writer_batch_size=self.config.writer_batch_size,
                        ),
                        LocalizedNarrativesCOCOLoader(
                            dl_manager=dl_manager,
                            split=split_name,
                            num_proc=self.config.num_proc,
                            chunk_size=self.config.chunk_size,
                            writer_batch_size=self.config.writer_batch_size,
                        ),
                        LocalizedNarrativesFlickr30kLoader(
                            dl_manager=dl_manager,
                            split=split_name,
                            num_proc=self.config.num_proc,
                            chunk_size=self.config.chunk_size,
                            writer_batch_size=self.config.writer_batch_size,
                        ),
                        LocalizedNarrativesADE20kLoader(
                            dl_manager=dl_manager,
                            split=split_name,
                            num_proc=self.config.num_proc,
                            chunk_size=self.config.chunk_size,
                            writer_batch_size=self.config.writer_batch_size,
                        ),
                        ConceptualCaptions(
                            split=split_name,
                            num_proc=self.config.num_proc,
                            datasets_batch_size=self.config.datasets_batch_size,
                        ),
                        VisualGenomeLoader(
                            split=split_name,
                            num_proc=self.config.num_proc,
                            datasets_batch_size=self.config.datasets_batch_size,
                        ),
                        WITLoader(
                            split=split_name,
                            num_proc=self.config.num_proc,
                            datasets_batch_size=self.config.datasets_batch_size,
                        ),
                        Conceptual12MLoader(
                            split=split_name,
                            num_proc=self.config.num_proc,
                            datasets_batch_size=self.config.datasets_batch_size,
                        ),
                        RedCapsLoader(
                            split=split_name,
                            num_proc=self.config.num_proc,
                            datasets_batch_size=self.config.datasets_batch_size,
                        ),
                        YFCC100MLoader(
                            dl_manager=dl_manager,
                            split=split_name,
                            num_proc=self.config.num_proc,
                            chunk_size=self.config.sqlite3_batch_size,
                            writer_batch_size=self.config.writer_batch_size,
                        ),
                    ]
                },
            )
            for split_name in [datasets.Split.TRAIN]
        ]

    def _generate_tables(self, loaders: List[BaseLoader]):
        idx = 0
        print("start")
        for loader in loaders:
            print(loader.__class__.__name__)
            for elt in loader._generate_batches():
                yield idx, elt
                idx += 1
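

# A minimal way to build the dataset locally (illustrative; assumes this script
# is saved as `pmd.py` and that the Flickr30k images were placed under
# `~/manual_data/flickr30k` as described in `manual_download_instruction`):
#
#     import datasets
#     pmd = datasets.load_dataset("./pmd.py", data_dir="~/manual_data", split="train")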