Upload 2 files

Files changed:
- CAPMIT1003.py (+2, -20)
- README.md (+56, -0)

CAPMIT1003.py (CHANGED)
```diff
@@ -8,11 +8,11 @@ import pandas as pd
 import sqlite3
 import datasets
 
-_CITATION = """
+_CITATION = """@article{zanca2023contrastive,
   title={Contrastive Language-Image Pretrained Models are Zero-Shot Human Scanpath Predictors},
   author={Zanca, Dario and Zugarini, Andrea and Dietz, Simon and Altstidl, Thomas R and Ndjeuha, Mark A Turban and Schwinn, Leo and Eskofier, Bjoern},
   journal={arXiv preprint arXiv:2305.12380},
-year={2023}
+  year={2023}
 }"""
 
 _DESCRIPTION = """CapMIT1003 is a dataset of captions and click-contingent image explorations collected during captioning tasks.
@@ -90,22 +90,6 @@ class CapMIT1003DB:
         return pd.read_sql_query('SELECT x, y, click_time AS time FROM clicks WHERE obs_uid = ?', self.cnx,
                                  params=[obs_uid])
 
-    @staticmethod
-    def download_images(quiet=False):
-        """ Download stimuli images for MIT1003.
-
-        Parameters
-        ----------
-        quiet: bool
-            Flag that suppresses command-line outputs.
-        """
-        if not os.path.exists('mit1003'):
-            if not os.path.exists('mit1003.zip'):
-                print('Downloading MIT1003 Stimuli') if not quiet else None
-                urlretrieve(MIT1003_URL, 'mit1003.zip')
-            print('Extracting MIT1003 Stimuli') if not quiet else None
-            unpack_archive('mit1003.zip', 'mit1003')
-
 
 class CapMIT1003(datasets.GeneratorBasedBuilder):
     _URLS = [MIT1003_URL]
@@ -119,7 +103,6 @@ class CapMIT1003(datasets.GeneratorBasedBuilder):
             "usr_uid": datasets.Value("string"),
             "caption": datasets.Value("string"),
             "image": datasets.Image(),
-            "start_time": datasets.Value("timestamp[s]"),
             "clicks_path": datasets.Sequence(datasets.Sequence(datasets.Value("int32"), length=2)),
             "clicks_time": datasets.Sequence(datasets.Value("timestamp[s]"))
         }
@@ -154,7 +137,6 @@ class CapMIT1003(datasets.GeneratorBasedBuilder):
             "usr_uid": pair.usr_uid,
             "image": pair.img_path,
             "caption": pair.caption,
-            "start_time": pair.start_time,
             "clicks_path": xy_coordinates,
             "clicks_time": clicks_time
         }
```
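Per observation, the loader fetches clicks with a plain SQL query (the unchanged `read_sql_query` call in the hunk above). Below is a minimal standalone sketch of the same access pattern, assuming a local copy of the SQLite database; the file name and the observation uid are placeholders, while the `clicks` table and its `x`, `y`, `click_time`, and `obs_uid` columns come from the query itself:

```python
import sqlite3

import pandas as pd

# Placeholder path; the loader keeps its own open connection (self.cnx).
cnx = sqlite3.connect("capmit1003.db")

# Same query the loader issues for each observation uid.
clicks = pd.read_sql_query(
    "SELECT x, y, click_time AS time FROM clicks WHERE obs_uid = ?",
    cnx,
    params=["<obs_uid>"],  # placeholder uid
)
print(clicks.head())
cnx.close()
```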
README.md (ADDED)
```yaml
---
dataset_info:
  features:
  - name: obs_uid
    dtype: string
  - name: usr_uid
    dtype: string
  - name: caption
    dtype: string
  - name: image
    dtype: image
  - name: clicks_path
    sequence:
      sequence: int32
      length: 2
  - name: clicks_time
    sequence: timestamp[s]
  splits:
  - name: train
    num_bytes: 1611467
    num_examples: 3848
  download_size: 241443505
  dataset_size: 1611467
---
```

### Dataset Description
CapMIT1003 is a dataset of captions and click-contingent image explorations collected during captioning tasks.
It is based on the same stimuli as the well-known MIT1003 benchmark, for which eye-tracking data under
free-viewing conditions is available. This offers a promising opportunity to study human attention under
both tasks concurrently.

### Usage
You can load CapMIT1003 as follows:

```python
from datasets import load_dataset

capmit1003_dataset = load_dataset("azugarini/CAPMIT1003", trust_remote_code=True)
print(capmit1003_dataset["train"][0])  # print the first example
```

### Citation Information
If you use this dataset in your research or work, please cite the following paper:

```
@article{zanca2023contrastive,
  title={Contrastive Language-Image Pretrained Models are Zero-Shot Human Scanpath Predictors},
  author={Zanca, Dario and Zugarini, Andrea and Dietz, Simon and Altstidl, Thomas R and Ndjeuha, Mark A Turban and Schwinn, Leo and Eskofier, Bjoern},
  journal={arXiv preprint arXiv:2305.12380},
  year={2023}
}
```