M3VIR / Scripts / extract_track1.py
import webdataset as wds
import os
import json
from pathlib import Path
import glob
import argparse
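
# Each sample read below is expected to provide the keys accessed in
# extract_webdataset(): 'path_info.json' (category / scene / style / resolution),
# 'depth.exr', 'depth.json', 'id.png', 'id.json', 'rgb.png', 'rgb.json', and the
# WebDataset '__key__', whose last path component is reused as the base filename.
# (This summarizes the keys this script reads; it is not an independent
# specification of the dataset.)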


def extract_webdataset(input_path, output_path):
    """
    Extract all WebDataset tar files back to the original directory structure.

    Args:
        input_path: Path to the directory containing the WebDataset tar files
        output_path: Directory into which the dataset is extracted
    """
    # Create base output directory
    os.makedirs(output_path, exist_ok=True)

    # Collect all tar files (shards)
    tar_files = sorted(glob.glob(os.path.join(input_path, "*.tar")))
    print(f"Found {len(tar_files)} tar files")

    sample_count = 0
    for tar_idx, tar_file in enumerate(tar_files):
        print(f"\nProcessing tar file {tar_idx + 1}/{len(tar_files)}: {os.path.basename(tar_file)}")

        # Initialize a WebDataset reader for the current tar file
        dataset = wds.WebDataset(
            tar_file,
            shardshuffle=False,
            empty_check=False
        )

        for sample in dataset:
            try:
                # Get path information
                path_info = json.loads(sample['path_info.json'])

                # Reconstruct the directory structure
                category = path_info['category']
                scene = path_info['scene']
                style = path_info['style']
                resolution = path_info['resolution']

                # Build the directories for each image type
                depth_dir = os.path.join(output_path, category, scene, style,
                                         resolution, "Depth_images")
                id_dir = os.path.join(output_path, category, scene, style,
                                      resolution, "ID_images")
                rgb_dir = os.path.join(output_path, category, scene, style,
                                       resolution, "RGB_images")

                # Create directories if they don't exist
                os.makedirs(depth_dir, exist_ok=True)
                os.makedirs(id_dir, exist_ok=True)
                os.makedirs(rgb_dir, exist_ok=True)

                # Get the base filename from the sample key
                base_name = sample['__key__'].split('/')[-1]

                # Save Depth files
                depth_exr_path = os.path.join(depth_dir, f"{base_name}.exr")
                depth_json_path = os.path.join(depth_dir, f"{base_name}.json")
                with open(depth_exr_path, 'wb') as f:
                    f.write(sample['depth.exr'])
                with open(depth_json_path, 'w') as f:
                    json.dump(json.loads(sample['depth.json']), f, indent=4)

                # Save ID files
                id_png_path = os.path.join(id_dir, f"{base_name}.png")
                id_json_path = os.path.join(id_dir, f"{base_name}.json")
                with open(id_png_path, 'wb') as f:
                    f.write(sample['id.png'])
                with open(id_json_path, 'w') as f:
                    json.dump(json.loads(sample['id.json']), f, indent=4)

                # Save RGB files
                rgb_png_path = os.path.join(rgb_dir, f"{base_name}.png")
                rgb_json_path = os.path.join(rgb_dir, f"{base_name}.json")
                with open(rgb_png_path, 'wb') as f:
                    f.write(sample['rgb.png'])
                with open(rgb_json_path, 'w') as f:
                    json.dump(json.loads(sample['rgb.json']), f, indent=4)

                sample_count += 1
                if sample_count % 100 == 0:
                    print(f"Extracted {sample_count} samples...")

            except Exception as e:
                print(f"Error extracting sample {sample['__key__']}: {str(e)}")
                continue

    print(f"\nExtraction complete. Total samples extracted: {sample_count}")


def verify_extraction(extracted_dir):
    """
    Verify the extraction by counting files of each type.
    """
    print("\nVerifying extraction...")

    def count_files(directory):
        exr_count = 0
        png_count = 0
        json_count = 0
        for root, _, files in os.walk(directory):
            for file in files:
                if file.endswith('.exr'):
                    exr_count += 1
                elif file.endswith('.png'):
                    png_count += 1
                elif file.endswith('.json'):
                    json_count += 1
        return {'exr': exr_count, 'png': png_count, 'json': json_count}

    # Count files in the extracted directory
    extracted_counts = count_files(extracted_dir)

    print("\nExtracted file counts:")
    print(f"  EXR files: {extracted_counts['exr']}")
    print(f"  PNG files: {extracted_counts['png']}")
    print(f"  JSON files: {extracted_counts['json']}")

    # Basic sanity check
    if extracted_counts['exr'] == 0 or extracted_counts['png'] == 0 or extracted_counts['json'] == 0:
        print("\nWARNING: Some file types have zero count!")
    else:
        print("\nAll file types present in extraction.")


if __name__ == "__main__":
    # Parse command-line arguments
    parser = argparse.ArgumentParser(description='Extract WebDataset back to original format')
    parser.add_argument('--input_path', type=str, required=True,
                        help='Directory containing the WebDataset tar files')
    parser.add_argument('--output_path', type=str, required=True,
                        help='Directory into which the dataset is extracted')
    parser.add_argument('--verify', action='store_true',
                        help='Verify the extraction after completion')
    args = parser.parse_args()

    print(f"WebDataset path: {args.input_path}")
    print(f"Output directory: {args.output_path}")
    print("\nStarting extraction...")

    extract_webdataset(args.input_path, args.output_path)

    if args.verify:
        verify_extraction(args.output_path)
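
# Example invocation (a minimal sketch; the shard and output directories below
# are illustrative placeholders, not paths defined by the dataset):
#
#   python extract_track1.py --input_path ./track1_shards --output_path ./track1_extracted --verify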