Datasets:

Modalities:
Text
Formats:
text
Languages:
English
Size:
< 1K
ArXiv:
Libraries:
Datasets
License:
M3VIR / Scripts / extract_track23.py
guluthemonster's picture
Upload folder using huggingface_hub
6f5985d verified
import webdataset as wds
import os
import json
import glob
import argparse
def extract_webdataset(webdataset_path, output_dir):
    """
    Extract all WebDataset tar files back to the original directory structure.

    Each sample is expected to carry a ``path_info.json`` entry describing
    where its files belong (category/scene/style/resolution/view), plus a
    depth pair (``depth.exr``/``depth.json``), an id pair
    (``id.png``/``id.json``) and an rgb pair (``rgb.png``/``rgb.json``).

    Args:
        webdataset_path: Path to the directory containing WebDataset tar files.
        output_dir: Directory where the dataset is extracted.
    """
    os.makedirs(output_dir, exist_ok=True)

    # Get all tar files in the directory, in deterministic order.
    tar_files = sorted(glob.glob(os.path.join(webdataset_path, "*.tar")))
    if not tar_files:
        print(f"No tar files found in {webdataset_path}")
        return
    print(f"Found {len(tar_files)} tar files")

    total_sample_count = 0
    for tar_file in tar_files:
        print(f"\nProcessing {os.path.basename(tar_file)}")
        # Initialize WebDataset reader for current tar file.
        dataset = wds.WebDataset(
            tar_file,
            shardshuffle=False,
            empty_check=False
        )
        sample_count = 0
        for sample in dataset:
            try:
                # Path metadata tells us where this sample belongs on disk.
                path_info = json.loads(sample['path_info.json'])
                base_dir = os.path.join(
                    output_dir,
                    path_info['category'],
                    path_info['scene'],
                    path_info['style'],
                    path_info['resolution'],
                )
                view = path_info['view']

                # Base filename comes from the sample key; the view is the
                # last component, so take the one before it.
                # NOTE(review): assumes keys look like .../<base_name>/<view>
                # — confirm against the packing script.
                base_name = sample['__key__'].split('/')[-2]

                # One (subdir, binary extension, binary key, json key) tuple
                # per image type; drives the identical save logic below.
                for subdir, ext, bin_key, json_key in (
                    ("Depth_images", ".exr", "depth.exr", "depth.json"),
                    ("ID_images", ".png", "id.png", "id.json"),
                    ("RGB_images", ".png", "rgb.png", "rgb.json"),
                ):
                    view_dir = os.path.join(base_dir, subdir, view)
                    os.makedirs(view_dir, exist_ok=True)
                    _save_sample_files(view_dir, base_name, ext,
                                       sample[bin_key], sample[json_key])

                sample_count += 1
                if sample_count % 100 == 0:
                    print(f"Extracted {sample_count} samples from current tar...")
            except Exception as e:
                # Best-effort extraction: report the failing sample, keep going.
                print(f"Error extracting sample {sample['__key__']}: {str(e)}")
                continue
        total_sample_count += sample_count
        print(f"Completed {os.path.basename(tar_file)}: {sample_count} samples extracted")
    print(f"\nExtraction complete. Total samples extracted: {total_sample_count}")


def _save_sample_files(dest_dir, base_name, binary_ext, binary_data, json_data):
    """Write one binary payload and its pretty-printed JSON sidecar.

    Args:
        dest_dir: Directory to write into (must already exist).
        base_name: Filename stem shared by both files.
        binary_ext: Extension (with dot) for the binary file, e.g. ".exr".
        binary_data: Raw bytes of the binary payload.
        json_data: JSON-encoded bytes/str; re-serialized with indent=4.
    """
    with open(os.path.join(dest_dir, f"{base_name}{binary_ext}"), 'wb') as f:
        f.write(binary_data)
    with open(os.path.join(dest_dir, f"{base_name}.json"), 'w') as f:
        json.dump(json.loads(json_data), f, indent=4)
def verify_extraction(extracted_dir):
    """
    Verify the extraction by counting files of each expected type.

    Walks *extracted_dir* recursively, counts ``.exr``, ``.png`` and
    ``.json`` files, prints a summary, and warns when any of the three
    types is absent.

    Args:
        extracted_dir: Root directory of the extracted dataset.

    Returns:
        dict: Counts keyed by ``'exr'``, ``'png'`` and ``'json'`` so
        callers can check the result programmatically (in addition to
        the printed report; previously this function returned None).
    """
    print("\nVerifying extraction...")

    counts = {'exr': 0, 'png': 0, 'json': 0}
    for root, _, files in os.walk(extracted_dir):
        for file in files:
            # splitext keeps the leading dot; strip it to match the keys.
            ext = os.path.splitext(file)[1].lstrip('.')
            if ext in counts:
                counts[ext] += 1

    print("\nExtracted file counts:")
    print(f" EXR files: {counts['exr']}")
    print(f" PNG files: {counts['png']}")
    print(f" JSON files: {counts['json']}")

    if all(count > 0 for count in counts.values()):
        print("\nAll file types present in extraction.")
    else:
        print("\nWARNING: Some file types are missing!")
    return counts
def parse_arguments(argv=None):
    """
    Parse command-line arguments for the extraction script.

    Args:
        argv: Optional list of argument strings (useful for testing).
            Defaults to None, in which case argparse reads
            ``sys.argv[1:]`` — identical to the original behavior.

    Returns:
        argparse.Namespace with ``input_path``, ``output_path`` and
        ``verify`` attributes.
    """
    parser = argparse.ArgumentParser(description='Extract WebDataset tar files')
    parser.add_argument('--input_path', '-i', required=True,
                        help='Input directory containing WebDataset tar files')
    parser.add_argument('--output_path', '-o', required=True,
                        help='Output directory for extracted files')
    parser.add_argument('--verify', '-v', action='store_true',
                        help='Verify the extraction after completion')
    return parser.parse_args(argv)
if __name__ == "__main__":
    # Script entry point: parse CLI options, run the extraction, and
    # optionally verify the result afterwards.
    cli = parse_arguments()
    in_dir, out_dir = cli.input_path, cli.output_path
    print(f"Input directory: {in_dir}")
    print(f"Output directory: {out_dir}")
    print("\nStarting extraction...")
    extract_webdataset(in_dir, out_dir)
    if cli.verify:
        verify_extraction(out_dir)