# cl-lmm/dataset/OCR-VQA-200K/download.py
#
# Downloads the OCR-VQA images listed in dataset.json into ./images,
# fetching in parallel with a thread pool.
import os
import json
import urllib.request as ureq
import urllib.error
import concurrent.futures
import threading
# Paths for the dataset manifest and the image output directory.
dataset_path = "./dataset.json"
images_path = "./images"
download = 1  # Set to 0 if images are already downloaded.

# Load the dataset manifest; assumes it maps image key -> record containing
# an "imageURL" field (see download_image below) — confirm against the data.
with open(dataset_path, "r") as fp:
    data = json.load(fp)

# Shared progress counter; count_lock makes increments thread-safe
# across the download workers.
downloaded_count = 0
count_lock = threading.Lock()
# Function to download an image.
def download_image(k):
    """Download the image for dataset key *k* into images_path.

    Skips keys whose output file already exists (resumable runs).
    On success, increments the shared downloaded_count under count_lock
    and prints progress every 100 images. Download failures are printed
    but do not abort the run.
    """
    global downloaded_count
    imageURL = data[k]["imageURL"]
    # NOTE(review): splitext keeps any URL query string in the "extension";
    # assumes dataset URLs end in a bare file suffix — confirm.
    ext = os.path.splitext(imageURL)[1]
    outputFile = os.path.join(images_path, f"{k}{ext}")
    # Only download the image if it doesn't exist.
    if os.path.exists(outputFile):
        return
    try:
        ureq.urlretrieve(imageURL, outputFile)
    except OSError as e:
        # urlretrieve raises URLError (a subclass of OSError) for network
        # failures, but also plain OSError for disk/socket problems — the
        # original only caught URLError and would lose those errors inside
        # the thread pool.
        print(f"Error downloading {outputFile}: {e}")
        return
    # Success path kept outside the try so counter bugs can't be
    # mistaken for download errors.
    with count_lock:
        downloaded_count += 1
        if downloaded_count % 100 == 0:
            print(f"{downloaded_count} images downloaded.")
# Download images using multiple threads.
if download == 1:
    # exist_ok avoids a race if the directory appears between check and create.
    os.makedirs(images_path, exist_ok=True)
    # Thread pool overlaps the I/O-bound downloads. max_workers is tuned
    # for many small files; the optimal number depends on your network and
    # the server's capacity.
    with concurrent.futures.ThreadPoolExecutor(max_workers=400) as executor:
        # executor.map returns a LAZY iterator: unless it is consumed,
        # exceptions raised in workers are silently discarded (the original
        # never consumed it). Draining it surfaces any unexpected failure.
        for _ in executor.map(download_image, data.keys()):
            pass