Skip to content

Commit

Permalink
flickr download
Browse files Browse the repository at this point in the history
  • Loading branch information
chail committed Jul 29, 2022
1 parent dcc6c5a commit dd0fb1c
Show file tree
Hide file tree
Showing 7 changed files with 126,493 additions and 2 deletions.
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,8 @@ debug*
training-runs
pretrained
web/
/datasets/*
!/datasets/download

*.npz
*.mp4
Expand Down
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -115,9 +115,9 @@ Training progress can be visualized using:
tensorboard --logdir training-runs/
```

Note on datasets: beyond the standard FFHQ and LSUN Church datasets, we train on datasets scraped from flickr. Due to licensing we cannot release this images directly but can provide the image IDs used to construct the datasets.
Note on datasets: beyond the standard FFHQ and LSUN Church datasets, we train on datasets scraped from flickr. Due to licensing we cannot release these images directly. Please see `datasets/download/download_dataset.sh` for examples of how to download the flickr datasets. You will need to fill in a flickr api key and secret.

<a name="evalution"/>
<a name="evaluation"/>

## Evaluations

Expand Down
90 changes: 90 additions & 0 deletions datasets/download/download_dataset.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
import argparse
import os
import subprocess

import flickr_api as f
import numpy as np
from PIL import Image
from tqdm import tqdm

# file list
# min size
# max size


# python data/trace_data.py [[PATH]]
parser = argparse.ArgumentParser()
parser.add_argument('--filelist', type=str, required=True)
parser.add_argument('--minsize', type=int)
parser.add_argument('--maxsize', type=int)
parser.add_argument('--outputpath', type=str)
parser.add_argument('--download_HR', action='store_true')
opt = parser.parse_args()

f.set_keys(api_key = 'XXXX', api_secret = 'XXXX')

if opt.outputpath is None:
opt.outputpath = '../%s' % os.path.basename(opt.filelist).split('.')[0]
if(not os.path.exists(opt.outputpath)):
os.makedirs(opt.outputpath, exist_ok=True)
print('Making output [%s]'%opt.outputpath)
else:
print('Output [%s] already exists' % opt.outputpath)

with open(opt.filelist, 'r') as r:
files = [l.strip() for l in r.readlines()]

exception_ids = []
for filename in files:
license, idnum = filename.split('_')
idnum, filetype = idnum.split('.')
if os.path.exists(os.path.join(opt.outputpath, filename)):
print('[%s] already exists'%filename)
continue
try:
photo = f.Photo(id=idnum)
sizes = photo.getSizes()
if opt.download_HR:
# download highest resolution between minsize and maxsize
downloaded = False
for s in ['Original', 'X-Large 5K', 'X-Large 4K', 'X-Large 3K', 'Large 2048',
'Large 1600', 'Large', 'Medium 800', 'Medium 640', 'Medium']:
if s in sizes:
height = sizes[s]['height']
width = sizes[s]['width']
if opt.maxsize is not None and min(height, width) > opt.maxsize:
# above max size, go to next size
continue
if opt.minsize is not None and min(height, width) < opt.minsize:
# below min size skip
continue
photo_size = sizes[s]
print('%s: trying %s [%sx%s]'%(idnum, photo_size['label'], photo_size['height'], photo_size['width']))
os.system('wget -q %s -O %s/%s'%(photo_size['source'], opt.outputpath, filename))
downloaded = True
break
if not downloaded:
# did not find suitable size
exception_ids.append(idnum)
else:
# download the smallest img larger than minsize for LR dataset
downloaded = False
for k, v in sizes.items(): # smallest to largest
height = v['height']
width = v['width']
if min(height, width) >= opt.minsize:
photo_size = sizes[k]
print('%s: trying %s [%sx%s]'%(idnum, photo_size['label'], photo_size['height'], photo_size['width']))
os.system('wget -q %s -O %s/%s' %(photo_size['source'], opt.outputpath, filename))
downloaded=True
break
if not downloaded:
# did not find suitable size
exception_ids.append(idnum)
except Exception as ex:
# import pdb; pdb.set_trace()
print('[%s] not processed'%filename)
exception_ids.append(id_num)

if len(exception_ids) > 0:
with open(opt.filelist.replace('.txt', '_exception.txt'), 'w') as r:
[r.write('%s\n' % idnum) for idnum in exception_ids]

8 changes: 8 additions & 0 deletions datasets/download/download_dataset.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# Example invocations of download_dataset.py for the flickr datasets;
# uncomment the variant you need. Requires flickr api key/secret to be
# filled in inside download_dataset.py.

### birds HR: min = 512, max = 2048
python download_dataset.py --filelist flickr_birds_HR.txt --minsize 512 --maxsize 2048 --download_HR

### birds LR: smallest img above 256
# python download_dataset.py --filelist flickr_birds_LR.txt --minsize 256

### church HR: min = 1024, no max
# python download_dataset.py --filelist flickr_church_exteriors_HR.txt --minsize 1024 --download_HR
Loading

0 comments on commit dd0fb1c

Please sign in to comment.