forked from thtrieu/darkflow
-
Notifications
You must be signed in to change notification settings - Fork 12
Expand file tree
/
Copy pathprepare_data.py
More file actions
executable file
·123 lines (99 loc) · 4.04 KB
/
prepare_data.py
File metadata and controls
executable file
·123 lines (99 loc) · 4.04 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
#! /usr/bin/env python3
import os, sys, math, time, cv2
import numpy as np
from darkflow.net.build import TFNet
from darkflow.defaults import argHandler
from multiprocessing.pool import ThreadPool
from pycocotools.coco import COCO
import tensorflow as tf
import pickle
def process_coco(coco, img_path):
    """Collect (directory, file_name) pairs for COCO images that contain
    at least one usable 'person' annotation.

    An annotation is usable when it has a non-empty 'bbox' AND a
    'segmentation' entry.

    Args:
        coco: a loaded pycocotools ``COCO`` index (duck-typed: only
            getCatIds/getImgIds/loadImgs/getAnnIds/loadAnns are used).
        img_path: directory holding the image files; returned verbatim as
            the first element of each tuple.

    Returns:
        list of (img_path, file_name) tuples, drawn from at most the first
        10000 person-category images.
    """
    res = []
    cat_ids = coco.getCatIds(catNms=['person'])
    img_ids = coco.getImgIds(catIds=cat_ids)
    imgs = coco.loadImgs(ids=img_ids)
    # Cap the dataset size; the downstream pipeline only needs 10k images.
    imgs = imgs[:10000]
    for processed, img in enumerate(imgs, start=1):
        # Progress heartbeat every 1000 images (the original counter printed
        # at 1001, 2002, ... — a modulo keeps the cadence exact).
        if processed % 1000 == 0:
            print("processed", processed, len(imgs))
        ann_ids = coco.getAnnIds(imgIds=img['id'])
        anns = coco.loadAnns(ann_ids)
        # any() short-circuits on the first usable annotation instead of
        # scanning the whole list with a manual flag.
        if any(('bbox' in ann) and (ann['bbox'] != []) and ('segmentation' in ann)
               for ann in anns):
            res.append((img_path, img['file_name']))
    return res
def my_postprocess(framework, net_out, im, img_name):
    """Persist a raw network output array alongside the image's name.

    Args:
        framework: unused; kept so the signature matches the other
            postprocess callbacks in this pipeline.
        net_out: array-like network output to save.
        im: unused; kept for signature compatibility.
        img_name: image path; the array is written to
            ``<img_name without trailing .jpg>.npz`` (savez_compressed
            appends the .npz suffix).
    """
    # splitext strips only the trailing extension; the original
    # str.replace('.jpg', '') would also delete a '.jpg' occurring
    # mid-filename.
    base, ext = os.path.splitext(img_name)
    target = base if ext == '.jpg' else img_name
    np.savez_compressed(target, net_out)
def my_save_ckpt(tfnet, step, loss_profile):
    """Write a training checkpoint together with its loss profile.

    Produces two artifacts in ``tfnet.FLAGS.backup``:
      * ``<model>-<step>.profile`` — pickled ``loss_profile``
      * ``<model>-<step>``         — TensorFlow checkpoint via tfnet.saver

    Args:
        tfnet: TFNet-like object exposing ``meta['name']``, ``FLAGS.backup``,
            ``say()``, ``saver`` and ``sess``.
        step: training step embedded in both file names.
        loss_profile: any picklable object.
    """
    # Renamed from 'file', which shadowed the builtin.
    name_fmt = '{}-{}{}'
    model = tfnet.meta['name']
    profile_path = os.path.join(tfnet.FLAGS.backup,
                                name_fmt.format(model, step, '.profile'))
    with open(profile_path, 'wb') as profile_ckpt:
        pickle.dump(loss_profile, profile_ckpt)
    ckpt_path = os.path.join(tfnet.FLAGS.backup,
                             name_fmt.format(model, step, ''))
    tfnet.say('Checkpoint at step {}'.format(step))
    tfnet.saver.save(tfnet.sess, ckpt_path)
if __name__ == "__main__":
    FLAGS = argHandler()
    FLAGS.setDefaults()
    FLAGS.parseArgs(sys.argv)

    def _get_dir(dirs):
        # Create every directory in `dirs` (relative to cwd) that is missing.
        for d in dirs:
            this = os.path.abspath(os.path.join(os.path.curdir, d))
            if not os.path.exists(this):
                os.makedirs(this)

    # 'masknet_data_17' is where my_postprocess writes its .npz files below;
    # the original never created it, so the first save would fail on a
    # fresh checkout.
    requiredDirectories = [FLAGS.imgdir, FLAGS.binary, FLAGS.backup,
                           os.path.join(FLAGS.imgdir, 'out'),
                           "masknet_data_17"]
    if FLAGS.summary:
        requiredDirectories.append(FLAGS.summary)
    _get_dir(requiredDirectories)

    tfnet = TFNet(FLAGS)

    # Write an initial (step 0) checkpoint with a dummy loss profile so a
    # restorable state exists before feature extraction starts.
    profile = [(0.0, 0.0)]
    my_save_ckpt(tfnet, 0, profile)
    # NOTE(review): a stray `aaaa;` statement stood here in the original;
    # it raised NameError and aborted the run right after the checkpoint
    # write, making everything below unreachable. Removed.

    my_pool = ThreadPool()

    # Gather all person-annotated images from COCO train2014 + val2014.
    bdir = '../darknet/scripts/coco'
    all_inps = process_coco(COCO(bdir + "/annotations/person_keypoints_train2014.json"), bdir + "/images/train2014")
    all_inps += process_coco(COCO(bdir + "/annotations/person_keypoints_val2014.json"), bdir + "/images/val2014")

    batch = min(FLAGS.batch, len(all_inps))

    # predict in batches
    n_batch = int(math.ceil(len(all_inps) / batch))
    for j in range(n_batch):
        from_idx = j * batch
        to_idx = min(from_idx + batch, len(all_inps))

        # collect images input in the batch; preprocessing is fanned out
        # across the thread pool.
        this_batch = all_inps[from_idx:to_idx]
        inp_feed = my_pool.map(lambda inp: (
            np.expand_dims(tfnet.framework.preprocess(
                os.path.join(inp[0], inp[1])), 0)), this_batch)

        # Feed the whole batch to the net in one session run; fetch both
        # the regular output and the auxiliary my_out tensor.
        feed_dict = {tfnet.inp: np.concatenate(inp_feed, 0)}
        tfnet.say('Forwarding {} inputs ...'.format(len(inp_feed)))
        start = time.time()
        out = tfnet.sess.run([tfnet.out, tfnet.my_out], feed_dict)
        my_out = out[1]
        out = out[0]
        stop = time.time()
        last = stop - start
        tfnet.say('Total time = {}s / {} inps = {} ips'.format(
            last, len(inp_feed), len(inp_feed) / last))

        # Post processing: dump each raw my_out prediction as a compressed
        # .npz under masknet_data_17/, named after the source image.
        tfnet.say('Post processing {} inputs ...'.format(len(inp_feed)))
        start = time.time()
        my_pool.map(lambda p: (lambda i, prediction:
            my_postprocess(tfnet.framework,
                prediction, os.path.join(this_batch[i][0], this_batch[i][1]), os.path.join("masknet_data_17", this_batch[i][1])))(*p),
            enumerate(my_out))
        stop = time.time()
        last = stop - start

        # Timing
        tfnet.say('Total time = {}s / {} inps = {} ips, processed {}/{}'.format(
            last, len(inp_feed), len(inp_feed) / last, to_idx, len(all_inps)))