Skip to content

Commit

Permalink
Merge remote-tracking branch 'refs/remotes/thtrieu/master'
Browse files Browse the repository at this point in the history
  • Loading branch information
abagshaw committed Jan 10, 2018
2 parents abdb910 + 178debe commit da59eb6
Show file tree
Hide file tree
Showing 5 changed files with 17 additions and 21 deletions.
16 changes: 8 additions & 8 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -88,27 +88,27 @@ flow --h
First, let's take a closer look at one very useful option, `--load`

```bash
# 1. Load tiny-yolo.weights
flow --model cfg/tiny-yolo.cfg --load bin/tiny-yolo.weights

# 2. To completely initialize a model, leave the --load option
flow --model cfg/yolo-new.cfg

# 3. It is useful to reuse the first identical layers of tiny for `yolo-new`
flow --model cfg/yolo-new.cfg --load bin/tiny-yolo.weights
# this will print out which layers are reused, which are initialized
```

All input images from default folder `sample_img/` are flowed through the net and predictions are put in `sample_img/out/`. We can always specify more parameters for such forward passes, such as detection threshold, batch size, images folder, etc.

```bash
# Forward all images in sample_img/ using tiny yolo and 100% GPU usage
flow --imgdir sample_img/ --model cfg/tiny-yolo.cfg --load bin/tiny-yolo.weights --gpu 1.0
```
JSON output can be generated with a description of the pixel location of each bounding box. Each prediction is stored in the `sample_img/out` folder by default. An example JSON array is shown below.
```bash
# Forward all images in sample_img/ using tiny yolo and JSON output.
flow --imgdir sample_img/ --model cfg/tiny-yolo.cfg --load bin/tiny-yolo.weights --json
```
JSON output:
```json
Expand All @@ -127,7 +127,7 @@ Training is simple as you only have to add option `--train`. Training set and an

```bash
# Initialize yolo-new from yolo-tiny, then train the net on 100% GPU:
flow --model cfg/yolo-new.cfg --load bin/tiny-yolo.weights --train --gpu 1.0

# Completely initialize yolo-new and train it with ADAM optimizer
flow --model cfg/yolo-new.cfg --train --trainer adam
Expand All @@ -143,7 +143,7 @@ flow --train --model cfg/yolo-new.cfg --load -1
flow --model cfg/yolo-new.cfg --load 1500

# Fine tuning yolo-tiny from the original one
flow --train --model cfg/tiny-yolo.cfg --load bin/tiny-yolo.weights
```

Example of training on Pascal VOC 2007:
Expand Down Expand Up @@ -248,7 +248,7 @@ options = {"model": "cfg/yolo.cfg", "load": "bin/yolo.weights", "threshold": 0.1

tfnet = TFNet(options)

imgcv = cv2.imread("./sample_img/sample_dog.jpg")
result = tfnet.return_predict(imgcv)
print(result)
```
Expand Down
11 changes: 3 additions & 8 deletions darkflow/net/flow.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,15 +120,10 @@ def predict(self):
to_idx = min(from_idx + batch, len(all_inps))

# collect images input in the batch
inp_feed = list(); new_all = list()
this_batch = all_inps[from_idx:to_idx]
for inp in this_batch:
new_all += [inp]
this_inp = os.path.join(inp_path, inp)
this_inp = self.framework.preprocess(this_inp)
expanded = np.expand_dims(this_inp, 0)
inp_feed.append(expanded)
this_batch = new_all
inp_feed = pool.map(lambda inp: (
np.expand_dims(self.framework.preprocess(
os.path.join(inp_path, inp)), 0)), this_batch)

# Feed to the net
feed_dict = {self.inp : np.concatenate(inp_feed, 0)}
Expand Down
2 changes: 1 addition & 1 deletion darkflow/net/help.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ def build_train_op(self):

def load_from_ckpt(self):
if self.FLAGS.load < 0: # load lastest ckpt
with open(self.FLAGS.backup + 'checkpoint', 'r') as f:
with open(os.path.join(self.FLAGS.backup, 'checkpoint'), 'r') as f:
last = f.readlines()[-1].strip()
load_point = last.split(' ')[1]
load_point = load_point.split('"')[1]
Expand Down
4 changes: 2 additions & 2 deletions darkflow/net/yolo/misc.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ def labels(meta, FLAGS):
meta['labels'] = labels20

def is_inp(self, name):
    """Return True if *name* ends with a supported image extension.

    The check is case-insensitive, so '.JPG', '.Jpeg', '.PNG', etc. all
    match. This replaces the older ``name[-4:] in [...]`` membership test,
    which silently failed for the 5-character '.jpeg' suffix and required
    every case variant to be listed explicitly.
    """
    # str.endswith accepts a tuple of suffixes, so one call covers all
    # supported extensions after lowercasing.
    return name.lower().endswith(('.jpg', '.jpeg', '.png'))

def show(im, allobj, S, w, h, cellx, celly):
for obj in allobj:
Expand All @@ -59,7 +59,7 @@ def show(im, allobj, S, w, h, cellx, celly):
(int(centerx - ww/2), int(centery - hh/2)),
(int(centerx + ww/2), int(centery + hh/2)),
(0,0,255), 2)
cv2.imshow("result", im)
cv2.imshow('result', im)
cv2.waitKey()
cv2.destroyAllWindows()

Expand Down
5 changes: 3 additions & 2 deletions darkflow/utils/im_transform.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,8 @@ def imcv2_recolor(im, a = .1):
im = im * (1 + t * a)
mx = 255. * (1 + a)
up = np.random.uniform() * 2 - 1
im = np.power(im/mx, 1. + up * .5)
# im = np.power(im/mx, 1. + up * .5)
im = cv2.pow(im/mx, 1. + up * .5)
return np.array(im * 255., np.uint8)

def imcv2_affine_trans(im):
Expand All @@ -27,4 +28,4 @@ def imcv2_affine_trans(im):
im = im[offy : (offy + h), offx : (offx + w)]
flip = np.random.binomial(1, .5)
if flip: im = cv2.flip(im, 1)
return im, [w, h, c], [scale, [offx, offy], flip]
return im, [w, h, c], [scale, [offx, offy], flip]

0 comments on commit da59eb6

Please sign in to comment.