-
Notifications
You must be signed in to change notification settings - Fork 0
/
tensorflowLiteMobilenet.py
70 lines (60 loc) · 2.33 KB
/
tensorflowLiteMobilenet.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import numpy as np
import tensorflow as tf
import cv2 as cv
# --- Model / video setup -----------------------------------------------------
# Machine-specific paths, hoisted to named constants so they are easy to edit.
MODEL_PATH = "/home/harry/git/mqp/AITraining/models/tflite/new_mv2.tflite"
VIDEO_PATH = "/home/harry/git/mqp/AITraining/Divers Fight the Invasive Lionfish National Geographic.mp4"

# Load the TFLite detection model and allocate its tensors (required before
# set_tensor/invoke can be called).
interpreter = tf.lite.Interpreter(model_path=MODEL_PATH)
print(interpreter)
interpreter.allocate_tensors()

# Query the model's I/O signature; these index dicts are used by the inference
# loop below to feed frames and read back detections.
input_details = interpreter.get_input_details()
print(input_details)
output_details = interpreter.get_output_details()
print(output_details)
print("== Input details ==")
print("shape:", input_details[0]['shape'])
print("type:", input_details[0]['dtype'])

# Open the source video.  isOpened() prints False when the path is wrong or
# the codec is unsupported -- a cheap sanity check before entering the loop.
cam = cv.VideoCapture(VIDEO_PATH)
print(cam.isOpened())
# --- Inference loop ----------------------------------------------------------
# Reads frames, runs the detector, and draws the top detections until killed.
while True:
    ret, img = cam.read()
    if not ret:
        # End of file (or read error): loop the video by reopening it, then
        # skip this iteration -- `img` is None here, so falling through to
        # cv.resize() would crash.
        cam = cv.VideoCapture("/home/harry/git/mqp/AITraining/Divers Fight the Invasive Lionfish National Geographic.mp4")
        continue

    # Model input is a 300x300 image; add the leading batch dimension.
    img = cv.resize(img, (300, 300))
    # NOTE(review): the frame is fed as raw BGR float32 in [0, 255].  Many
    # SSD-MobileNet exports expect RGB and/or values normalized to [-1, 1]
    # (see the commented-out (x - 127.5) / 127.5 in the original) -- confirm
    # against this model's training pipeline.
    input_image = np.expand_dims(np.asarray(img, dtype=np.float32), 0)

    interpreter.set_tensor(input_details[0]['index'], input_image)
    interpreter.invoke()

    # TFLite detection postprocess outputs, squeezed to drop the batch dim.
    boxes = np.squeeze(interpreter.get_tensor(output_details[0]['index']))
    labels = np.squeeze(interpreter.get_tensor(output_details[1]['index']))
    # `scores` is currently unused; a natural next step is to skip boxes
    # below a confidence threshold instead of always drawing the first 10.
    scores = np.squeeze(interpreter.get_tensor(output_details[2]['index']))
    print(labels)

    # Draw the first 10 detections.  Each box is normalized
    # [ymin, xmin, ymax, xmax]; scale back to the 300x300 frame.
    for index in range(10):
        ymin, xmin, ymax, xmax = (int(coord * 300) for coord in boxes[index])
        cv.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 0, 255), thickness=4)

    cv.imshow('img', img)
    cv.waitKey(1)