Python 3 compatibility #791

Open · wants to merge 2 commits into base: 4.x
1 change: 1 addition & 0 deletions testdata/dnn/darknet/generate_darknet_models.py
@@ -1,5 +1,6 @@
 # export PYTHONPATH=/path/to/darknet/python/:$PYTHONPATH
 # export LD_LIBRARY_PATH=/path/to/darknet/:$LD_LIBRARY_PATH
+from builtins import range
 import darknet as dn
 import numpy as np

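Note: `from builtins import range` comes from the `future` compatibility package. On Python 2 it shadows the eager, list-returning built-in with a lazy object that matches Python 3 semantics; on Python 3 it simply re-imports the normal built-in. A minimal sketch of the behavior it unifies (assumes `future` is installed on the Python 2 side):

    from builtins import range  # plain built-in on Python 3; backport on Python 2

    r = range(5)                # lazy on both interpreters
    print(list(r))              # [0, 1, 2, 3, 4] everywhere
    # Bare Python 2 range(5) would instead allocate the whole list up front.
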
15 changes: 10 additions & 5 deletions testdata/dnn/download_models.py
@@ -1,18 +1,23 @@
 #!/usr/bin/env python

 from __future__ import print_function
+from __future__ import division
+from future import standard_library
+standard_library.install_aliases()
+from builtins import object
+from past.utils import old_div
 import hashlib
 import sys
 import tarfile
 import requests

 if sys.version_info[0] < 3:
-    from urllib2 import urlopen
+    from urllib.request import urlopen
 else:
     from urllib.request import urlopen


-class Model:
+class Model(object):
     MB = 1024*1024
     BUFSIZE = 10*MB

@@ -33,7 +38,7 @@ def getMB(r):
             d = dict(r.info())
             for c in ['content-length', 'Content-Length']:
                 if c in d:
-                    return int(d[c]) / self.MB
+                    return old_div(int(d[c]), self.MB)
             return '<unknown>'
         print('  {} {} [{} Mb]'.format(r.getcode(), r.msg, getMB(r)))

@@ -72,7 +77,7 @@ def get(self):
             assert self.downloader
             print('  hash check failed - downloading')
             sz = self.downloader(self.filename)
-            print('  size = %.2f Mb' % (sz / (1024.0 * 1024)))
+            print('  size = %.2f Mb' % (old_div(sz, (1024.0 * 1024))))

         print('  done')
         print('  file {}'.format(self.filename))
@@ -115,7 +120,7 @@ def download_gdrive(dst):
     response = session.get(URL, params = { 'id' : gid }, stream = True)

     def get_confirm_token(response):  # in case of large files
-        for key, value in response.cookies.items():
+        for key, value in list(response.cookies.items()):
             if key.startswith('download_warning'):
                 return value
         return None
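Note: the two `future`/`past` helpers pulled in above carry most of this file's port. `standard_library.install_aliases()` makes Python 3 module paths such as `urllib.request` importable on Python 2, which is why both branches of the version check can now import the same name. `past.utils.old_div` keeps Python 2 division semantics after `from __future__ import division`: floor division when both operands are integers, true division otherwise. A rough equivalent for illustration only (the real one ships in the `past` package):

    import numbers

    def old_div(a, b):
        # Python 2 '/': two ints floor, anything else divides truly
        if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral):
            return a // b
        return a / b

So `old_div(int(d[c]), self.MB)` still reports the truncated integer megabyte count the Python 2 code printed.
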
9 changes: 5 additions & 4 deletions testdata/dnn/layers/run.py
@@ -1,5 +1,6 @@
 # coding: utf-8

+from __future__ import print_function
 import sys, os, glob

 CAFFE_ROOT = "/home/vitaliy/opencv/caffe/"
@@ -20,7 +21,7 @@ def get_cafe_output(inp_blob, proto_name, caffemodel_name):
     out_blob = net.blobs['output'].data[...];

     if net.params.get('output'):
-        print "Params count:", len(net.params['output'])
+        print("Params count:", len(net.params['output']))
         net.save(caffemodel_name)

     return out_blob
@@ -37,9 +38,9 @@ def get_cafe_output(inp_blob, proto_name, caffemodel_name):
     inp_blob_name = proto_basename + ".input.npy"
     inp_blob = np.load(inp_blob_name) if os.path.exists(inp_blob_name) else np.load('blob.npy')

-    print "\nGenerate data for:"
-    print cfmod_basename, inp_blob.shape
+    print("\nGenerate data for:")
+    print(cfmod_basename, inp_blob.shape)

     out_blob = get_cafe_output(inp_blob, proto_filename, cfmod_basename)
-    print out_blob.shape
+    print(out_blob.shape)
     np.save(npy_filename, out_blob)
19 changes: 11 additions & 8 deletions testdata/dnn/onnx/generate_onnx_models.py
@@ -1,4 +1,7 @@
 from __future__ import print_function
+from __future__ import division
+from builtins import str
+from past.utils import old_div
 import torch
 from torch.autograd import Variable
 import torch.nn.init as init
@@ -12,7 +15,7 @@


 def assertExpected(s):
-    if not (isinstance(s, str) or (sys.version_info[0] == 2 and isinstance(s, unicode))):
+    if not (isinstance(s, str) or (sys.version_info[0] == 2 and isinstance(s, str))):
         raise TypeError("assertExpected is strings only")

 def assertONNXExpected(binary_pb):
@@ -556,7 +559,7 @@ def __init__(self):
     def forward(self, x):
         exp = torch.exp(x)
         sum = torch.sum(exp, dim=2, keepdim=True)
-        return exp / sum
+        return old_div(exp, sum)

 input = Variable(torch.randn(1, 2, 4, 3))
 model = SoftMaxUnfused()
@@ -587,7 +590,7 @@ def forward(self, image):
         channels = image.size(1)
         h = image.size(2)
         w = image.size(3)
-        image = image.view(batch_size, channels*h* (w / 2), -1)
+        image = image.view(batch_size, channels*h* (old_div(w, 2)), -1)
         return image

 input = Variable(torch.randn(1, 2, 3, 4))
@@ -600,7 +603,7 @@ def __init__(self):
         super(Broadcast, self).__init__()

     def forward(self, x, y):
-        return x * y + (x - x) / y - y
+        return x * y + old_div((x - x), y) - y

 input1 = Variable(torch.randn(1, 4, 1, 2))
 input2 = Variable(torch.randn(1, 4, 1, 1))
@@ -746,7 +749,7 @@ def forward(self, x):
         norm = torch.norm(x, p=2, dim=1, keepdim=True)
         clip = torch.clamp(norm, min=0)
         expand = clip.expand_as(x)
-        return x / expand
+        return old_div(x, expand)

 model = NormL2()
 x = Variable(torch.randn(1, 2, 3, 4))
@@ -813,7 +816,7 @@ def forward(self, x, y):
 save_data_and_model_multy_inputs("upsample_unfused_two_inputs_opset9_torch1.4", UpsampleUnfusedTwoInput(), input_0, input_1, version=9)
 save_data_and_model_multy_inputs("upsample_unfused_two_inputs_opset11_torch1.4", UpsampleUnfusedTwoInput(), input_0, input_1, version=11)

-class FrozenBatchNorm2d(nn.Module):
+class FrozenBatchNorm2d(nn.Module):
     def __init__(self, n):
         super(FrozenBatchNorm2d, self).__init__()
         self.register_buffer("weight", torch.ones(n))
@@ -832,15 +835,15 @@ def forward(self, x):
 model = FrozenBatchNorm2d(2)
 save_data_and_model("batch_norm_subgraph", x, model)

-class GatherScalar(nn.Module):
+class GatherScalar(nn.Module):
     def forward(self, x):
         return x[1]

 x = Variable(torch.randn(2))
 model = GatherScalar()
 save_data_and_model("gather_scalar", x, model)

-class Gather(nn.Module):
+class Gather(nn.Module):
     def forward(self, x):
         return x[..., 1]
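Note: the `old_div` calls in this file land either on float tensors (softmax, L2 norm, broadcasting), where `old_div` falls through to true division and the traced ONNX graphs are numerically unchanged, or on plain Python ints such as `image.size(3)`, where it floors exactly as Python 2's `/` did, keeping the `view()` shape argument an int under Python 3. A quick sanity sketch (assumes `torch` and the `past` package are available):

    import torch
    from past.utils import old_div

    exp = torch.exp(torch.randn(1, 2, 4, 3))
    s = torch.sum(exp, dim=2, keepdim=True)
    # Tensors are not numbers.Integral, so old_div defers to '/'
    assert torch.equal(old_div(exp, s), exp / s)
    assert old_div(7, 2) == 3  # ints keep Python 2 floor behavior
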
3 changes: 2 additions & 1 deletion testdata/dnn/tensorflow/generate_tf2_models.py
@@ -1,4 +1,5 @@
 # This script is used to generate test data for OpenCV deep learning module.
+from builtins import next
 import numpy as np
 import tensorflow as tf
 import shutil
@@ -37,7 +38,7 @@ def save(model, name, **kwargs):

     assert(len(kwargs) == 1)

-    inputData = gen_data(next(iter(kwargs.values())))
+    inputData = gen_data(next(iter(list(kwargs.values()))))
     outputData = model(inputData)

     writeBlob(inputData, name + '_in')
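Note: futurize wraps dict views in `list(...)` mechanically because Python 3's `.values()` returns a view rather than a list. In this particular call the wrapper is redundant but harmless: `iter()` accepts a view directly. A small illustration (the dict here is a made-up stand-in for `kwargs`):

    kwargs = {'input': [1, 24, 24, 2]}
    first = next(iter(kwargs.values()))        # already fine on 2 and 3
    first = next(iter(list(kwargs.values())))  # futurize's belt-and-braces form
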
7 changes: 5 additions & 2 deletions testdata/dnn/tensorflow/generate_tf_models.py
@@ -1,4 +1,7 @@
+from __future__ import division
 # This script is used to generate test data for OpenCV deep learning module.
+from builtins import zip
+from past.utils import old_div
 import numpy as np
 import tensorflow as tf
 import os
@@ -869,8 +872,8 @@ def pad_depth(x, desired_channels):
 inp = tf.placeholder(tf.float32, [1, 9, 6, 2], 'input')
 conv = tf.layers.conv2d(inp, filters=2, kernel_size=[1, 1])
 shape_input = tf.shape(inp)
-hi = shape_input[1] / 3
-wi = shape_input[2] / 2
+hi = old_div(shape_input[1], 3)
+wi = old_div(shape_input[2], 2)
 input_down = tf.image.resize(conv, size=[hi,wi], method=0, name='resize_down')
 save(inp, input_down, 'resize_bilinear_down')
 ################################################################################
5 changes: 3 additions & 2 deletions testdata/perf/append.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import sys, re, os.path
 from xml.dom.minidom import parse

@@ -22,7 +23,7 @@ def processLogFile(outname, inname):
         if case.nodeName in tests:
             del tests[case.nodeName]

-    for case in tests.items():
+    for case in list(tests.items()):
         fstorage.appendChild(case[1])

     if tests:
@@ -39,7 +40,7 @@ def processLogFile(outname, inname):

 if __name__ == "__main__":
     if len(sys.argv) < 3:
-        print "Usage:\n", os.path.basename(sys.argv[0]), "<log_name>.xml <new_log_name>.xml"
+        print("Usage:\n", os.path.basename(sys.argv[0]), "<log_name>.xml <new_log_name>.xml")
         exit(0)

     processLogFile(sys.argv[1], sys.argv[2])
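Note: unlike the redundant wrapper in generate_tf2_models.py, `list(tests.items())` can genuinely matter: Python 3 raises RuntimeError if a dict changes size while a view over it is being iterated, while iterating a materialized snapshot is safe on both versions. A minimal illustration:

    d = {'a': 1, 'b': 2}
    for k, v in list(d.items()):  # snapshot, so mutating d inside is safe
        if v == 1:
            del d[k]
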
87 changes: 44 additions & 43 deletions testdata/perf/clean_regex.py
@@ -1,43 +1,44 @@
-import sys, re, os.path
-from xml.dom.minidom import parse
-
-def parseLogFile(filename):
-    tests = []
-    log = parse(open(filename, 'rb'))
-    fstorage = log.firstChild
-    #print help(log)
-    for case in fstorage.childNodes:
-        if case.nodeName == "#text":
-            continue
-        #print case.nodeName
-        tests.append(case.nodeName)
-    return tests
-
-def process(filename, expr, save_results):
-    log = parse(open(filename, 'rb'))
-    fstorage = log.firstChild
-    for case in fstorage.childNodes:
-        if case.nodeName == "#text":
-            continue
-        if expr.search(case.nodeName):
-            print case.nodeName
-            fstorage.removeChild(case)
-
-    if save_results:
-        xmlstr = log.toxml()
-        xmlstr = re.sub(r"(\s*\n)+", "\n", xmlstr)
-        xmlstr = re.sub(r"(\s*\r\n)+", "\r\n", xmlstr)
-        xmlstr = re.sub(r"<(\w*)/>", "<\\1></\\1>", xmlstr)
-        xmlstr = xmlstr.replace("&quot;", "\"")
-        f = open(filename, 'wb')
-        f.write(xmlstr)
-        f.close()
-
-if __name__ == "__main__":
-    if len(sys.argv) < 3:
-        print "This script is used to remove entries from sanity xml"
-        print " Usage:\n", os.path.basename(sys.argv[0]), "<name>.xml <regex>"
-        exit(0)
-
-    process(sys.argv[1], re.compile(sys.argv[2]), len(sys.argv) == 4)
-
+from __future__ import print_function
+import sys, re, os.path
+from xml.dom.minidom import parse
+
+def parseLogFile(filename):
+    tests = []
+    log = parse(open(filename, 'rb'))
+    fstorage = log.firstChild
+    #print help(log)
+    for case in fstorage.childNodes:
+        if case.nodeName == "#text":
+            continue
+        #print case.nodeName
+        tests.append(case.nodeName)
+    return tests
+
+def process(filename, expr, save_results):
+    log = parse(open(filename, 'rb'))
+    fstorage = log.firstChild
+    for case in fstorage.childNodes:
+        if case.nodeName == "#text":
+            continue
+        if expr.search(case.nodeName):
+            print(case.nodeName)
+            fstorage.removeChild(case)
+
+    if save_results:
+        xmlstr = log.toxml()
+        xmlstr = re.sub(r"(\s*\n)+", "\n", xmlstr)
+        xmlstr = re.sub(r"(\s*\r\n)+", "\r\n", xmlstr)
+        xmlstr = re.sub(r"<(\w*)/>", "<\\1></\\1>", xmlstr)
+        xmlstr = xmlstr.replace("&quot;", "\"")
+        f = open(filename, 'wb')
+        f.write(xmlstr)
+        f.close()
+
+if __name__ == "__main__":
+    if len(sys.argv) < 3:
+        print("This script is used to remove entries from sanity xml")
+        print(" Usage:\n", os.path.basename(sys.argv[0]), "<name>.xml <regex>")
+        exit(0)
+
+    process(sys.argv[1], re.compile(sys.argv[2]), len(sys.argv) == 4)
+
3 changes: 2 additions & 1 deletion testdata/perf/clean_unused.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import sys, re, os.path
 from xml.dom.minidom import parse

@@ -33,7 +34,7 @@ def processLogFile(outname, inname, tests):

 if __name__ == "__main__":
     if len(sys.argv) < 3:
-        print "Usage:\n", os.path.basename(sys.argv[0]), "<log_name>.xml <log_name>.backup.xml"
+        print("Usage:\n", os.path.basename(sys.argv[0]), "<log_name>.xml <log_name>.backup.xml")
         exit(0)

     tests = parseLogFile(sys.argv[1])