Maintenance: lint and remove unused code #60

Closed · wants to merge 4 commits
2 changes: 1 addition & 1 deletion README.rst
@@ -151,7 +151,7 @@ dynamic programming and is linear O(n):
Linear: time = -1.8E-06 + 7.3E-06*n (sec)

Report Generation
-------
-----------------

This feature allows users to generate a report based on the outputs received from
calling the :code:`big-o` function.
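A minimal sketch of the report-generation workflow described in that README section, assuming the package-level names used elsewhere in this diff (big_o.big_o, big_o.datagen.integers, reports.big_o_report); the timed function and data sizes are only illustrative:

import big_o
from big_o import reports

# Time `sorted` on lists of random integers and fit the complexity classes.
best, others = big_o.big_o(
    sorted,
    lambda n: big_o.datagen.integers(n, 100, 500),
    return_raw_data=True)

# Render the fitted classes as a human-readable report.
print(reports.big_o_report(best, others))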
6 changes: 1 addition & 5 deletions big_o/__init__.py
@@ -1,10 +1,6 @@
from __future__ import absolute_import

from big_o import complexities, datagen, reports # noqa
from big_o.big_o import ( # noqa
big_o,
infer_big_o_class,
measure_execution_time,
)
from big_o import reports # noqa
from big_o import complexities # noqa
from big_o import datagen # noqa
2 changes: 0 additions & 2 deletions big_o/big_o.py
@@ -1,5 +1,3 @@
from __future__ import absolute_import

from timeit import Timer

import numpy as np
6 changes: 4 additions & 2 deletions big_o/complexities.py
@@ -130,6 +130,7 @@ def __eq__(self, other):
def __hash__(self):
return id(self)


# --- Concrete implementations of the most popular complexity classes


@@ -263,5 +264,6 @@ def coefficients(self):
return np.exp(a), np.exp(b)


ALL_CLASSES = [Constant, Linear, Quadratic, Cubic, Polynomial,
Logarithmic, Linearithmic, Exponential]
ALL_CLASSES = [Constant, Logarithmic, Linear, Linearithmic,
Quadratic, Cubic, Polynomial,
Exponential]
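A short sketch of how the reordered ALL_CLASSES list is typically consumed — fit every class to measured timings and keep the one with the smallest residuals. The fit()/compute() calls mirror the test suite further down in this diff; the sample data is made up:

import numpy as np
from big_o import complexities

ns = np.array([100., 200., 400., 800.])
times = np.array([0.011, 0.019, 0.042, 0.079])  # hypothetical timings

# Fit each complexity class and pick the one with the smallest residuals.
fits = []
for class_ in complexities.ALL_CLASSES:
    complexity = class_()
    residuals = complexity.fit(ns, times)
    fits.append((residuals, complexity))

best_residuals, best = min(fits, key=lambda pair: pair[0])
print(best, best.compute(ns))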
2 changes: 1 addition & 1 deletion big_o/datagen.py
@@ -11,7 +11,7 @@ def n_(n):

def range_n(n, start=0):
""" Return the sequence [start, start+1, ..., start+N-1]. """
return list(range(start, start+n))
return list(range(start, start + n))


def integers(n, min_, max_):
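For reference, a hedged sketch of the data generators touched here, assuming n_ is the identity generator its name suggests and that integers(n, min_, max_) draws n random integers in that range:

from big_o import datagen

datagen.n_(5)                  # 5 (the input size itself)
datagen.range_n(5)             # [0, 1, 2, 3, 4]
datagen.range_n(5, start=2)    # [2, 3, 4, 5, 6]
datagen.integers(5, 0, 10)     # five random integers between 0 and 10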
1 change: 1 addition & 0 deletions big_o/reports.py
@@ -1,5 +1,6 @@
from big_o.complexities import ComplexityClass


def big_o_report(best, others):
""" Creates a human-readable report of the output of the big_o function.

45 changes: 21 additions & 24 deletions big_o/test/test_big_o.py
@@ -1,11 +1,11 @@
import unittest
import time
import unittest

import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_array_almost_equal, assert_array_equal

import big_o
from big_o import datagen
from big_o import complexities as compl
from big_o import complexities as compl, datagen


def dummy_constant_function(n):
@@ -64,16 +64,15 @@ def test_measure_execution_time(self):
def f(n):
time.sleep(0.1 * n)
return n

ns, t = big_o.measure_execution_time(
f, datagen.n_,
min_n=1, max_n=5, n_measures=5, n_repeats=1, n_timings=5
)
assert_array_equal(ns, np.arange(1, 6))
assert_array_almost_equal(t*10., np.arange(1, 6), 1)
assert_array_almost_equal(t * 10., np.arange(1, 6), 1)

def test_infer_big_o(self):
x = np.linspace(10, 100, 100)

desired = [
(lambda x: x*0.+2., compl.Constant, [2.]),
(lambda x: 4.*x, compl.Linear, [0., 4.]),
@@ -85,6 +84,7 @@ def test_infer_big_o(self):
(lambda x: 0.6**x, compl.Exponential, [0., np.log(0.6)]),
]

x = np.linspace(10, 100, 100)
for f, class_, coeff in desired:
y = f(x)
res_class, fitted = big_o.infer_big_o_class(x, y)
@@ -94,7 +94,7 @@ def test_infer_big_o_list_input(self):
def test_infer_big_o_list_input(self):
# Check a normal list / iterable can be passed to infer_big_o_class()
ns = range(10, 100, 10)
time = [x**2 for x in ns]
time = [x ** 2 for x in ns]

best, fitted = big_o.infer_big_o_class(ns, time)

@@ -104,23 +104,18 @@ def test_infer_big_o_list_input(self):
best_check, fitted_check = big_o.infer_big_o_class(ns_np, time_np)

self.assertEqual(best.order, best_check.order,
msg = "Order of complexity {} did not match check complexity {}".format(
best, best_check))
msg="Order of complexity {} did not match check complexity {}".format(
best, best_check))
self.assertAlmostEqual(fitted[best], fitted_check[best_check])

def test_big_o(self):
# Numpy sorts are fast enough that they are very close to linear
# In testing, heapsort was found to follow the best clear n * log(n) curve
random_state = np.random.RandomState()
random_array = random_state.rand(100000)

# Each test case is a tuple
# (function_to_evaluate, expected_complexity_class, range_for_n)
desired = [
(dummy_constant_function, compl.Constant, (1000, 10000)),
(dummy_linear_function, compl.Linear, (100, 5000)),
(dummy_constant_function, compl.Constant, (1000, 50000)),
(dummy_linear_function, compl.Linear, (100, 50000)),
(dummy_quadratic_function, compl.Quadratic, (1, 100)),
(dummy_linearithmic_function, compl.Linearithmic, (10, 5000)),
(dummy_linearithmic_function, compl.Linearithmic, (100, 100000)),
]
for func, class_, n_range in desired:
res_class, fitted = big_o.big_o(
@@ -129,8 +124,8 @@ def test_big_o(self):
max_n=n_range[1],
n_measures=25,
n_repeats=1,
n_timings=10,
return_raw_data = True)
n_timings=3,
return_raw_data=True)

residuals = fitted[res_class]

@@ -151,9 +146,11 @@ def test_big_o(self):
(complexity, residuals) for complexity, residuals in fitted.items()
if isinstance(complexity, class_))

self.assertIsInstance(res_class, class_,
msg = "Best matched complexity is {} (r={:f}) when {} (r={:f}) was expected"
.format(res_class, residuals, sol_class, sol_residuals))
self.assertIsInstance(
res_class, class_,
msg="Best matched complexity is {} (r={:f}) when {} (r={:f}) was expected"
.format(res_class, residuals, sol_class, sol_residuals)
)

def test_big_o_return_raw_data_default(self):
_, fitted = big_o.big_o(
@@ -202,7 +199,7 @@ def dummy(n):
measures = fitted['measures']
self.assertEqual(len(measures), n_measures)
for i in range(1, n_measures):
self.assertGreater(measures[i], measures[i-1])
self.assertGreater(measures[i], measures[i - 1])

self.assertIn('times', fitted)
times = fitted['times']
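The two-step workflow these tests exercise — time a function over growing inputs, then infer its complexity class — can be sketched as follows; the dummy function and parameter values are illustrative only:

import big_o
from big_o import datagen

def quadratic_work(n):
    # Nested loop: roughly n**2 operations.
    return sum(i * j for i in range(n) for j in range(n))

ns, times = big_o.measure_execution_time(
    quadratic_work, datagen.n_,
    min_n=10, max_n=500, n_measures=10, n_repeats=1, n_timings=3)

best, fitted = big_o.infer_big_o_class(ns, times)
print(best)  # expected: a Quadratic fit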
27 changes: 16 additions & 11 deletions big_o/test/test_complexities.py
@@ -1,4 +1,5 @@
import unittest

import numpy as np
from numpy.testing import assert_allclose

@@ -27,13 +28,13 @@ def test_compute(self):

ref_y = complexity.compute(x)
assert_allclose(y, ref_y,
err_msg = "compute() failed to match expected values for class %r" % class_)
err_msg="compute() failed to match expected values for class %r" % class_)

# Check residuals are correct
# Use the atol constant from np.allclose() because the default for
# np.testing.assert_allclose() for atol (0) is too low for this comparison
assert_allclose(residuals, np.sum((y - ref_y) ** 2), rtol=1e-07, atol=1e-08,
err_msg = "fit() residuals failed to match expected value for class %r" % class_)
err_msg="fit() residuals failed to match expected value for class %r" % class_)

def test_not_fitted(self):
for class_ in complexities.ALL_CLASSES:
@@ -65,7 +66,7 @@ def test_fit_residuals(self):
for f, class_ in desired:
# Adding random noise so the residual doesn't approximate zero
y = f(counts + np.abs(rng.standard_normal(counts.size)) * .1) \
+ np.abs(rng.standard_normal(counts.size))
+ np.abs(rng.standard_normal(counts.size))

complexity = class_()
residuals = complexity.fit(counts, y)
@@ -75,12 +76,12 @@ def test_fit_residuals(self):
# Use the atol constant from np.allclose() because the default for
# np.testing.assert_allclose() for atol (0) is too low for this comparison
assert_allclose(residuals, np.sum((y - ref_y) ** 2), rtol=1e-07, atol=1e-08,
err_msg = "fit() residuals failed to match expected value for class %r" % class_)
err_msg="fit() residuals failed to match expected value for class %r" % class_)

def test_fit_list_input(self):
# Check a normal list / iterable can be passed to fit()
ns = range(10, 100, 10)
time = [x**2 for x in ns]
time = [x ** 2 for x in ns]

quadratic = complexities.Quadratic()
quadratic.fit(ns, time)
@@ -97,9 +98,13 @@ def test_fit_list_input(self):
coeff_check = quadratic_check.coeff
coefficients_check = quadratic_check.coefficients()

assert_allclose(coeff, coeff_check,
err_msg = "coeff of {} did not match coeff of check complexity {}".format(
quadratic, quadratic_check))
assert_allclose(coefficients, coefficients_check,
err_msg = "coefficients of {} did not match coefficients of check complexity {}".format(
quadratic, quadratic_check))
assert_allclose(
coeff, coeff_check,
err_msg="coeff of {} did not match coeff of check complexity {}".format(
quadratic, quadratic_check)
)
assert_allclose(
coefficients, coefficients_check,
err_msg="coefficients of {} did not match coefficients of check complexity {}".format(
quadratic, quadratic_check)
)
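A sketch of the coeff / coefficients() distinction checked above, using synthetic quadratic timings; the exact analytical form of the returned coefficients is whatever the class defines:

import numpy as np
from big_o import complexities

ns = np.arange(10, 100, 10, dtype=float)
times = ns ** 2

quadratic = complexities.Quadratic()
quadratic.fit(ns, times)

print(quadratic.coeff)           # raw coefficients from the least-squares fit
print(quadratic.coefficients())  # the same fit expressed in the class's analytical form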
6 changes: 4 additions & 2 deletions big_o/test/test_reports.py
@@ -1,7 +1,9 @@
import unittest

import big_o
from big_o import reports


class TestReport(unittest.TestCase):

def test_report_return(self):
@@ -10,9 +12,9 @@ def test_report_return(self):
assert isinstance(reports.big_o_report(best, others), str)

def test_report_return_raw_data_true(self):
best, others = big_o.big_o(sorted, lambda n: big_o.datagen.integers(n, 100, 500), return_raw_data=True)
best, others = big_o.big_o(sorted, lambda n: big_o.datagen.integers(n, 100, 500),
return_raw_data=True)

report = reports.big_o_report(best, others)
assert 'measures' not in report
assert 'times' not in report
